diff --git a/client/consul_template.go b/client/consul_template.go
index 6861c5159071..1335fd7e216b 100644
--- a/client/consul_template.go
+++ b/client/consul_template.go
@@ -12,7 +12,6 @@ import (
 	ctconf "github.com/hashicorp/consul-template/config"
 	"github.com/hashicorp/consul-template/manager"
 	"github.com/hashicorp/consul-template/signals"
-	"github.com/hashicorp/consul-template/watch"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/nomad/client/config"
 	"github.com/hashicorp/nomad/client/driver/env"
@@ -337,20 +336,23 @@ func templateRunner(tmpls []*structs.Template, config *config.Config,
 	}
 
 	// Set the config
-	flat := make([]*ctconf.ConfigTemplate, 0, len(ctmplMapping))
+	flat := ctconf.TemplateConfigs(make([]*ctconf.TemplateConfig, 0, len(ctmplMapping)))
 	for ctmpl := range ctmplMapping {
 		local := ctmpl
 		flat = append(flat, &local)
 	}
-	runnerConfig.ConfigTemplates = flat
+	runnerConfig.Templates = &flat
 
 	runner, err := manager.NewRunner(runnerConfig, false, false)
 	if err != nil {
 		return nil, nil, err
 	}
 
+	// Set Nomad's environment variables
+	runner.Env = taskEnv.Build().EnvMap()
+
 	// Build the lookup
-	idMap := runner.ConfigTemplateMapping()
+	idMap := runner.TemplateConfigMapping()
 	lookup := make(map[string][]*structs.Template, len(idMap))
 	for id, ctmpls := range idMap {
 		for _, ctmpl := range ctmpls {
@@ -365,13 +367,11 @@ func templateRunner(tmpls []*structs.Template, config *config.Config,
 
 // parseTemplateConfigs converts the tasks templates into consul-templates
 func parseTemplateConfigs(tmpls []*structs.Template, taskDir string,
-	taskEnv *env.TaskEnvironment, allowAbs bool) (map[ctconf.ConfigTemplate]*structs.Template, error) {
+	taskEnv *env.TaskEnvironment, allowAbs bool) (map[ctconf.TemplateConfig]*structs.Template, error) {
 	// Build the task environment
-	// TODO Should be able to inject the Nomad env vars into Consul-template for
-	// rendering
 	taskEnv.Build()
 
-	ctmpls := make(map[ctconf.ConfigTemplate]*structs.Template, len(tmpls))
+	ctmpls := make(map[ctconf.TemplateConfig]*structs.Template, len(tmpls))
 	for _, tmpl := range tmpls {
 		var src, dest string
 		if tmpl.SourcePath != "" {
@@ -389,15 +389,13 @@ func parseTemplateConfigs(tmpls []*structs.Template, taskDir string,
 			dest = filepath.Join(taskDir, taskEnv.ReplaceEnv(tmpl.DestPath))
 		}
 
-		ct := ctconf.ConfigTemplate{
-			Source:           src,
-			Destination:      dest,
-			EmbeddedTemplate: tmpl.EmbeddedTmpl,
-			Perms:            ctconf.DefaultFilePerms,
-			Wait:             &watch.Wait{},
-		}
+		ct := ctconf.DefaultTemplateConfig()
+		ct.Source = &src
+		ct.Destination = &dest
+		ct.Contents = &tmpl.EmbeddedTmpl
+		ct.Finalize()
 
-		ctmpls[ct] = tmpl
+		ctmpls[*ct] = tmpl
 	}
 
 	return ctmpls, nil
@@ -406,34 +404,30 @@ func parseTemplateConfigs(tmpls []*structs.Template, taskDir string,
 // runnerConfig returns a consul-template runner configuration, setting the
 // Vault and Consul configurations based on the clients configs.
 func runnerConfig(config *config.Config, vaultToken string) (*ctconf.Config, error) {
-	conf := &ctconf.Config{}
+	conf := ctconf.DefaultConfig()
 
-	set := func(keys []string) {
-		for _, k := range keys {
-			conf.Set(k)
-		}
-	}
+	t, f := true, false
+
 	// Force faster retries
 	if testRetryRate != 0 {
-		conf.Retry = testRetryRate
-		conf.Set("retry")
+		rate := testRetryRate
+		conf.Consul.Retry.Backoff = &rate
 	}
 
 	// Setup the Consul config
 	if config.ConsulConfig != nil {
-		conf.Consul = config.ConsulConfig.Addr
-		conf.Token = config.ConsulConfig.Token
-		set([]string{"consul", "token"})
+		conf.Consul.Address = &config.ConsulConfig.Addr
+		conf.Consul.Token = &config.ConsulConfig.Token
 
 		if config.ConsulConfig.EnableSSL != nil && *config.ConsulConfig.EnableSSL {
-			conf.SSL = &ctconf.SSLConfig{
-				Enabled: true,
-				Verify:  *config.ConsulConfig.VerifySSL,
-				Cert:    config.ConsulConfig.CertFile,
-				Key:     config.ConsulConfig.KeyFile,
-				CaCert:  config.ConsulConfig.CAFile,
+			verify := config.ConsulConfig.VerifySSL != nil && *config.ConsulConfig.VerifySSL
+			conf.Consul.SSL = &ctconf.SSLConfig{
+				Enabled: &t,
+				Verify:  &verify,
+				Cert:    &config.ConsulConfig.CertFile,
+				Key:     &config.ConsulConfig.KeyFile,
+				CaCert:  &config.ConsulConfig.CAFile,
 			}
-			set([]string{"ssl", "ssl.enabled", "ssl.verify", "ssl.cert", "ssl.key", "ssl.ca_cert"})
 		}
 
 		if config.ConsulConfig.Auth != "" {
@@ -442,42 +436,46 @@ func runnerConfig(config *config.Config, vaultToken string) (*ctconf.Config, err
 				return nil, fmt.Errorf("Failed to parse Consul Auth config")
 			}
 
-			conf.Auth = &ctconf.AuthConfig{
-				Enabled:  true,
-				Username: parts[0],
-				Password: parts[1],
+			conf.Consul.Auth = &ctconf.AuthConfig{
+				Enabled:  &t,
+				Username: &parts[0],
+				Password: &parts[1],
 			}
-
-			set([]string{"auth", "auth.username", "auth.password", "auth.enabled"})
 		}
 	}
 
 	// Setup the Vault config
 	// Always set these to ensure nothing is picked up from the environment
-	conf.Vault = &ctconf.VaultConfig{
-		RenewToken: false,
-	}
-	set([]string{"vault", "vault.token", "vault.renew_token"})
+	emptyStr := ""
+	conf.Vault.RenewToken = &f
+	conf.Vault.Token = &emptyStr
 
 	if config.VaultConfig != nil && config.VaultConfig.IsEnabled() {
-		conf.Vault.Address = config.VaultConfig.Addr
-		conf.Vault.Token = vaultToken
-		set([]string{"vault.address"})
+		conf.Vault.Address = &config.VaultConfig.Addr
+		conf.Vault.Token = &vaultToken
 
 		if strings.HasPrefix(config.VaultConfig.Addr, "https") || config.VaultConfig.TLSCertFile != "" {
-			verify := config.VaultConfig.TLSSkipVerify == nil || !*config.VaultConfig.TLSSkipVerify
+			skipVerify := config.VaultConfig.TLSSkipVerify != nil && *config.VaultConfig.TLSSkipVerify
+			verify := !skipVerify
 			conf.Vault.SSL = &ctconf.SSLConfig{
-				Enabled: true,
-				Verify:  !verify,
-				Cert:    config.VaultConfig.TLSCertFile,
-				Key:     config.VaultConfig.TLSKeyFile,
-				CaCert:  config.VaultConfig.TLSCaFile,
-				CaPath:  config.VaultConfig.TLSCaPath,
+				Enabled: &t,
+				Verify:  &verify,
+				Cert:    &config.VaultConfig.TLSCertFile,
+				Key:     &config.VaultConfig.TLSKeyFile,
+				CaCert:  &config.VaultConfig.TLSCaFile,
+				CaPath:  &config.VaultConfig.TLSCaPath,
+			}
+		} else {
+			conf.Vault.SSL = &ctconf.SSLConfig{
+				Enabled: &f,
+				Verify:  &f,
+				Cert:    &emptyStr,
+				Key:     &emptyStr,
+				CaCert:  &emptyStr,
+				CaPath:  &emptyStr,
 			}
-
-			set([]string{"vault.ssl", "vault.ssl.enabled", "vault.ssl.verify",
-				"vault.ssl.cert", "vault.ssl.key", "vault.ssl.ca_cert"})
 		}
 	}
 
+	conf.Finalize()
 	return conf, nil
 }
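The client change above moves from consul-template's old value-based ConfigTemplate/Set API to the pointer-based TemplateConfig API (DefaultTemplateConfig, Finalize, DefaultConfig) and hands the task's environment to the runner through runner.Env, which is what lets a template resolve {{env "NOMAD_TASK_NAME"}}. Below is a minimal sketch of that flow using only calls that appear in the hunks above; the newRunner helper, the fixed paths, and the literal environment map are illustrative, and the map form assumes runner.Env accepts the same map that taskEnv.Build().EnvMap() produces.

package sketch

import (
	ctconf "github.com/hashicorp/consul-template/config"
	"github.com/hashicorp/consul-template/manager"
)

// newRunner builds a single-template runner roughly the way templateRunner
// does, but with a fixed source/destination and a hand-rolled environment map.
func newRunner(src, dest string, env map[string]string) (*manager.Runner, error) {
	// Pointer-based template config: set fields, then Finalize to fill the
	// remaining nil pointers with defaults.
	tmpl := ctconf.DefaultTemplateConfig()
	tmpl.Source = &src
	tmpl.Destination = &dest
	tmpl.Finalize()

	// Runner config: defaults plus the template list, finalized before use.
	conf := ctconf.DefaultConfig()
	flat := ctconf.TemplateConfigs{tmpl}
	conf.Templates = &flat
	conf.Finalize()

	runner, err := manager.NewRunner(conf, false, false)
	if err != nil {
		return nil, err
	}

	// The Nomad-specific step: replace the runner's environment so template
	// functions such as {{env "NOMAD_TASK_NAME"}} see the task environment.
	runner.Env = env
	return runner, nil
}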
diff --git a/client/consul_template_test.go b/client/consul_template_test.go
index 50d4a543773e..14b8f077a681 100644
--- a/client/consul_template_test.go
+++ b/client/consul_template_test.go
@@ -19,6 +19,12 @@ import (
 	"github.com/hashicorp/nomad/testutil"
 )
 
+const (
+	// TestTaskName is the name of the injected task. It should appear in the
+	// environment variable $NOMAD_TASK_NAME
+	TestTaskName = "test-task"
+)
+
 // MockTaskHooks is a mock of the TaskHooks interface useful for testing
 type MockTaskHooks struct {
 	Restarts int
@@ -105,7 +111,7 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b
 	}
 
 	// Build the task environment
-	harness.taskEnv = env.NewTaskEnvironment(harness.node)
+	harness.taskEnv = env.NewTaskEnvironment(harness.node).SetTaskName(TestTaskName)
 
 	// Make a tempdir
 	d, err := ioutil.TempDir("", "")
@@ -304,6 +310,40 @@ func TestTaskTemplateManager_Unblock_Static(t *testing.T) {
 	}
 }
 
+func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) {
+	// Make a template that will render immediately
+	content := `Hello Nomad Task: {{env "NOMAD_TASK_NAME"}}`
+	expected := fmt.Sprintf("Hello Nomad Task: %s", TestTaskName)
+	file := "my.tmpl"
+	template := &structs.Template{
+		EmbeddedTmpl: content,
+		DestPath:     file,
+		ChangeMode:   structs.TemplateChangeModeNoop,
+	}
+
+	harness := newTestHarness(t, []*structs.Template{template}, false, false)
+	harness.start(t)
+	defer harness.stop()
+
+	// Wait for the unblock
+	select {
+	case <-harness.mockHooks.UnblockCh:
+	case <-time.After(time.Duration(5*testutil.TestMultiplier()) * time.Second):
+		t.Fatalf("Task unblock should have been called")
+	}
+
+	// Check the file is there
+	path := filepath.Join(harness.taskDir, file)
+	raw, err := ioutil.ReadFile(path)
+	if err != nil {
+		t.Fatalf("Failed to read rendered template from %q: %v", path, err)
+	}
+
+	if s := string(raw); s != expected {
+		t.Fatalf("Unexpected template data; got %q, want %q", s, expected)
+	}
+}
+
 func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) {
 	// Make a template that will render immediately
 	content := "hello, world!"
diff --git a/vendor/github.com/hashicorp/consul-template/child/child.go b/vendor/github.com/hashicorp/consul-template/child/child.go
index 7577bdd1d268..e9a496db9147 100644
--- a/vendor/github.com/hashicorp/consul-template/child/child.go
+++ b/vendor/github.com/hashicorp/consul-template/child/child.go
@@ -2,11 +2,13 @@ package child
 
 import (
 	"errors"
+	"fmt"
 	"io"
 	"log"
 	"math/rand"
 	"os"
 	"os/exec"
+	"strings"
 	"sync"
 	"syscall"
 	"time"
@@ -15,14 +17,14 @@ var (
 	// ErrMissingCommand is the error returned when no command is specified
 	// to run.
-	ErrMissingCommand error = errors.New("missing command")
+	ErrMissingCommand = errors.New("missing command")
 
 	// ExitCodeOK is the default OK exit code.
-	ExitCodeOK int = 0
+	ExitCodeOK = 0
 
 	// ExitCodeError is the default error code returned when the child exits with
 	// an error without a more specific code.
-	ExitCodeError int = 127
+	ExitCodeError = 127
 )
 
 // Child is a wrapper around a child process which can be used to send signals
@@ -34,6 +36,9 @@ type Child struct {
 	stdout, stderr io.Writer
 	command        string
 	args           []string
+	env            []string
+
+	timeout time.Duration
 
 	reloadSignal os.Signal
 
@@ -69,6 +74,16 @@ type NewInput struct {
 	Command string
 	Args    []string
 
+	// Timeout is the maximum amount of time to allow the command to execute. If
+	// set to 0, the command is permitted to run infinitely.
+	Timeout time.Duration
+
+	// Env represents the condition of the child processes' environment
+	// variables.
Only these environment variables will be given to the child, so + // it is the responsibility of the caller to include the parent processes + // environment, if required. This should be in the key=value format. + Env []string + // ReloadSignal is the signal to send to reload this process. This value may // be nil. ReloadSignal os.Signal @@ -107,6 +122,8 @@ func New(i *NewInput) (*Child, error) { stderr: i.Stderr, command: i.Command, args: i.Args, + env: i.Env, + timeout: i.Timeout, reloadSignal: i.ReloadSignal, killSignal: i.KillSignal, killTimeout: i.KillTimeout, @@ -134,13 +151,19 @@ func (c *Child) Pid() int { return c.pid() } +// Command returns the human-formatted command with arguments. +func (c *Child) Command() string { + list := append([]string{c.command}, c.args...) + return strings.Join(list, " ") +} + // Start starts and begins execution of the child process. A buffered channel // is returned which is where the command's exit code will be returned upon // exit. Any errors that occur prior to starting the command will be returned // as the second error argument, but any errors returned by the command after // execution will be returned as a non-zero value over the exit code channel. func (c *Child) Start() error { - log.Printf("[INFO] (child) spawning %q %q", c.command, c.args) + log.Printf("[INFO] (child) spawning: %s", c.Command()) c.Lock() defer c.Unlock() return c.start() @@ -170,16 +193,16 @@ func (c *Child) Reload() error { c.kill() return c.start() - } else { - log.Printf("[INFO] (child) reloading process") + } - // We only need a read lock here because neither the process nor the exit - // channel are changing. - c.RLock() - defer c.RUnlock() + log.Printf("[INFO] (child) reloading process") - return c.reload() - } + // We only need a read lock here because neither the process nor the exit + // channel are changing. + c.RLock() + defer c.RUnlock() + + return c.reload() } // Kill sends the kill signal to the child process and waits for successful @@ -223,6 +246,7 @@ func (c *Child) start() error { cmd.Stdin = c.stdin cmd.Stdout = c.stdout cmd.Stderr = c.stderr + cmd.Env = c.env if err := cmd.Start(); err != nil { return err } @@ -257,10 +281,47 @@ func (c *Child) start() error { case <-c.stopCh: case exitCh <- code: } - }() c.exitCh = exitCh + + // If a timeout was given, start the timer to wait for the child to exit + if c.timeout != 0 { + select { + case code := <-exitCh: + if code != 0 { + return fmt.Errorf( + "command exited with a non-zero exit status:\n"+ + "\n"+ + " %s\n"+ + "\n"+ + "This is assumed to be a failure. Please ensure the command\n"+ + "exits with a zero exit status.", + c.Command(), + ) + } + case <-time.After(c.timeout): + // Force-kill the process + c.stopLock.Lock() + defer c.stopLock.Unlock() + if c.cmd != nil && c.cmd.Process != nil { + c.cmd.Process.Kill() + } + + return fmt.Errorf( + "command did not exit within %q:\n"+ + "\n"+ + " %s\n"+ + "\n"+ + "Commands must exit in a timely manner in order for processing to\n"+ + "continue. 
Consider using a process supervisor or utilizing the\n"+ + "built-in exec mode instead.", + c.timeout, + c.Command(), + ) + } + } + return nil } diff --git a/vendor/github.com/hashicorp/consul-template/config/auth.go b/vendor/github.com/hashicorp/consul-template/config/auth.go new file mode 100644 index 000000000000..207c78136db9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/auth.go @@ -0,0 +1,142 @@ +package config + +import ( + "errors" + "fmt" + "strings" +) + +var ( + // ErrAuthStringEmpty is the error returned with authentication is provided, + // but empty. + ErrAuthStringEmpty = errors.New("auth: cannot be empty") +) + +// AuthConfig is the HTTP basic authentication data. +type AuthConfig struct { + Enabled *bool `mapstructure:"enabled"` + Username *string `mapstructure:"username"` + Password *string `mapstructure:"password"` +} + +// DefaultAuthConfig is the default configuration. +func DefaultAuthConfig() *AuthConfig { + return &AuthConfig{} +} + +// ParseAuthConfig parses the auth into username:password. +func ParseAuthConfig(s string) (*AuthConfig, error) { + if s == "" { + return nil, ErrAuthStringEmpty + } + + var a AuthConfig + + if strings.Contains(s, ":") { + split := strings.SplitN(s, ":", 2) + a.Username = String(split[0]) + a.Password = String(split[1]) + } else { + a.Username = String(s) + } + + return &a, nil +} + +// Copy returns a deep copy of this configuration. +func (c *AuthConfig) Copy() *AuthConfig { + if c == nil { + return nil + } + + var o AuthConfig + o.Enabled = c.Enabled + o.Username = c.Username + o.Password = c.Password + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *AuthConfig) Merge(o *AuthConfig) *AuthConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Username != nil { + r.Username = o.Username + } + + if o.Password != nil { + r.Password = o.Password + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *AuthConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + StringPresent(c.Username) || + StringPresent(c.Password)) + } + if c.Username == nil { + c.Username = String("") + } + + if c.Password == nil { + c.Password = String("") + } + + if c.Enabled == nil { + c.Enabled = Bool(*c.Username != "" || *c.Password != "") + } +} + +// GoString defines the printable version of this struct. +func (c *AuthConfig) GoString() string { + if c == nil { + return "(*AuthConfig)(nil)" + } + + return fmt.Sprintf("&AuthConfig{"+ + "Enabled:%s, "+ + "Username:%s, "+ + "Password:%s"+ + "}", + BoolGoString(c.Enabled), + StringGoString(c.Username), + StringGoString(c.Password), + ) +} + +// String is the string representation of this authentication. If authentication +// is not enabled, this returns the empty string. The username and password will +// be separated by a colon. 
+func (c *AuthConfig) String() string { + if !BoolVal(c.Enabled) { + return "" + } + + if c.Password != nil { + return fmt.Sprintf("%s:%s", StringVal(c.Username), StringVal(c.Password)) + } + + return StringVal(c.Username) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/config.go b/vendor/github.com/hashicorp/consul-template/config/config.go index eaade92c3dee..a958be70a324 100644 --- a/vendor/github.com/hashicorp/consul-template/config/config.go +++ b/vendor/github.com/hashicorp/consul-template/config/config.go @@ -1,495 +1,236 @@ package config import ( - "errors" "fmt" "io/ioutil" "log" "os" "path/filepath" - "regexp" + "strconv" "strings" "syscall" "time" "github.com/hashicorp/consul-template/signals" - "github.com/hashicorp/consul-template/watch" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/mitchellh/mapstructure" -) -// The pattern to split the config template syntax on -var configTemplateRe = regexp.MustCompile("([a-zA-Z]:)?([^:]+)") + "github.com/pkg/errors" +) const ( - // DefaultFilePerms are the default file permissions for templates rendered - // onto disk when a specific file permission has not already been specified. - DefaultFilePerms = 0644 - - // DefaultDedupPrefix is the default prefix used for de-duplication mode - DefaultDedupPrefix = "consul-template/dedup/" + // DefaultLogLevel is the default logging level. + DefaultLogLevel = "WARN" - // DefaultCommandTimeout is the amount of time to wait for a command to return. - DefaultCommandTimeout = 30 * time.Second + // DefaultMaxStale is the default staleness permitted. This enables stale + // queries by default for performance reasons. + DefaultMaxStale = 2 * time.Second // DefaultReloadSignal is the default signal for reload. DefaultReloadSignal = syscall.SIGHUP - // DefaultDumpSignal is the default signal for a core dump. - DefaultDumpSignal = syscall.SIGQUIT - // DefaultKillSignal is the default signal for termination. DefaultKillSignal = syscall.SIGINT ) // Config is used to configure Consul Template type Config struct { - // Path is the path to this configuration file on disk. This value is not - // read from disk by rather dynamically populated by the code so the Config - // has a reference to the path to the file on disk that created it. - Path string `mapstructure:"-"` - - // Consul is the location of the Consul instance to query (may be an IP - // address or FQDN) with port. - Consul string `mapstructure:"consul"` - - // Token is the Consul API token. - Token string `mapstructure:"token"` + // Consul is the configuration for connecting to a Consul cluster. + Consul *ConsulConfig `mapstructure:"consul"` - // ReloadSignal is the signal to listen for a reload event. - ReloadSignal os.Signal `mapstructure:"reload_signal"` + // Dedup is used to configure the dedup settings + Dedup *DedupConfig `mapstructure:"deduplicate"` - // DumpSignal is the signal to listen for a core dump event. - DumpSignal os.Signal `mapstructure:"dump_signal"` + // Exec is the configuration for exec/supervise mode. + Exec *ExecConfig `mapstructure:"exec"` // KillSignal is the signal to listen for a graceful terminate event. - KillSignal os.Signal `mapstructure:"kill_signal"` - - // Auth is the HTTP basic authentication for communicating with Consul. - Auth *AuthConfig `mapstructure:"auth"` - - // Vault is the configuration for connecting to a vault server. - Vault *VaultConfig `mapstructure:"vault"` - - // SSL indicates we should use a secure connection while talking to - // Consul. 
This requires Consul to be configured to serve HTTPS. - SSL *SSLConfig `mapstructure:"ssl"` - - // Syslog is the configuration for syslog. - Syslog *SyslogConfig `mapstructure:"syslog"` + KillSignal *os.Signal `mapstructure:"kill_signal"` - // Exec is the configuration for exec/supervise mode. - Exec *ExecConfig `mapstructure:"exec"` + // LogLevel is the level with which to log for this config. + LogLevel *string `mapstructure:"log_level"` // MaxStale is the maximum amount of time for staleness from Consul as given // by LastContact. If supplied, Consul Template will query all servers instead // of just the leader. - MaxStale time.Duration `mapstructure:"max_stale"` - - // ConfigTemplates is a slice of the ConfigTemplate objects in the config. - ConfigTemplates []*ConfigTemplate `mapstructure:"template"` - - // Retry is the duration of time to wait between Consul failures. - Retry time.Duration `mapstructure:"retry"` - - // Wait is the quiescence timers. - Wait *watch.Wait `mapstructure:"wait"` + MaxStale *time.Duration `mapstructure:"max_stale"` // PidFile is the path on disk where a PID file should be written containing // this processes PID. - PidFile string `mapstructure:"pid_file"` + PidFile *string `mapstructure:"pid_file"` - // LogLevel is the level with which to log for this config. - LogLevel string `mapstructure:"log_level"` + // ReloadSignal is the signal to listen for a reload event. + ReloadSignal *os.Signal `mapstructure:"reload_signal"` + + // Syslog is the configuration for syslog. + Syslog *SyslogConfig `mapstructure:"syslog"` + + // Templates is the list of templates. + Templates *TemplateConfigs `mapstructure:"template"` - // Deduplicate is used to configure the dedup settings - Deduplicate *DeduplicateConfig `mapstructure:"deduplicate"` + // Vault is the configuration for connecting to a vault server. + Vault *VaultConfig `mapstructure:"vault"` - // setKeys is the list of config keys that were set by the user. - setKeys map[string]struct{} + // Wait is the quiescence timers. + Wait *WaitConfig `mapstructure:"wait"` } // Copy returns a deep copy of the current configuration. This is useful because // the nested data structures may be shared. 
func (c *Config) Copy() *Config { - config := new(Config) - config.Path = c.Path - config.Consul = c.Consul - config.Token = c.Token - config.ReloadSignal = c.ReloadSignal - config.DumpSignal = c.DumpSignal - config.KillSignal = c.KillSignal - - if c.Auth != nil { - config.Auth = &AuthConfig{ - Enabled: c.Auth.Enabled, - Username: c.Auth.Username, - Password: c.Auth.Password, - } - } - - if c.Vault != nil { - config.Vault = &VaultConfig{ - Address: c.Vault.Address, - Token: c.Vault.Token, - UnwrapToken: c.Vault.UnwrapToken, - RenewToken: c.Vault.RenewToken, - } - - if c.Vault.SSL != nil { - config.Vault.SSL = &SSLConfig{ - Enabled: c.Vault.SSL.Enabled, - Verify: c.Vault.SSL.Verify, - Cert: c.Vault.SSL.Cert, - Key: c.Vault.SSL.Key, - CaCert: c.Vault.SSL.CaCert, - CaPath: c.Vault.SSL.CaPath, - ServerName: c.Vault.SSL.ServerName, - } - } - } + var o Config - if c.SSL != nil { - config.SSL = &SSLConfig{ - Enabled: c.SSL.Enabled, - Verify: c.SSL.Verify, - Cert: c.SSL.Cert, - Key: c.SSL.Key, - CaCert: c.SSL.CaCert, - CaPath: c.SSL.CaPath, - ServerName: c.SSL.ServerName, - } - } + o.Consul = c.Consul - if c.Syslog != nil { - config.Syslog = &SyslogConfig{ - Enabled: c.Syslog.Enabled, - Facility: c.Syslog.Facility, - } + if c.Consul != nil { + o.Consul = c.Consul.Copy() } if c.Exec != nil { - config.Exec = &ExecConfig{ - Command: c.Exec.Command, - Splay: c.Exec.Splay, - ReloadSignal: c.Exec.ReloadSignal, - KillSignal: c.Exec.KillSignal, - KillTimeout: c.Exec.KillTimeout, - } - } - - config.MaxStale = c.MaxStale - - config.ConfigTemplates = make([]*ConfigTemplate, len(c.ConfigTemplates)) - for i, t := range c.ConfigTemplates { - config.ConfigTemplates[i] = &ConfigTemplate{ - Source: t.Source, - Destination: t.Destination, - EmbeddedTemplate: t.EmbeddedTemplate, - Command: t.Command, - CommandTimeout: t.CommandTimeout, - Perms: t.Perms, - Backup: t.Backup, - LeftDelim: t.LeftDelim, - RightDelim: t.RightDelim, - Wait: t.Wait, - } - } - - config.Retry = c.Retry - - if c.Wait != nil { - config.Wait = &watch.Wait{ - Min: c.Wait.Min, - Max: c.Wait.Max, - } + o.Exec = c.Exec.Copy() } - config.PidFile = c.PidFile - config.LogLevel = c.LogLevel + o.KillSignal = c.KillSignal - if c.Deduplicate != nil { - config.Deduplicate = &DeduplicateConfig{ - Enabled: c.Deduplicate.Enabled, - Prefix: c.Deduplicate.Prefix, - TTL: c.Deduplicate.TTL, - } - } + o.LogLevel = c.LogLevel - config.setKeys = c.setKeys + o.MaxStale = c.MaxStale - return config -} + o.PidFile = c.PidFile -// Merge merges the values in config into this config object. Values in the -// config object overwrite the values in c. 
-func (c *Config) Merge(config *Config) { - if config.WasSet("path") { - c.Path = config.Path - } + o.ReloadSignal = c.ReloadSignal - if config.WasSet("consul") { - c.Consul = config.Consul + if c.Syslog != nil { + o.Syslog = c.Syslog.Copy() } - if config.WasSet("token") { - c.Token = config.Token + if c.Templates != nil { + o.Templates = c.Templates.Copy() } - if config.WasSet("reload_signal") { - c.ReloadSignal = config.ReloadSignal + if c.Vault != nil { + o.Vault = c.Vault.Copy() } - if config.WasSet("dump_signal") { - c.DumpSignal = config.DumpSignal + if c.Wait != nil { + o.Wait = c.Wait.Copy() } - if config.WasSet("kill_signal") { - c.KillSignal = config.KillSignal - } + return &o +} - if config.WasSet("vault") { - if c.Vault == nil { - c.Vault = &VaultConfig{} - } - if config.WasSet("vault.address") { - c.Vault.Address = config.Vault.Address - } - if config.WasSet("vault.token") { - c.Vault.Token = config.Vault.Token - } - if config.WasSet("vault.unwrap_token") { - c.Vault.UnwrapToken = config.Vault.UnwrapToken - } - if config.WasSet("vault.renew_token") { - c.Vault.RenewToken = config.Vault.RenewToken - } - if config.WasSet("vault.ssl") { - if c.Vault.SSL == nil { - c.Vault.SSL = &SSLConfig{} - } - if config.WasSet("vault.ssl.verify") { - c.Vault.SSL.Verify = config.Vault.SSL.Verify - c.Vault.SSL.Enabled = true - } - if config.WasSet("vault.ssl.cert") { - c.Vault.SSL.Cert = config.Vault.SSL.Cert - c.Vault.SSL.Enabled = true - } - if config.WasSet("vault.ssl.key") { - c.Vault.SSL.Key = config.Vault.SSL.Key - c.Vault.SSL.Enabled = true - } - if config.WasSet("vault.ssl.ca_cert") { - c.Vault.SSL.CaCert = config.Vault.SSL.CaCert - c.Vault.SSL.Enabled = true - } - if config.WasSet("vault.ssl.ca_path") { - c.Vault.SSL.CaPath = config.Vault.SSL.CaPath - c.Vault.SSL.Enabled = true - } - if config.WasSet("vault.ssl.enabled") { - c.Vault.SSL.Enabled = config.Vault.SSL.Enabled - } - if config.WasSet("vault.ssl.server_name") { - c.Vault.SSL.ServerName = config.Vault.SSL.ServerName - } +// Merge merges the values in config into this config object. Values in the +// config object overwrite the values in c. 
+func (c *Config) Merge(o *Config) *Config { + if c == nil { + if o == nil { + return nil } + return o.Copy() } - if config.WasSet("auth") { - if c.Auth == nil { - c.Auth = &AuthConfig{} - } - if config.WasSet("auth.username") { - c.Auth.Username = config.Auth.Username - c.Auth.Enabled = true - } - if config.WasSet("auth.password") { - c.Auth.Password = config.Auth.Password - c.Auth.Enabled = true - } - if config.WasSet("auth.enabled") { - c.Auth.Enabled = config.Auth.Enabled - } + if o == nil { + return c.Copy() } - if config.WasSet("ssl") { - if c.SSL == nil { - c.SSL = &SSLConfig{} - } - if config.WasSet("ssl.verify") { - c.SSL.Verify = config.SSL.Verify - c.SSL.Enabled = true - } - if config.WasSet("ssl.cert") { - c.SSL.Cert = config.SSL.Cert - c.SSL.Enabled = true - } - if config.WasSet("ssl.key") { - c.SSL.Key = config.SSL.Key - c.SSL.Enabled = true - } - if config.WasSet("ssl.ca_cert") { - c.SSL.CaCert = config.SSL.CaCert - c.SSL.Enabled = true - } - if config.WasSet("ssl.ca_path") { - c.SSL.CaPath = config.SSL.CaPath - c.SSL.Enabled = true - } - if config.WasSet("ssl.enabled") { - c.SSL.Enabled = config.SSL.Enabled - } - if config.WasSet("ssl.server_name") { - c.SSL.ServerName = config.SSL.ServerName - } - } + r := c.Copy() - if config.WasSet("syslog") { - if c.Syslog == nil { - c.Syslog = &SyslogConfig{} - } - if config.WasSet("syslog.facility") { - c.Syslog.Facility = config.Syslog.Facility - c.Syslog.Enabled = true - } - if config.WasSet("syslog.enabled") { - c.Syslog.Enabled = config.Syslog.Enabled - } + if o.Consul != nil { + r.Consul = r.Consul.Merge(o.Consul) } - if config.WasSet("exec") { - if c.Exec == nil { - c.Exec = &ExecConfig{} - } - if config.WasSet("exec.command") { - c.Exec.Command = config.Exec.Command - } - if config.WasSet("exec.splay") { - c.Exec.Splay = config.Exec.Splay - } - if config.WasSet("exec.reload_signal") { - c.Exec.ReloadSignal = config.Exec.ReloadSignal - } - if config.WasSet("exec.kill_signal") { - c.Exec.KillSignal = config.Exec.KillSignal - } - if config.WasSet("exec.kill_timeout") { - c.Exec.KillTimeout = config.Exec.KillTimeout - } + if o.Dedup != nil { + r.Dedup = r.Dedup.Merge(o.Dedup) } - if config.WasSet("max_stale") { - c.MaxStale = config.MaxStale + if o.Exec != nil { + r.Exec = r.Exec.Merge(o.Exec) } - if len(config.ConfigTemplates) > 0 { - if c.ConfigTemplates == nil { - c.ConfigTemplates = make([]*ConfigTemplate, 0, 1) - } - for _, template := range config.ConfigTemplates { - c.ConfigTemplates = append(c.ConfigTemplates, &ConfigTemplate{ - Source: template.Source, - Destination: template.Destination, - EmbeddedTemplate: template.EmbeddedTemplate, - Command: template.Command, - CommandTimeout: template.CommandTimeout, - Perms: template.Perms, - Backup: template.Backup, - LeftDelim: template.LeftDelim, - RightDelim: template.RightDelim, - Wait: template.Wait, - }) - } + if o.KillSignal != nil { + r.KillSignal = o.KillSignal } - if config.WasSet("retry") { - c.Retry = config.Retry + if o.LogLevel != nil { + r.LogLevel = o.LogLevel } - if config.WasSet("wait") { - c.Wait = &watch.Wait{ - Min: config.Wait.Min, - Max: config.Wait.Max, - } + if o.MaxStale != nil { + r.MaxStale = o.MaxStale } - if config.WasSet("pid_file") { - c.PidFile = config.PidFile + if o.PidFile != nil { + r.PidFile = o.PidFile } - if config.WasSet("log_level") { - c.LogLevel = config.LogLevel + if o.ReloadSignal != nil { + r.ReloadSignal = o.ReloadSignal } - if config.WasSet("deduplicate") { - if c.Deduplicate == nil { - c.Deduplicate = &DeduplicateConfig{} - } - if 
config.WasSet("deduplicate.enabled") { - c.Deduplicate.Enabled = config.Deduplicate.Enabled - } - if config.WasSet("deduplicate.prefix") { - c.Deduplicate.Prefix = config.Deduplicate.Prefix - } + if o.Syslog != nil { + r.Syslog = r.Syslog.Merge(o.Syslog) } - if c.setKeys == nil { - c.setKeys = make(map[string]struct{}) + if o.Templates != nil { + r.Templates = r.Templates.Merge(o.Templates) } - for k := range config.setKeys { - if _, ok := c.setKeys[k]; !ok { - c.setKeys[k] = struct{}{} - } - } -} -// WasSet determines if the given key was set in the config (as opposed to just -// having the default value). -func (c *Config) WasSet(key string) bool { - if _, ok := c.setKeys[key]; ok { - return true + if o.Vault != nil { + r.Vault = r.Vault.Merge(o.Vault) } - return false -} -// Set is a helper function for marking a key as set. -func (c *Config) Set(key string) { - if c.setKeys == nil { - c.setKeys = make(map[string]struct{}) - } - if _, ok := c.setKeys[key]; !ok { - c.setKeys[key] = struct{}{} + if o.Wait != nil { + r.Wait = r.Wait.Merge(o.Wait) } + + return r } // Parse parses the given string contents as a config func Parse(s string) (*Config, error) { - var errs *multierror.Error - - // Parse the file (could be HCL or JSON) var shadow interface{} if err := hcl.Decode(&shadow, s); err != nil { - return nil, fmt.Errorf("error decoding config: %s", err) + return nil, errors.Wrap(err, "error decoding config") } // Convert to a map and flatten the keys we want to flatten parsed, ok := shadow.(map[string]interface{}) if !ok { - return nil, fmt.Errorf("error converting config") + return nil, errors.New("error converting config") } + flattenKeys(parsed, []string{ "auth", + "consul", + "consul.auth", + "consul.retry", + "consul.ssl", + "deduplicate", + "env", + "exec", + "exec.env", "ssl", "syslog", - "exec", "vault", - "deduplicate", + "vault.retry", + "vault.ssl", + "wait", }) - // Deprecations + // FlattenFlatten keys belonging to the templates. We cannot do this above + // because it is an array of tmeplates. + if templates, ok := parsed["template"].([]map[string]interface{}); ok { + for _, template := range templates { + flattenKeys(template, []string{ + "env", + "exec", + "exec.env", + "wait", + }) + } + } + + // TODO: Deprecations if vault, ok := parsed["vault"].(map[string]interface{}); ok { if val, ok := vault["renew"]; ok { log.Println(`[WARN] vault.renew has been renamed to vault.renew_token. ` + @@ -499,86 +240,74 @@ func Parse(s string) (*Config, error) { } } + if auth, ok := parsed["auth"].(map[string]interface{}); ok { + log.Println("[WARN] auth has been moved under the consul stanza. " + + "Update your configuration files and place auth inside consul { }.") + if _, ok := parsed["consul"]; !ok { + parsed["consul"] = make(map[string]interface{}) + } + parsed["consul"].(map[string]interface{})["auth"] = auth + delete(parsed, "auth") + } + + if retry, ok := parsed["retry"].(string); ok { + log.Println("[WARN] retry has been moved under the consul stanza. " + + "Update your configuration files and place retry inside consul { }.") + if _, ok := parsed["consul"]; !ok { + parsed["consul"] = make(map[string]interface{}) + } + parsed["consul"].(map[string]interface{})["retry"] = map[string]interface{}{ + "backoff": retry, + } + delete(parsed, "retry") + } + + if ssl, ok := parsed["ssl"].(map[string]interface{}); ok { + log.Println("[WARN] ssl has been moved under the consul stanza. 
" + + "Update your configuration files and place ssl inside consul { }.") + if _, ok := parsed["consul"]; !ok { + parsed["consul"] = make(map[string]interface{}) + } + parsed["consul"].(map[string]interface{})["ssl"] = ssl + delete(parsed, "ssl") + } + + if token, ok := parsed["token"].(string); ok { + log.Println("[WARN] token has been moved under the consul stanza. " + + "Update your configuration files and place token inside consul { }.") + if _, ok := parsed["consul"]; !ok { + parsed["consul"] = make(map[string]interface{}) + } + parsed["consul"].(map[string]interface{})["token"] = token + delete(parsed, "token") + } + // Create a new, empty config - config := new(Config) + var c Config // Use mapstructure to populate the basic config fields - metadata := new(mapstructure.Metadata) + var md mapstructure.Metadata decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.ComposeDecodeHookFunc( + ConsulStringToStructFunc(), StringToFileModeFunc(), signals.StringToSignalFunc(), - watch.StringToWaitDurationHookFunc(), + StringToWaitDurationHookFunc(), mapstructure.StringToSliceHookFunc(","), mapstructure.StringToTimeDurationHookFunc(), ), ErrorUnused: true, - Metadata: metadata, - Result: config, + Metadata: &md, + Result: &c, }) if err != nil { - errs = multierror.Append(errs, err) - return nil, errs.ErrorOrNil() + return nil, errors.Wrap(err, "mapstructure decoder creation failed") } if err := decoder.Decode(parsed); err != nil { - errs = multierror.Append(errs, err) - return nil, errs.ErrorOrNil() - } - - // Explicitly check for the nil signal and set the value back to nil - if config.ReloadSignal == signals.SIGNIL { - config.ReloadSignal = nil - } - if config.DumpSignal == signals.SIGNIL { - config.DumpSignal = nil - } - if config.KillSignal == signals.SIGNIL { - config.KillSignal = nil + return nil, errors.Wrap(err, "mapstructure decode failed") } - if config.Exec != nil { - if config.Exec.ReloadSignal == signals.SIGNIL { - config.Exec.ReloadSignal = nil - } - if config.Exec.KillSignal == signals.SIGNIL { - config.Exec.KillSignal = nil - } - } - - // Setup default values for templates - for _, t := range config.ConfigTemplates { - // Ensure there's a default value for the template's file permissions - if t.Perms == 0000 { - t.Perms = DefaultFilePerms - } - - // Ensure we have a default command timeout - if t.CommandTimeout == 0 { - t.CommandTimeout = DefaultCommandTimeout - } - - // Set up a default zero wait, which disables it for this - // template. - if t.Wait == nil { - t.Wait = &watch.Wait{} - } - } - - // Update the list of set keys - if config.setKeys == nil { - config.setKeys = make(map[string]struct{}) - } - for _, key := range metadata.Keys { - if _, ok := config.setKeys[key]; !ok { - config.setKeys[key] = struct{}{} - } - } - config.setKeys["path"] = struct{}{} - - d := DefaultConfig() - d.Merge(config) - config = d - return config, errs.ErrorOrNil() + return &c, nil } // Must returns a config object that must compile. If there are any errors, this @@ -586,19 +315,26 @@ func Parse(s string) (*Config, error) { func Must(s string) *Config { c, err := Parse(s) if err != nil { - panic(err) + log.Fatal(err) } return c } +// TestConfig returuns a default, finalized config, with the provided +// configuration taking precedence. +func TestConfig(c *Config) *Config { + d := DefaultConfig().Merge(c) + d.Finalize() + return d +} + // FromFile reads the configuration file at the given path and returns a new // Config struct with the data populated. 
func FromFile(path string) (*Config, error) { c, err := ioutil.ReadFile(path) if err != nil { - return nil, fmt.Errorf("error reading config at %q: %s", path, err) + return nil, errors.Wrap(err, fmt.Sprintf("from file %s", path)) } - return Parse(string(c)) } @@ -607,13 +343,13 @@ func FromFile(path string) (*Config, error) { func FromPath(path string) (*Config, error) { // Ensure the given filepath exists if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, fmt.Errorf("config: missing file/folder: %s", path) + return nil, errors.Wrap(err, "missing file/folder"+path) } // Check if a file was given or a path to a directory stat, err := os.Stat(path) if err != nil { - return nil, fmt.Errorf("config: error stating file: %s", err) + return nil, errors.Wrap(err, "failed stating file "+path) } // Recursively parse directories, single load files @@ -621,11 +357,11 @@ func FromPath(path string) (*Config, error) { // Ensure the given filepath has at least one config file _, err := ioutil.ReadDir(path) if err != nil { - return nil, fmt.Errorf("config: error listing directory: %s", err) + return nil, errors.Wrap(err, "failed listing dir "+path) } // Create a blank config to merge off of - config := DefaultConfig() + var c *Config // Potential bug: Walk does not follow symlinks! err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { @@ -644,237 +380,163 @@ func FromPath(path string) (*Config, error) { if err != nil { return err } - config.Merge(newConfig) + c = c.Merge(newConfig) return nil }) if err != nil { - return nil, fmt.Errorf("config: walk error: %s", err) + return nil, errors.Wrap(err, "walk error") } - return config, nil + return c, nil } else if stat.Mode().IsRegular() { return FromFile(path) } - return nil, fmt.Errorf("config: unknown filetype: %q", stat.Mode().String()) + return nil, fmt.Errorf("unknown filetype: %q", stat.Mode().String()) } -// DefaultConfig returns the default configuration struct. +// GoString defines the printable version of this struct. +func (c *Config) GoString() string { + if c == nil { + return "(*Config)(nil)" + } + + return fmt.Sprintf("&Config{"+ + "Consul:%#v, "+ + "Dedup:%#v, "+ + "Exec:%#v, "+ + "KillSignal:%s, "+ + "LogLevel:%s, "+ + "MaxStale:%s, "+ + "PidFile:%s, "+ + "ReloadSignal:%s, "+ + "Syslog:%#v, "+ + "Templates:%#v, "+ + "Vault:%#v, "+ + "Wait:%#v"+ + "}", + c.Consul, + c.Dedup, + c.Exec, + SignalGoString(c.KillSignal), + StringGoString(c.LogLevel), + TimeDurationGoString(c.MaxStale), + StringGoString(c.PidFile), + SignalGoString(c.ReloadSignal), + c.Syslog, + c.Templates, + c.Vault, + c.Wait, + ) +} + +// DefaultConfig returns the default configuration struct. Certain environment +// variables may be set which control the values for the default configuration. 
func DefaultConfig() *Config { - logLevel := os.Getenv("CONSUL_TEMPLATE_LOG") - if logLevel == "" { - logLevel = "WARN" + return &Config{ + Consul: DefaultConsulConfig(), + Dedup: DefaultDedupConfig(), + Exec: DefaultExecConfig(), + KillSignal: Signal(DefaultKillSignal), + LogLevel: stringFromEnv("CT_LOG", "CONSUL_TEMPLATE_LOG"), + MaxStale: TimeDuration(DefaultMaxStale), + PidFile: String(""), + ReloadSignal: Signal(DefaultReloadSignal), + Syslog: DefaultSyslogConfig(), + Templates: DefaultTemplateConfigs(), + Vault: DefaultVaultConfig(), + Wait: DefaultWaitConfig(), } +} - config := &Config{ - Vault: &VaultConfig{ - RenewToken: true, - SSL: &SSLConfig{ - Enabled: true, - Verify: true, - }, - }, - Auth: &AuthConfig{ - Enabled: false, - }, - ReloadSignal: DefaultReloadSignal, - DumpSignal: DefaultDumpSignal, - KillSignal: DefaultKillSignal, - SSL: &SSLConfig{ - Enabled: false, - Verify: true, - }, - Syslog: &SyslogConfig{ - Enabled: false, - Facility: "LOCAL0", - }, - Deduplicate: &DeduplicateConfig{ - Enabled: false, - Prefix: DefaultDedupPrefix, - TTL: 15 * time.Second, - }, - Exec: &ExecConfig{ - KillSignal: syscall.SIGTERM, - KillTimeout: 30 * time.Second, - }, - ConfigTemplates: make([]*ConfigTemplate, 0), - Retry: 5 * time.Second, - MaxStale: 1 * time.Second, - Wait: &watch.Wait{}, - LogLevel: logLevel, - setKeys: make(map[string]struct{}), +// Finalize ensures all configuration options have the default values, so it +// is safe to dereference the pointers later down the line. It also +// intelligently tries to activate stanzas that should be "enabled" because +// data was given, but the user did not explicitly add "Enabled: true" to the +// configuration. +func (c *Config) Finalize() { + if c.Consul == nil { + c.Consul = DefaultConsulConfig() } + c.Consul.Finalize() - if v := os.Getenv("CONSUL_HTTP_ADDR"); v != "" { - config.Consul = v + if c.Dedup == nil { + c.Dedup = DefaultDedupConfig() } + c.Dedup.Finalize() - if v := os.Getenv("CONSUL_TOKEN"); v != "" { - config.Token = v + if c.Exec == nil { + c.Exec = DefaultExecConfig() } + c.Exec.Finalize() - if v := os.Getenv("VAULT_ADDR"); v != "" { - config.Vault.Address = v + if c.KillSignal == nil { + c.KillSignal = Signal(DefaultKillSignal) } - if v := os.Getenv("VAULT_TOKEN"); v != "" { - config.Vault.Token = v + if c.LogLevel == nil { + c.LogLevel = String(DefaultLogLevel) } - if v := os.Getenv("VAULT_UNWRAP_TOKEN"); v != "" { - config.Vault.UnwrapToken = true + if c.MaxStale == nil { + c.MaxStale = TimeDuration(DefaultMaxStale) } - if v := os.Getenv("VAULT_CAPATH"); v != "" { - config.Vault.SSL.Cert = v + if c.PidFile == nil { + c.PidFile = String("") } - if v := os.Getenv("VAULT_CACERT"); v != "" { - config.Vault.SSL.CaCert = v + if c.ReloadSignal == nil { + c.ReloadSignal = Signal(DefaultReloadSignal) } - if v := os.Getenv("VAULT_SKIP_VERIFY"); v != "" { - config.Vault.SSL.Verify = false + if c.Syslog == nil { + c.Syslog = DefaultSyslogConfig() } + c.Syslog.Finalize() - if v := os.Getenv("VAULT_TLS_SERVER_NAME"); v != "" { - config.Vault.SSL.ServerName = v + if c.Templates == nil { + c.Templates = DefaultTemplateConfigs() } + c.Templates.Finalize() - return config -} - -// AuthConfig is the HTTP basic authentication data. -type AuthConfig struct { - Enabled bool `mapstructure:"enabled"` - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` -} - -// String is the string representation of this authentication. If authentication -// is not enabled, this returns the empty string. 
The username and password will -// be separated by a colon. -func (a *AuthConfig) String() string { - if !a.Enabled { - return "" + if c.Vault == nil { + c.Vault = DefaultVaultConfig() } + c.Vault.Finalize() - if a.Password != "" { - return fmt.Sprintf("%s:%s", a.Username, a.Password) + if c.Wait == nil { + c.Wait = DefaultWaitConfig() } - - return a.Username -} - -// ExecConfig is used to configure the application when it runs in -// exec/supervise mode. -type ExecConfig struct { - // Command is the command to execute and watch as a child process. - Command string `mapstructure:"command"` - - // Splay is the maximum amount of time to wait to kill the process. - Splay time.Duration `mapstructure:"splay"` - - // ReloadSignal is the signal to send to the child process when a template - // changes. This tells the child process that templates have - ReloadSignal os.Signal `mapstructure:"reload_signal"` - - // KillSignal is the signal to send to the command to kill it gracefully. The - // default value is "SIGTERM". - KillSignal os.Signal `mapstructure:"kill_signal"` - - // KillTimeout is the amount of time to give the process to cleanup before - // hard-killing it. - KillTimeout time.Duration `mapstructure:"kill_timeout"` -} - -// DeduplicateConfig is used to enable the de-duplication mode, which depends -// on electing a leader per-template and watching of a key. This is used -// to reduce the cost of many instances of CT running the same template. -type DeduplicateConfig struct { - // Controls if deduplication mode is enabled - Enabled bool `mapstructure:"enabled"` - - // Controls the KV prefix used. Defaults to defaultDedupPrefix - Prefix string `mapstructure:"prefix"` - - // TTL is the Session TTL used for lock acquisition, defaults to 15 seconds. - TTL time.Duration `mapstructure:"ttl"` + c.Wait.Finalize() } -// SSLConfig is the configuration for SSL. -type SSLConfig struct { - Enabled bool `mapstructure:"enabled"` - Verify bool `mapstructure:"verify"` - Cert string `mapstructure:"cert"` - Key string `mapstructure:"key"` - CaCert string `mapstructure:"ca_cert"` - CaPath string `mapstructure:"ca_path"` - ServerName string `mapstructure:"server_name"` -} - -// SyslogConfig is the configuration for syslog. -type SyslogConfig struct { - Enabled bool `mapstructure:"enabled"` - Facility string `mapstructure:"facility"` -} - -// ConfigTemplate is the representation of an input template, output location, -// and optional command to execute when rendered -type ConfigTemplate struct { - Source string `mapstructure:"source"` - Destination string `mapstructure:"destination"` - EmbeddedTemplate string `mapstructure:"contents"` - Command string `mapstructure:"command"` - CommandTimeout time.Duration `mapstructure:"command_timeout"` - Perms os.FileMode `mapstructure:"perms"` - Backup bool `mapstructure:"backup"` - LeftDelim string `mapstructure:"left_delimiter"` - RightDelim string `mapstructure:"right_delimiter"` - Wait *watch.Wait `mapstructure:"wait"` +func stringFromEnv(list ...string) *string { + for _, s := range list { + if v := os.Getenv(s); v != "" { + return String(strings.TrimSpace(v)) + } + } + return nil } -// VaultConfig is the configuration for connecting to a vault server. -type VaultConfig struct { - Address string `mapstructure:"address"` - Token string `mapstructure:"token" json:"-"` - UnwrapToken bool `mapstructure:"unwrap_token"` - RenewToken bool `mapstructure:"renew_token"` - - // SSL indicates we should use a secure connection while talking to Vault. 
- SSL *SSLConfig `mapstructure:"ssl"` +func antiboolFromEnv(s string) *bool { + if b := boolFromEnv(s); b != nil { + return Bool(!*b) + } + return nil } -// ParseConfigTemplate parses a string into a ConfigTemplate struct -func ParseConfigTemplate(s string) (*ConfigTemplate, error) { - if len(strings.TrimSpace(s)) < 1 { - return nil, errors.New("cannot specify empty template declaration") - } - - var source, destination, command string - parts := configTemplateRe.FindAllString(s, -1) - - switch len(parts) { - case 1: - source = parts[0] - case 2: - source, destination = parts[0], parts[1] - case 3: - source, destination, command = parts[0], parts[1], parts[2] - default: - return nil, errors.New("invalid template declaration format") - } - - return &ConfigTemplate{ - Source: source, - Destination: destination, - Command: command, - CommandTimeout: DefaultCommandTimeout, - Perms: DefaultFilePerms, - Wait: &watch.Wait{}, - }, nil +func boolFromEnv(s string) *bool { + if v := os.Getenv(s); v != "" { + b, err := strconv.ParseBool(v) + if err == nil { + return Bool(b) + } + } + return nil } // flattenKeys is a function that takes a map[string]interface{} and recursively @@ -886,10 +548,16 @@ func flattenKeys(m map[string]interface{}, keys []string) { keyMap[key] = struct{}{} } - var flatten func(map[string]interface{}) - flatten = func(m map[string]interface{}) { + var flatten func(map[string]interface{}, string) + flatten = func(m map[string]interface{}, parent string) { for k, v := range m { - if _, ok := keyMap[k]; !ok { + // Calculate the map key, since it could include a parent. + mapKey := k + if parent != "" { + mapKey = parent + "." + k + } + + if _, ok := keyMap[mapKey]; !ok { continue } @@ -897,13 +565,13 @@ func flattenKeys(m map[string]interface{}, keys []string) { case []map[string]interface{}: if len(typed) > 0 { last := typed[len(typed)-1] - flatten(last) + flatten(last, mapKey) m[k] = last } else { m[k] = nil } case map[string]interface{}: - flatten(typed) + flatten(typed, mapKey) m[k] = typed default: m[k] = v @@ -911,5 +579,5 @@ func flattenKeys(m map[string]interface{}, keys []string) { } } - flatten(m) + flatten(m, "") } diff --git a/vendor/github.com/hashicorp/consul-template/config/consul.go b/vendor/github.com/hashicorp/consul-template/config/consul.go new file mode 100644 index 000000000000..a4fd0caa9375 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/consul.go @@ -0,0 +1,150 @@ +package config + +import "fmt" + +// ConsulConfig contains the configurations options for connecting to a +// Consul cluster. +type ConsulConfig struct { + // Address is the address of the Consul server. It may be an IP or FQDN. + Address *string + + // Auth is the HTTP basic authentication for communicating with Consul. + Auth *AuthConfig `mapstructure:"auth"` + + // Retry is the configuration for specifying how to behave on failure. + Retry *RetryConfig `mapstructure:"retry"` + + // SSL indicates we should use a secure connection while talking to + // Consul. This requires Consul to be configured to serve HTTPS. + SSL *SSLConfig `mapstructure:"ssl"` + + // Token is the token to communicate with Consul securely. + Token *string +} + +// DefaultConsulConfig returns a configuration that is populated with the +// default values. 
+func DefaultConsulConfig() *ConsulConfig { + return &ConsulConfig{ + Address: stringFromEnv("CONSUL_HTTP_ADDR"), + Auth: DefaultAuthConfig(), + Retry: DefaultRetryConfig(), + SSL: DefaultSSLConfig(), + Token: stringFromEnv("CONSUL_TOKEN", "CONSUL_HTTP_TOKEN"), + } +} + +// Copy returns a deep copy of this configuration. +func (c *ConsulConfig) Copy() *ConsulConfig { + if c == nil { + return nil + } + + var o ConsulConfig + + o.Address = c.Address + + if c.Auth != nil { + o.Auth = c.Auth.Copy() + } + + if c.Retry != nil { + o.Retry = c.Retry.Copy() + } + + if c.SSL != nil { + o.SSL = c.SSL.Copy() + } + + o.Token = c.Token + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *ConsulConfig) Merge(o *ConsulConfig) *ConsulConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Address != nil { + r.Address = o.Address + } + + if o.Auth != nil { + r.Auth = r.Auth.Merge(o.Auth) + } + + if o.Retry != nil { + r.Retry = r.Retry.Merge(o.Retry) + } + + if o.SSL != nil { + r.SSL = r.SSL.Merge(o.SSL) + } + + if o.Token != nil { + r.Token = o.Token + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *ConsulConfig) Finalize() { + if c.Address == nil { + c.Address = String("") + } + + if c.Auth == nil { + c.Auth = DefaultAuthConfig() + } + c.Auth.Finalize() + + if c.Retry == nil { + c.Retry = DefaultRetryConfig() + } + c.Retry.Finalize() + + if c.SSL == nil { + c.SSL = DefaultSSLConfig() + } + c.SSL.Finalize() + + if c.Token == nil { + c.Token = String("") + } +} + +// GoString defines the printable version of this struct. +func (c *ConsulConfig) GoString() string { + if c == nil { + return "(*ConsulConfig)(nil)" + } + + return fmt.Sprintf("&ConsulConfig{"+ + "Address:%s, "+ + "Auth:%#v, "+ + "Retry:%#v, "+ + "SSL:%#v, "+ + "Token:%t"+ + "}", + StringGoString(c.Address), + c.Auth, + c.Retry, + c.SSL, + StringPresent(c.Token), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/convert.go b/vendor/github.com/hashicorp/consul-template/config/convert.go new file mode 100644 index 000000000000..79b77e5a01ee --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/convert.go @@ -0,0 +1,197 @@ +package config + +import ( + "fmt" + "os" + "time" + + "github.com/hashicorp/consul-template/signals" +) + +// Bool returns a pointer to the given bool. +func Bool(b bool) *bool { + return &b +} + +// BoolVal returns the value of the boolean at the pointer, or false if the +// pointer is nil. +func BoolVal(b *bool) bool { + if b == nil { + return false + } + return *b +} + +// BoolGoString returns the value of the boolean for printing in a string. +func BoolGoString(b *bool) string { + if b == nil { + return "(*bool)(nil)" + } + return fmt.Sprintf("%t", *b) +} + +// BoolPresent returns a boolean indiciating if the pointer is nil, or if the +// pointer is pointing to the zero value.. +func BoolPresent(b *bool) bool { + if b == nil { + return false + } + return true +} + +// FileMode returns a pointer to the given os.FileMode. +func FileMode(o os.FileMode) *os.FileMode { + return &o +} + +// FileModeVal returns the value of the os.FileMode at the pointer, or 0 if the +// pointer is nil. 
+func FileModeVal(o *os.FileMode) os.FileMode { + if o == nil { + return 0 + } + return *o +} + +// FileModeGoString returns the value of the os.FileMode for printing in a +// string. +func FileModeGoString(o *os.FileMode) string { + if o == nil { + return "(*os.FileMode)(nil)" + } + return fmt.Sprintf("%q", *o) +} + +// FileModePresent returns a boolean indiciating if the pointer is nil, or if +// the pointer is pointing to the zero value. +func FileModePresent(o *os.FileMode) bool { + if o == nil { + return false + } + return *o != 0 +} + +// Int returns a pointer to the given int. +func Int(i int) *int { + return &i +} + +// IntVal returns the value of the int at the pointer, or 0 if the pointer is +// nil. +func IntVal(i *int) int { + if i == nil { + return 0 + } + return *i +} + +// IntGoString returns the value of the int for printing in a string. +func IntGoString(i *int) string { + if i == nil { + return "(*int)(nil)" + } + return fmt.Sprintf("%d", *i) +} + +// IntPresent returns a boolean indiciating if the pointer is nil, or if the +// pointer is pointing to the zero value. +func IntPresent(i *int) bool { + if i == nil { + return false + } + return *i != 0 +} + +// Signal returns a pointer to the given os.Signal. +func Signal(s os.Signal) *os.Signal { + return &s +} + +// SignalVal returns the value of the os.Signal at the pointer, or 0 if the +// pointer is nil. +func SignalVal(s *os.Signal) os.Signal { + if s == nil { + return (os.Signal)(nil) + } + return *s +} + +// SignalGoString returns the value of the os.Signal for printing in a string. +func SignalGoString(s *os.Signal) string { + if s == nil { + return "(*os.Signal)(nil)" + } + if *s == nil { + return "" + } + return fmt.Sprintf("%q", *s) +} + +// SignalPresent returns a boolean indiciating if the pointer is nil, or if the pointer is pointing to the zero value.. +func SignalPresent(s *os.Signal) bool { + if s == nil { + return false + } + return *s != signals.SIGNIL +} + +// String returns a pointer to the given string. +func String(s string) *string { + return &s +} + +// StringVal returns the value of the string at the pointer, or "" if the +// pointer is nil. +func StringVal(s *string) string { + if s == nil { + return "" + } + return *s +} + +// StringGoString returns the value of the string for printing in a string. +func StringGoString(s *string) string { + if s == nil { + return "(*string)(nil)" + } + return fmt.Sprintf("%q", *s) +} + +// StringPresent returns a boolean indiciating if the pointer is nil, or if the pointer is pointing to the zero value.. +func StringPresent(s *string) bool { + if s == nil { + return false + } + return *s != "" +} + +// TimeDuration returns a pointer to the given time.Duration. +func TimeDuration(t time.Duration) *time.Duration { + return &t +} + +// TimeDurationVal returns the value of the string at the pointer, or 0 if the +// pointer is nil. +func TimeDurationVal(t *time.Duration) time.Duration { + if t == nil { + return time.Duration(0) + } + return *t +} + +// TimeDurationGoString returns the value of the time.Duration for printing in a +// string. +func TimeDurationGoString(t *time.Duration) string { + if t == nil { + return "(*time.Duration)(nil)" + } + return fmt.Sprintf("%s", t) +} + +// TimeDurationPresent returns a boolean indiciating if the pointer is nil, or if the pointer is pointing to the zero value.. 
+func TimeDurationPresent(t *time.Duration) bool { + if t == nil { + return false + } + return *t != 0 +} diff --git a/vendor/github.com/hashicorp/consul-template/config/dedup.go b/vendor/github.com/hashicorp/consul-template/config/dedup.go new file mode 100644 index 000000000000..247855a938e3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/dedup.go @@ -0,0 +1,132 @@ +package config + +import ( + "fmt" + "time" +) + +const ( + // DefaultDedupPrefix is the default prefix used for deduplication mode. + DefaultDedupPrefix = "consul-template/dedup/" + + // DefaultDedupTTL is the default TTL for deduplicate mode. + DefaultDedupTTL = 15 * time.Second + + // DefaultDedupMaxStale is the default max staleness for the deduplication + // manager. + DefaultDedupMaxStale = DefaultMaxStale +) + +// DedupConfig is used to enable the de-duplication mode, which depends +// on electing a leader per-template and watching of a key. This is used +// to reduce the cost of many instances of CT running the same template. +type DedupConfig struct { + // Controls if deduplication mode is enabled + Enabled *bool `mapstructure:"enabled"` + + // MaxStale is the maximum amount of time to allow for stale queries. + MaxStale *time.Duration `mapstructure:"max_stale"` + + // Controls the KV prefix used. Defaults to defaultDedupPrefix + Prefix *string `mapstructure:"prefix"` + + // TTL is the Session TTL used for lock acquisition, defaults to 15 seconds. + TTL *time.Duration `mapstructure:"ttl"` +} + +// DefaultDedupConfig returns a configuration that is populated with the +// default values. +func DefaultDedupConfig() *DedupConfig { + return &DedupConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *DedupConfig) Copy() *DedupConfig { + if c == nil { + return nil + } + + var o DedupConfig + o.Enabled = c.Enabled + o.MaxStale = c.MaxStale + o.Prefix = c.Prefix + o.TTL = c.TTL + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *DedupConfig) Merge(o *DedupConfig) *DedupConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.MaxStale != nil { + r.MaxStale = o.MaxStale + } + + if o.Prefix != nil { + r.Prefix = o.Prefix + } + + if o.TTL != nil { + r.TTL = o.TTL + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *DedupConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + TimeDurationPresent(c.MaxStale) || + StringPresent(c.Prefix) || + TimeDurationPresent(c.TTL)) + } + + if c.MaxStale == nil { + c.MaxStale = TimeDuration(DefaultDedupMaxStale) + } + + if c.Prefix == nil { + c.Prefix = String(DefaultDedupPrefix) + } + + if c.TTL == nil { + c.TTL = TimeDuration(DefaultDedupTTL) + } +} + +// GoString defines the printable version of this struct. 
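// Illustrative sketch (assumes it sits in this package): DedupConfig.Finalize
// treats the presence of any sub-field as an implicit opt-in, so setting only
// the KV prefix is enough to turn deduplication on. The prefix value is
// hypothetical.
func exampleDedupFinalize() {
	d := &DedupConfig{Prefix: String("my-app/dedup/")}
	d.Finalize()

	fmt.Println(BoolVal(d.Enabled))     // true, inferred from the non-empty Prefix
	fmt.Println(TimeDurationVal(d.TTL)) // 15s (DefaultDedupTTL)
}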
+func (c *DedupConfig) GoString() string { + if c == nil { + return "(*DedupConfig)(nil)" + } + return fmt.Sprintf("&DedupConfig{"+ + "Enabled:%s, "+ + "MaxStale:%s, "+ + "Prefix:%s, "+ + "TTL:%s"+ + "}", + BoolGoString(c.Enabled), + TimeDurationGoString(c.MaxStale), + StringGoString(c.Prefix), + TimeDurationGoString(c.TTL), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/env.go b/vendor/github.com/hashicorp/consul-template/config/env.go new file mode 100644 index 000000000000..fad2dd72fda5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/env.go @@ -0,0 +1,209 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// EnvConfig is an embeddable struct for things that accept environment +// variable filtering. You should not use this directly and it is only public +// for mapstructure's decoding. +type EnvConfig struct { + // BlacklistEnv specifies a list of environment variables to explicitly + // disclude from the list of environment variables populated to the child. + // If both WhitelistEnv and BlacklistEnv are provided, BlacklistEnv takes + // precedence over the values in WhitelistEnv. + Blacklist []string `mapstructure:"blacklist"` + + // CustomEnv specifies custom environment variables to pass to the child + // process. These are provided programatically, override any environment + // variables of the same name, are ignored from whitelist/blacklist, and + // are still included even if PristineEnv is set to true. + Custom []string `mapstructure:"custom"` + + // PristineEnv specifies if the child process should inherit the parent's + // environment. + Pristine *bool `mapstructure:"pristine"` + + // WhitelistEnv specifies a list of environment variables to exclusively + // include in the list of environment variables populated to the child. + Whitelist []string `mapstructure:"whitelist"` +} + +// DefaultEnvConfig returns a configuration that is populated with the +// default values. +func DefaultEnvConfig() *EnvConfig { + return &EnvConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *EnvConfig) Copy() *EnvConfig { + if c == nil { + return nil + } + + var o EnvConfig + + if c.Blacklist != nil { + o.Blacklist = append([]string{}, c.Blacklist...) + } + + if c.Custom != nil { + o.Custom = append([]string{}, c.Custom...) + } + + o.Pristine = c.Pristine + + if c.Whitelist != nil { + o.Whitelist = append([]string{}, c.Whitelist...) + } + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *EnvConfig) Merge(o *EnvConfig) *EnvConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Blacklist != nil { + r.Blacklist = append(r.Blacklist, o.Blacklist...) + } + + if o.Custom != nil { + r.Custom = append(r.Custom, o.Custom...) + } + + if o.Pristine != nil { + r.Pristine = o.Pristine + } + + if o.Whitelist != nil { + r.Whitelist = append(r.Whitelist, o.Whitelist...) + } + + return r +} + +// Env calculates and returns the finalized environment for this exec +// configuration. It takes into account pristine, custom environment, whitelist, +// and blacklist values. +func (c *EnvConfig) Env() []string { + // In pristine mode, just return the custom environment. 
If the user did not + // specify a custom environment, just return the empty slice to force an + // empty environment. We cannot return nil here because the later call to + // os/exec will think we want to inherit the parent. + if BoolVal(c.Pristine) { + if len(c.Custom) > 0 { + return c.Custom + } + return []string{} + } + + // Pull all the key-value pairs out of the environment + environ := os.Environ() + keys := make([]string, len(environ)) + env := make(map[string]string, len(environ)) + for i, v := range environ { + list := strings.SplitN(v, "=", 2) + keys[i] = list[0] + env[list[0]] = list[1] + } + + // anyGlobMatch is a helper function which checks if any of the given globs + // match the string. + anyGlobMatch := func(s string, patterns []string) bool { + for _, pattern := range patterns { + if matched, _ := filepath.Match(pattern, s); matched { + return true + } + } + return false + } + + // Pull out any envvars that match the whitelist. + if len(c.Whitelist) > 0 { + newKeys := make([]string, 0, len(keys)) + for _, k := range keys { + if anyGlobMatch(k, c.Whitelist) { + newKeys = append(newKeys, k) + } + } + keys = newKeys + } + + // Remove any envvars that match the blacklist. + if len(c.Blacklist) > 0 { + newKeys := make([]string, 0, len(keys)) + for _, k := range keys { + if !anyGlobMatch(k, c.Blacklist) { + newKeys = append(newKeys, k) + } + } + keys = newKeys + } + + // Build the final list using only the filtered keys. + finalEnv := make([]string, 0, len(keys)+len(c.Custom)) + for _, k := range keys { + finalEnv = append(finalEnv, k+"="+env[k]) + } + + // Append remaining custom environment. + finalEnv = append(finalEnv, c.Custom...) + + return finalEnv +} + +// Finalize ensures there no nil pointers. +func (c *EnvConfig) Finalize() { + if c.Blacklist == nil { + c.Blacklist = []string{} + } + + if c.Custom == nil { + c.Custom = []string{} + } + + if c.Pristine == nil { + c.Pristine = Bool(false) + } + + if c.Whitelist == nil { + c.Whitelist = []string{} + } +} + +// GoString defines the printable version of this struct. +func (c *EnvConfig) GoString() string { + if c == nil { + return "(*EnvConfig)(nil)" + } + + return fmt.Sprintf("&EnvConfig{"+ + "Blacklist:%v, "+ + "Custom:%v, "+ + "Pristine:%s, "+ + "Whitelist:%v"+ + "}", + c.Blacklist, + c.Custom, + BoolGoString(c.Pristine), + c.Whitelist, + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/exec.go b/vendor/github.com/hashicorp/consul-template/config/exec.go new file mode 100644 index 000000000000..22c7070a4665 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/exec.go @@ -0,0 +1,216 @@ +package config + +import ( + "fmt" + "os" + "syscall" + "time" +) + +const ( + // DefaultExecKillSignal is the default signal to send to the process to + // tell it to gracefully terminate. + DefaultExecKillSignal = syscall.SIGINT + + // DefaultExecKillTimeout is the maximum amount of time to wait for the + // process to gracefully terminate before force-killing it. + DefaultExecKillTimeout = 30 * time.Second + + // DefaultExecTimeout is the default amount of time to wait for a + // command to exit. By default, this is disabled, which means the command + // is allowed to run for an infinite amount of time. + DefaultExecTimeout = 0 * time.Second +) + +var ( + // DefaultExecReloadSignal is the default signal to send to the process to + // tell it to reload its configuration. 
+ DefaultExecReloadSignal = (os.Signal)(nil) +) + +// ExecConfig is used to configure the application when it runs in +// exec/supervise mode. +type ExecConfig struct { + // Command is the command to execute and watch as a child process. + Command *string `mapstructure:"command"` + + // Enabled controls if this exec is enabled. + Enabled *bool `mapstructure:"enabled"` + + // EnvConfig is the environmental customizations. + Env *EnvConfig `mapstructure:"env"` + + // KillSignal is the signal to send to the command to kill it gracefully. The + // default value is "SIGTERM". + KillSignal *os.Signal `mapstructure:"kill_signal"` + + // KillTimeout is the amount of time to give the process to cleanup before + // hard-killing it. + KillTimeout *time.Duration `mapstructure:"kill_timeout"` + + // ReloadSignal is the signal to send to the child process when a template + // changes. This tells the child process that templates have + ReloadSignal *os.Signal `mapstructure:"reload_signal"` + + // Splay is the maximum amount of random time to wait to signal or kill the + // process. By default this is disabled, but it can be set to low values to + // reduce the "thundering herd" problem where all tasks are restarted at once. + Splay *time.Duration `mapstructure:"splay"` + + // Timeout is the maximum amount of time to wait for a command to complete. + // By default, this is 0, which means "wait forever". + Timeout *time.Duration `mapstructure:"timeout"` +} + +// DefaultExecConfig returns a configuration that is populated with the +// default values. +func DefaultExecConfig() *ExecConfig { + return &ExecConfig{ + Env: DefaultEnvConfig(), + } +} + +// Copy returns a deep copy of this configuration. +func (c *ExecConfig) Copy() *ExecConfig { + if c == nil { + return nil + } + + var o ExecConfig + + o.Command = c.Command + + o.Enabled = c.Enabled + + if c.Env != nil { + o.Env = c.Env.Copy() + } + + o.KillSignal = c.KillSignal + + o.KillTimeout = c.KillTimeout + + o.ReloadSignal = c.ReloadSignal + + o.Splay = c.Splay + + o.Timeout = c.Timeout + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *ExecConfig) Merge(o *ExecConfig) *ExecConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Command != nil { + r.Command = o.Command + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Env != nil { + r.Env = r.Env.Merge(o.Env) + } + + if o.KillSignal != nil { + r.KillSignal = o.KillSignal + } + + if o.KillTimeout != nil { + r.KillTimeout = o.KillTimeout + } + + if o.ReloadSignal != nil { + r.ReloadSignal = o.ReloadSignal + } + + if o.Splay != nil { + r.Splay = o.Splay + } + + if o.Timeout != nil { + r.Timeout = o.Timeout + } + + return r +} + +// Finalize ensures there no nil pointers. 
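// Illustrative sketch (assumes it sits in this package): the environment
// filtering performed by (*EnvConfig).Env() for exec mode. Whitelist selects
// variables by glob, Blacklist then removes by glob, and Custom entries are
// always appended. The pattern values below are hypothetical.
func exampleEnvFiltering() []string {
	e := &EnvConfig{
		Whitelist: []string{"CONSUL_*", "VAULT_*"}, // keep only these
		Blacklist: []string{"VAULT_TOKEN"},         // ...except the token
		Custom:    []string{"APP_MODE=production"}, // always included
	}
	e.Finalize() // fill remaining nil pointers (Pristine defaults to false)
	return e.Env()
}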
+func (c *ExecConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Command)) + } + + if c.Command == nil { + c.Command = String("") + } + + if c.Env == nil { + c.Env = DefaultEnvConfig() + } + c.Env.Finalize() + + if c.KillSignal == nil { + c.KillSignal = Signal(DefaultExecKillSignal) + } + + if c.KillTimeout == nil { + c.KillTimeout = TimeDuration(DefaultExecKillTimeout) + } + + if c.ReloadSignal == nil { + c.ReloadSignal = Signal(DefaultExecReloadSignal) + } + + if c.Splay == nil { + c.Splay = TimeDuration(0 * time.Second) + } + + if c.Timeout == nil { + c.Timeout = TimeDuration(DefaultExecTimeout) + } +} + +// GoString defines the printable version of this struct. +func (c *ExecConfig) GoString() string { + if c == nil { + return "(*ExecConfig)(nil)" + } + + return fmt.Sprintf("&ExecConfig{"+ + "Command:%s, "+ + "Enabled:%s, "+ + "Env:%#v, "+ + "KillSignal:%s, "+ + "KillTimeout:%s, "+ + "ReloadSignal:%s, "+ + "Splay:%s, "+ + "Timeout:%s"+ + "}", + StringGoString(c.Command), + BoolGoString(c.Enabled), + c.Env, + SignalGoString(c.KillSignal), + TimeDurationGoString(c.KillTimeout), + SignalGoString(c.ReloadSignal), + TimeDurationGoString(c.Splay), + TimeDurationGoString(c.Timeout), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/mapstructure.go b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go index e788a0b67f7c..64ea53933dc7 100644 --- a/vendor/github.com/hashicorp/consul-template/config/mapstructure.go +++ b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go @@ -1,6 +1,7 @@ package config import ( + "log" "os" "reflect" "strconv" @@ -31,3 +32,44 @@ func StringToFileModeFunc() mapstructure.DecodeHookFunc { return os.FileMode(v), nil } } + +// StringToWaitDurationHookFunc returns a function that converts strings to wait +// value. This is designed to be used with mapstructure for parsing out a wait +// value. +func StringToWaitDurationHookFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(WaitConfig{}) { + return data, nil + } + + // Convert it by parsing + return ParseWaitConfig(data.(string)) + } +} + +// ConsulStringToStructFunc checks if the value set for the key should actually +// be a struct and sets the appropriate value in the struct. This is for +// backwards-compatability with older versions of Consul Template. +func ConsulStringToStructFunc() mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if t == reflect.TypeOf(ConsulConfig{}) && f.Kind() == reflect.String { + log.Println("[WARN] consul now accepts a stanza instead of a string. " + + "Update your configuration files and change consul = \"\" to " + + "consul { } instead.") + return &ConsulConfig{ + Address: String(data.(string)), + }, nil + } + + return data, nil + } +} diff --git a/vendor/github.com/hashicorp/consul-template/config/retry.go b/vendor/github.com/hashicorp/consul-template/config/retry.go new file mode 100644 index 000000000000..57c39f57d0f1 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/retry.go @@ -0,0 +1,140 @@ +package config + +import ( + "fmt" + "math" + "time" +) + +const ( + // DefaultRetryAttempts is the default number of maximum retry attempts. + DefaultRetryAttempts = 5 + + // DefaultRetryBackoff is the default base for the exponential backoff + // algorithm. 
+ DefaultRetryBackoff = 250 * time.Millisecond +) + +// RetryFunc is the signature of a function that supports retries. +type RetryFunc func(int) (bool, time.Duration) + +// RetryConfig is a shared configuration for upstreams that support retires on +// failure. +type RetryConfig struct { + // Attempts is the total number of maximum attempts to retry before letting + // the error fall through. + Attempts *int + + // Backoff is the base of the exponentialbackoff. This number will be + // multipled by the next power of 2 on each iteration. + Backoff *time.Duration + + // Enabled signals if this retry is enabled. + Enabled *bool +} + +// DefaultRetryConfig returns a configuration that is populated with the +// default values. +func DefaultRetryConfig() *RetryConfig { + return &RetryConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *RetryConfig) Copy() *RetryConfig { + if c == nil { + return nil + } + + var o RetryConfig + + o.Attempts = c.Attempts + + o.Backoff = c.Backoff + + o.Enabled = c.Enabled + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *RetryConfig) Merge(o *RetryConfig) *RetryConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Attempts != nil { + r.Attempts = o.Attempts + } + + if o.Backoff != nil { + r.Backoff = o.Backoff + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + return r +} + +// RetryFunc returns the retry function associated with this configuration. +func (c *RetryConfig) RetryFunc() RetryFunc { + return func(retry int) (bool, time.Duration) { + if !BoolVal(c.Enabled) { + return false, 0 + } + + if IntVal(c.Attempts) > 0 && retry > IntVal(c.Attempts)-1 { + return false, 0 + } + + base := math.Pow(2, float64(retry)) + sleep := time.Duration(base) * TimeDurationVal(c.Backoff) + + return true, sleep + } +} + +// Finalize ensures there no nil pointers. +func (c *RetryConfig) Finalize() { + if c.Attempts == nil { + c.Attempts = Int(DefaultRetryAttempts) + } + + if c.Backoff == nil { + c.Backoff = TimeDuration(DefaultRetryBackoff) + } + + if c.Enabled == nil { + c.Enabled = Bool(true) + } +} + +// GoString defines the printable version of this struct. +func (c *RetryConfig) GoString() string { + if c == nil { + return "(*RetryConfig)(nil)" + } + + return fmt.Sprintf("&RetryConfig{"+ + "Attempts:%s, "+ + "Backoff:%s, "+ + "Enabled:%s"+ + "}", + IntGoString(c.Attempts), + TimeDurationGoString(c.Backoff), + BoolGoString(c.Enabled), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/ssl.go b/vendor/github.com/hashicorp/consul-template/config/ssl.go new file mode 100644 index 000000000000..ab3b77e614b0 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/ssl.go @@ -0,0 +1,153 @@ +package config + +import "fmt" + +const ( + // DefaultSSLVerify is the default value for SSL verification. + DefaultSSLVerify = true +) + +// SSLConfig is the configuration for SSL. 
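// Illustrative sketch (assumes it sits in this package): the schedule produced
// by RetryFunc grows as Backoff * 2^retry until Attempts is exhausted. This
// sketch starts counting retries at 0, which is a choice of the caller; with
// the defaults above (5 attempts, 250ms base) it prints 250ms, 500ms, 1s, 2s,
// 4s and then stops.
func exampleRetrySchedule() {
	r := DefaultRetryConfig()
	r.Finalize() // Attempts=5, Backoff=250ms, Enabled=true
	retry := r.RetryFunc()

	for i := 0; ; i++ {
		again, sleep := retry(i)
		if !again {
			break
		}
		fmt.Printf("retry %d: sleep %s\n", i, sleep)
	}
}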
+type SSLConfig struct { + CaCert *string `mapstructure:"ca_cert"` + CaPath *string `mapstructure:"ca_path"` + Cert *string `mapstructure:"cert"` + Enabled *bool `mapstructure:"enabled"` + Key *string `mapstructure:"key"` + ServerName *string `mapstructure:"server_name"` + Verify *bool `mapstructure:"verify"` +} + +// DefaultSSLConfig returns a configuration that is populated with the +// default values. +func DefaultSSLConfig() *SSLConfig { + return &SSLConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *SSLConfig) Copy() *SSLConfig { + if c == nil { + return nil + } + + var o SSLConfig + o.CaCert = c.CaCert + o.CaPath = c.CaPath + o.Cert = c.Cert + o.Enabled = c.Enabled + o.Key = c.Key + o.ServerName = c.ServerName + o.Verify = c.Verify + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *SSLConfig) Merge(o *SSLConfig) *SSLConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Cert != nil { + r.Cert = o.Cert + } + + if o.CaCert != nil { + r.CaCert = o.CaCert + } + + if o.CaPath != nil { + r.CaPath = o.CaPath + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Key != nil { + r.Key = o.Key + } + + if o.ServerName != nil { + r.ServerName = o.ServerName + } + + if o.Verify != nil { + r.Verify = o.Verify + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *SSLConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(false || + StringPresent(c.Cert) || + StringPresent(c.CaCert) || + StringPresent(c.CaPath) || + StringPresent(c.Key) || + StringPresent(c.ServerName) || + BoolPresent(c.Verify)) + } + + if c.Cert == nil { + c.Cert = String("") + } + + if c.CaCert == nil { + c.CaCert = String("") + } + + if c.CaPath == nil { + c.CaPath = String("") + } + + if c.Key == nil { + c.Key = String("") + } + + if c.ServerName == nil { + c.ServerName = String("") + } + + if c.Verify == nil { + c.Verify = Bool(DefaultSSLVerify) + } +} + +// GoString defines the printable version of this struct. +func (c *SSLConfig) GoString() string { + if c == nil { + return "(*SSLConfig)(nil)" + } + + return fmt.Sprintf("&SSLConfig{"+ + "CaCert:%s, "+ + "CaPath:%s, "+ + "Cert:%s, "+ + "Enabled:%s, "+ + "Key:%s, "+ + "ServerName:%s, "+ + "Verify:%s"+ + "}", + StringGoString(c.CaCert), + StringGoString(c.CaPath), + StringGoString(c.Cert), + BoolGoString(c.Enabled), + StringGoString(c.Key), + StringGoString(c.ServerName), + BoolGoString(c.Verify), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/syslog.go b/vendor/github.com/hashicorp/consul-template/config/syslog.go new file mode 100644 index 000000000000..0de67199d7d2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/syslog.go @@ -0,0 +1,87 @@ +package config + +import "fmt" + +const ( + // DefaultSyslogFacility is the default facility to log to. + DefaultSyslogFacility = "LOCAL0" +) + +// SyslogConfig is the configuration for syslog. +type SyslogConfig struct { + Enabled *bool `mapstructure:"enabled"` + Facility *string `mapstructure:"facility"` +} + +// DefaultSyslogConfig returns a configuration that is populated with the +// default values. 
+func DefaultSyslogConfig() *SyslogConfig { + return &SyslogConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *SyslogConfig) Copy() *SyslogConfig { + if c == nil { + return nil + } + + var o SyslogConfig + o.Enabled = c.Enabled + o.Facility = c.Facility + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *SyslogConfig) Merge(o *SyslogConfig) *SyslogConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Facility != nil { + r.Facility = o.Facility + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *SyslogConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Facility)) + } + + if c.Facility == nil { + c.Facility = String(DefaultSyslogFacility) + } +} + +// GoString defines the printable version of this struct. +func (c *SyslogConfig) GoString() string { + if c == nil { + return "(*SyslogConfig)(nil)" + } + + return fmt.Sprintf("&SyslogConfig{"+ + "Enabled:%s, "+ + "Facility:%s"+ + "}", + BoolGoString(c.Enabled), + StringGoString(c.Facility), + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/template.go b/vendor/github.com/hashicorp/consul-template/config/template.go new file mode 100644 index 000000000000..13ff1409c2d2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/template.go @@ -0,0 +1,405 @@ +package config + +import ( + "errors" + "fmt" + "os" + "regexp" + "strings" + "time" +) + +const ( + // DefaultTemplateFilePerms are the default file permissions for templates + // rendered onto disk when a specific file permission has not already been + // specified. + DefaultTemplateFilePerms = 0644 + + // DefaultTemplateCommandTimeout is the amount of time to wait for a command + // to return. + DefaultTemplateCommandTimeout = 30 * time.Second +) + +var ( + // ErrTemplateStringEmpty is the error returned with the template contents + // are empty. + ErrTemplateStringEmpty = errors.New("template: cannot be empty") + + // ErrTemplateInvalidFormat is the error returned with the template is not + // a valid format. + ErrTemplateInvalidFormat = errors.New("template: invalid format") + + // configTemplateRe is the pattern to split the config template syntax. + configTemplateRe = regexp.MustCompile("([a-zA-Z]:)?([^:]+)") +) + +// TemplateConfig is a representation of a template on disk, as well as the +// associated commands and reload instructions. +type TemplateConfig struct { + // Backup determines if this template should retain a backup. The default + // value is false. + Backup *bool `mapstructure:"backup"` + + // Command is the arbitrary command to execute after a template has + // successfully rendered. This is DEPRECATED. Use Exec instead. + Command *string `mapstructure:"command"` + + // CommandTimeout is the amount of time to wait for the command to finish + // before force-killing it. This is DEPRECATED. Use Exec instead. + CommandTimeout *time.Duration `mapstructure:"command_timeout"` + + // Contents are the raw template contents to evaluate. Either this or Source + // must be specified, but not both. 
+ Contents *string `mapstructure:"contents"` + + // Destination is the location on disk where the template should be rendered. + // This is required unless running in debug/dry mode. + Destination *string `mapstructure:"destination"` + + // Exec is the configuration for the command to run when the template renders + // successfully. + Exec *ExecConfig `mapstructure:"exec"` + + // Perms are the file system permissions to use when creating the file on + // disk. This is useful for when files contain sensitive information, such as + // secrets from Vault. + Perms *os.FileMode `mapstructure:"perms"` + + // Source is the path on disk to the template contents to evaluate. Either + // this or Contents should be specified, but not both. + Source *string `mapstructure:"source"` + + // Wait configures per-template quiescence timers. + Wait *WaitConfig `mapstructure:"wait"` + + // LeftDelim and RightDelim are optional configurations to control what + // delimiter is utilized when parsing the template. + LeftDelim *string `mapstructure:"left_delimiter"` + RightDelim *string `mapstructure:"right_delimiter"` +} + +// DefaultTemplateConfig returns a configuration that is populated with the +// default values. +func DefaultTemplateConfig() *TemplateConfig { + return &TemplateConfig{ + Exec: DefaultExecConfig(), + Wait: DefaultWaitConfig(), + } +} + +// Copy returns a deep copy of this configuration. +func (c *TemplateConfig) Copy() *TemplateConfig { + if c == nil { + return nil + } + + var o TemplateConfig + + o.Backup = c.Backup + + o.Command = c.Command + + o.CommandTimeout = c.CommandTimeout + + o.Contents = c.Contents + + o.Destination = c.Destination + + if c.Exec != nil { + o.Exec = c.Exec.Copy() + } + + o.Perms = c.Perms + + o.Source = c.Source + + if c.Wait != nil { + o.Wait = c.Wait.Copy() + } + + o.LeftDelim = c.LeftDelim + o.RightDelim = c.RightDelim + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *TemplateConfig) Merge(o *TemplateConfig) *TemplateConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Backup != nil { + r.Backup = o.Backup + } + + if o.Command != nil { + r.Command = o.Command + } + + if o.CommandTimeout != nil { + r.CommandTimeout = o.CommandTimeout + } + + if o.Contents != nil { + r.Contents = o.Contents + } + + if o.Destination != nil { + r.Destination = o.Destination + } + + if o.Exec != nil { + r.Exec = r.Exec.Merge(o.Exec) + } + + if o.Perms != nil { + r.Perms = o.Perms + } + + if o.Source != nil { + r.Source = o.Source + } + + if o.Wait != nil { + r.Wait = r.Wait.Merge(o.Wait) + } + + if o.LeftDelim != nil { + r.LeftDelim = o.LeftDelim + } + + if o.RightDelim != nil { + r.RightDelim = o.RightDelim + } + + return r +} + +// Finalize ensures the configuration has no nil pointers and sets default +// values. 
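// Illustrative sketch (assumes it sits in this package): an inline template is
// configured through Contents rather than Source, and Finalize fills the
// remaining fields with the defaults declared above (0644 permissions, a 30s
// command timeout, and so on). The destination path and template contents are
// hypothetical.
func exampleInlineTemplate() *TemplateConfig {
	t := DefaultTemplateConfig()
	t.Contents = String("{{ key \"app/config\" }}")
	t.Destination = String("/etc/app/config.out")
	t.Finalize()

	// FileModeVal(t.Perms) == DefaultTemplateFilePerms (0644)
	// TimeDurationVal(t.CommandTimeout) == DefaultTemplateCommandTimeout (30s)
	return t
}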
+func (c *TemplateConfig) Finalize() { + if c.Backup == nil { + c.Backup = Bool(false) + } + + if c.Command == nil { + c.Command = String("") + } + + if c.CommandTimeout == nil { + c.CommandTimeout = TimeDuration(DefaultTemplateCommandTimeout) + } + + if c.Contents == nil { + c.Contents = String("") + } + + if c.Destination == nil { + c.Destination = String("") + } + + if c.Exec == nil { + c.Exec = DefaultExecConfig() + } + + // Backwards compat for specifying command directly + if c.Exec.Command == nil && c.Command != nil { + c.Exec.Command = c.Command + } + if c.Exec.Timeout == nil && c.CommandTimeout != nil { + c.Exec.Timeout = c.CommandTimeout + } + c.Exec.Finalize() + + if c.Perms == nil { + c.Perms = FileMode(DefaultTemplateFilePerms) + } + + if c.Source == nil { + c.Source = String("") + } + + if c.Wait == nil { + c.Wait = DefaultWaitConfig() + } + c.Wait.Finalize() + + if c.LeftDelim == nil { + c.LeftDelim = String("") + } + + if c.RightDelim == nil { + c.RightDelim = String("") + } +} + +// GoString defines the printable version of this struct. +func (c *TemplateConfig) GoString() string { + if c == nil { + return "(*TemplateConfig)(nil)" + } + + return fmt.Sprintf("&TemplateConfig{"+ + "Backup:%s, "+ + "Command:%s, "+ + "CommandTimeout:%s, "+ + "Contents:%s, "+ + "Destination:%s, "+ + "Exec:%#v, "+ + "Perms:%s, "+ + "Source:%s, "+ + "Wait:%#v, "+ + "LeftDelim:%s, "+ + "RightDelim:%s"+ + "}", + BoolGoString(c.Backup), + StringGoString(c.Command), + TimeDurationGoString(c.CommandTimeout), + StringGoString(c.Contents), + StringGoString(c.Destination), + c.Exec, + FileModeGoString(c.Perms), + StringGoString(c.Source), + c.Wait, + StringGoString(c.LeftDelim), + StringGoString(c.RightDelim), + ) +} + +// Display is the human-friendly form of this configuration. It tries to +// describe this template in as much detail as possible in a single line, so +// log consumers can uniquely identify it. +func (c *TemplateConfig) Display() string { + if c == nil { + return "" + } + + source := c.Source + if StringPresent(c.Contents) { + source = String("(dynamic)") + } + + return fmt.Sprintf("%q => %q", + StringVal(source), + StringVal(c.Destination), + ) +} + +// TemplateConfigs is a collection of TemplateConfigs +type TemplateConfigs []*TemplateConfig + +// DefaultTemplateConfigs returns a configuration that is populated with the +// default values. +func DefaultTemplateConfigs() *TemplateConfigs { + return &TemplateConfigs{} +} + +// Copy returns a deep copy of this configuration. +func (c *TemplateConfigs) Copy() *TemplateConfigs { + o := make(TemplateConfigs, len(*c)) + for i, t := range *c { + o[i] = t.Copy() + } + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *TemplateConfigs) Merge(o *TemplateConfigs) *TemplateConfigs { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + *r = append(*r, *o...) + + return r +} + +// Finalize ensures the configuration has no nil pointers and sets default +// values. +func (c *TemplateConfigs) Finalize() { + if c == nil { + *c = *DefaultTemplateConfigs() + } + + for _, t := range *c { + t.Finalize() + } +} + +// GoString defines the printable version of this struct. 
+func (c *TemplateConfigs) GoString() string { + if c == nil { + return "(*TemplateConfigs)(nil)" + } + + s := make([]string, len(*c)) + for i, t := range *c { + s[i] = t.GoString() + } + + return "{" + strings.Join(s, ", ") + "}" +} + +// ParseTemplateConfig parses a string in the form source:destination:command +// into a TemplateConfig. +func ParseTemplateConfig(s string) (*TemplateConfig, error) { + if len(strings.TrimSpace(s)) < 1 { + return nil, ErrTemplateStringEmpty + } + + var source, destination, command string + parts := configTemplateRe.FindAllString(s, -1) + + switch len(parts) { + case 1: + source = parts[0] + case 2: + source, destination = parts[0], parts[1] + case 3: + source, destination, command = parts[0], parts[1], parts[2] + default: + return nil, ErrTemplateInvalidFormat + } + + var sourcePtr, destinationPtr, commandPtr *string + if source != "" { + sourcePtr = String(source) + } + if destination != "" { + destinationPtr = String(destination) + } + if command != "" { + commandPtr = String(command) + } + + return &TemplateConfig{ + Source: sourcePtr, + Destination: destinationPtr, + Command: commandPtr, + }, nil +} diff --git a/vendor/github.com/hashicorp/consul-template/config/vault.go b/vendor/github.com/hashicorp/consul-template/config/vault.go new file mode 100644 index 000000000000..e6ed33ea5d4e --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/vault.go @@ -0,0 +1,212 @@ +package config + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/api" +) + +const ( + // DefaultVaultRenewToken is the default value for if the Vault token should + // be renewed. + DefaultVaultRenewToken = true + + // DefaultVaultUnwrapToken is the default value for if the Vault token should + // be unwrapped. + DefaultVaultUnwrapToken = false + + // DefaultVaultRetryBase is the default value for the base time to use for + // exponential backoff. + DefaultVaultRetryBase = 250 * time.Millisecond + + // DefaultVaultRetryMaxAttempts is the default maximum number of attempts to + // retry before quitting. + DefaultVaultRetryMaxAttempts = 5 +) + +// VaultConfig is the configuration for connecting to a vault server. +type VaultConfig struct { + // Address is the URI to the Vault server. + Address *string `mapstructure:"address"` + + // Enabled controls whether the Vault integration is active. + Enabled *bool `mapstructure:"enabled"` + + // RenewToken renews the Vault token. + RenewToken *bool `mapstructure:"renew_token"` + + // Retry is the configuration for specifying how to behave on failure. + Retry *RetryConfig `mapstructure:"retry"` + + // SSL indicates we should use a secure connection while talking to Vault. + SSL *SSLConfig `mapstructure:"ssl"` + + // Token is the Vault token to communicate with for requests. It may be + // a wrapped token or a real token. This can also be set via the VAULT_TOKEN + // environment variable. + Token *string `mapstructure:"token" json:"-"` + + // UnwrapToken unwraps the provided Vault token as a wrapped token. + UnwrapToken *bool `mapstructure:"unwrap_token"` +} + +// DefaultVaultConfig returns a configuration that is populated with the +// default values. 
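// Illustrative sketch (assumes it sits in this package): the colon-separated
// CLI form accepted by ParseTemplateConfig. The paths and command are
// hypothetical.
func exampleParseTemplate() {
	t, err := ParseTemplateConfig("/etc/ct/in.ctmpl:/etc/app/out.conf:service app reload")
	if err != nil {
		panic(err)
	}
	fmt.Println(StringVal(t.Source))      // /etc/ct/in.ctmpl
	fmt.Println(StringVal(t.Destination)) // /etc/app/out.conf
	fmt.Println(StringVal(t.Command))     // service app reload
}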
+func DefaultVaultConfig() *VaultConfig { + v := &VaultConfig{ + Address: stringFromEnv(api.EnvVaultAddress), + RenewToken: boolFromEnv("VAULT_RENEW_TOKEN"), + UnwrapToken: boolFromEnv("VAULT_UNWRAP_TOKEN"), + Retry: DefaultRetryConfig(), + SSL: &SSLConfig{ + CaCert: stringFromEnv(api.EnvVaultCACert), + CaPath: stringFromEnv(api.EnvVaultCAPath), + Cert: stringFromEnv(api.EnvVaultClientCert), + Key: stringFromEnv(api.EnvVaultClientKey), + ServerName: stringFromEnv(api.EnvVaultTLSServerName), + Verify: antiboolFromEnv(api.EnvVaultInsecure), + }, + Token: stringFromEnv("VAULT_TOKEN"), + } + + // Force SSL when communicating with Vault. + v.SSL.Enabled = Bool(true) + + return v +} + +// Copy returns a deep copy of this configuration. +func (c *VaultConfig) Copy() *VaultConfig { + if c == nil { + return nil + } + + var o VaultConfig + o.Address = c.Address + + o.Enabled = c.Enabled + + o.RenewToken = c.RenewToken + + if c.Retry != nil { + o.Retry = c.Retry.Copy() + } + + if c.SSL != nil { + o.SSL = c.SSL.Copy() + } + + o.Token = c.Token + + o.UnwrapToken = c.UnwrapToken + + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *VaultConfig) Merge(o *VaultConfig) *VaultConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Address != nil { + r.Address = o.Address + } + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.RenewToken != nil { + r.RenewToken = o.RenewToken + } + + if o.Retry != nil { + r.Retry = r.Retry.Merge(o.Retry) + } + + if o.SSL != nil { + r.SSL = r.SSL.Merge(o.SSL) + } + + if o.Token != nil { + r.Token = o.Token + } + + if o.UnwrapToken != nil { + r.UnwrapToken = o.UnwrapToken + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *VaultConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(StringPresent(c.Address)) + } + + if c.Address == nil { + c.Address = String("") + } + + if c.RenewToken == nil { + c.RenewToken = Bool(DefaultVaultRenewToken) + } + + if c.Retry == nil { + c.Retry = DefaultRetryConfig() + } + c.Retry.Finalize() + + if c.SSL == nil { + c.SSL = DefaultSSLConfig() + } + c.SSL.Finalize() + + if c.Token == nil { + c.Token = String("") + } + + if c.UnwrapToken == nil { + c.UnwrapToken = Bool(DefaultVaultUnwrapToken) + } +} + +// GoString defines the printable version of this struct. +func (c *VaultConfig) GoString() string { + if c == nil { + return "(*VaultConfig)(nil)" + } + + return fmt.Sprintf("&VaultConfig{"+ + "Enabled:%s, "+ + "Address:%s, "+ + "Token:%t, "+ + "UnwrapToken:%s, "+ + "RenewToken:%s, "+ + "Retry:%#v, "+ + "SSL:%#v"+ + "}", + BoolGoString(c.Enabled), + StringGoString(c.Address), + StringPresent(c.Token), + BoolGoString(c.UnwrapToken), + BoolGoString(c.RenewToken), + c.Retry, + c.SSL, + ) +} diff --git a/vendor/github.com/hashicorp/consul-template/config/wait.go b/vendor/github.com/hashicorp/consul-template/config/wait.go new file mode 100644 index 000000000000..8e3d56c19683 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/config/wait.go @@ -0,0 +1,191 @@ +package config + +import ( + "errors" + "fmt" + "strings" + "time" +) + +var ( + // ErrWaitStringEmpty is the error returned when wait is specified as an empty + // string. 
+ ErrWaitStringEmpty = errors.New("wait: cannot be empty") + + // ErrWaitInvalidFormat is the error returned when the wait is specified + // incorrectly. + ErrWaitInvalidFormat = errors.New("wait: invalid format") + + // ErrWaitNegative is the error returned with the wait is negative. + ErrWaitNegative = errors.New("wait: cannot be negative") + + // ErrWaitMinLTMax is the error returned with the minimum wait time is not + // less than the maximum wait time. + ErrWaitMinLTMax = errors.New("wait: min must be less than max") +) + +// WaitConfig is the Min/Max duration used by the Watcher +type WaitConfig struct { + // Enabled determines if this wait is enabled. + Enabled *bool `mapstructure:"bool"` + + // Min and Max are the minimum and maximum time, respectively, to wait for + // data changes before rendering a new template to disk. + Min *time.Duration `mapstructure:"min"` + Max *time.Duration `mapstructure:"max"` +} + +// DefaultWaitConfig is the default configuration. +func DefaultWaitConfig() *WaitConfig { + return &WaitConfig{} +} + +// Copy returns a deep copy of this configuration. +func (c *WaitConfig) Copy() *WaitConfig { + if c == nil { + return nil + } + + var o WaitConfig + o.Enabled = c.Enabled + o.Min = c.Min + o.Max = c.Max + return &o +} + +// Merge combines all values in this configuration with the values in the other +// configuration, with values in the other configuration taking precedence. +// Maps and slices are merged, most other values are overwritten. Complex +// structs define their own merge functionality. +func (c *WaitConfig) Merge(o *WaitConfig) *WaitConfig { + if c == nil { + if o == nil { + return nil + } + return o.Copy() + } + + if o == nil { + return c.Copy() + } + + r := c.Copy() + + if o.Enabled != nil { + r.Enabled = o.Enabled + } + + if o.Min != nil { + r.Min = o.Min + } + + if o.Max != nil { + r.Max = o.Max + } + + return r +} + +// Finalize ensures there no nil pointers. +func (c *WaitConfig) Finalize() { + if c.Enabled == nil { + c.Enabled = Bool(TimeDurationPresent(c.Min)) + } + + if c.Min == nil { + c.Min = TimeDuration(0 * time.Second) + } + + if c.Max == nil { + c.Max = TimeDuration(4 * *c.Min) + } +} + +// GoString defines the printable version of this struct. +func (c *WaitConfig) GoString() string { + if c == nil { + return "(*WaitConfig)(nil)" + } + + return fmt.Sprintf("&WaitConfig{"+ + "Enabled:%s, "+ + "Min:%s, "+ + "Max:%s"+ + "}", + BoolGoString(c.Enabled), + TimeDurationGoString(c.Min), + TimeDurationGoString(c.Max), + ) +} + +// ParseWaitConfig parses a string of the format `minimum(:maximum)` into a +// WaitConfig. 
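// Illustrative sketch (assumes it sits in this package): when only the minimum
// quiescence time is supplied, Finalize enables the wait and derives the
// maximum as four times the minimum. The 5s value is hypothetical.
func exampleWaitDefaults() {
	w := &WaitConfig{Min: TimeDuration(5 * time.Second)}
	w.Finalize()

	fmt.Println(BoolVal(w.Enabled))     // true, inferred from Min
	fmt.Println(TimeDurationVal(w.Max)) // 20s (4 * Min)
}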
+func ParseWaitConfig(s string) (*WaitConfig, error) { + s = strings.TrimSpace(s) + if len(s) < 1 { + return nil, ErrWaitStringEmpty + } + + parts := strings.Split(s, ":") + + var min, max time.Duration + var err error + + switch len(parts) { + case 1: + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max = 4 * min + case 2: + min, err = time.ParseDuration(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + + max, err = time.ParseDuration(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + default: + return nil, ErrWaitInvalidFormat + } + + if min < 0 || max < 0 { + return nil, ErrWaitNegative + } + + if max < min { + return nil, ErrWaitMinLTMax + } + + var c WaitConfig + c.Min = TimeDuration(min) + c.Max = TimeDuration(max) + + return &c, nil +} + +// WaitVar implements the Flag.Value interface and allows the user to specify +// a watch interval using Go's flag parsing library. +type WaitVar WaitConfig + +// Set sets the value in the format min[:max] for a wait timer. +func (w *WaitVar) Set(value string) error { + wait, err := ParseWaitConfig(value) + if err != nil { + return err + } + + w.Min = wait.Min + w.Max = wait.Max + + return nil +} + +// String returns the string format for this wait variable +func (w *WaitVar) String() string { + return fmt.Sprintf("%s:%s", w.Min, w.Max) +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go new file mode 100644 index 000000000000..398fbc50e3a1 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go @@ -0,0 +1,92 @@ +package dependency + +import ( + "log" + "net/url" + "sort" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogDatacentersQuery)(nil) + + // CatalogDatacentersQuerySleepTime is the amount of time to sleep between + // queries, since the endpoint does not support blocking queries. + CatalogDatacentersQuerySleepTime = 15 * time.Second +) + +// CatalogDatacentersQuery is the dependency to query all datacenters +type CatalogDatacentersQuery struct { + stopCh chan struct{} +} + +// NewCatalogDatacentersQuery creates a new datacenter dependency. +func NewCatalogDatacentersQuery() (*CatalogDatacentersQuery, error) { + return &CatalogDatacentersQuery{ + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of strings representing the datacenters +func (d *CatalogDatacentersQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + opts = opts.Merge(&QueryOptions{}) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/datacenters", + RawQuery: opts.String(), + }) + + // This is pretty ghetto, but the datacenters endpoint does not support + // blocking queries, so we are going to "fake it until we make it". When we + // first query, the LastIndex will be "0", meaning we should immediately + // return data, but future calls will include a LastIndex. If we have a + // LastIndex in the query metadata, sleep for 15 seconds before asking Consul + // again. + // + // This is probably okay given the frequency in which datacenters actually + // change, but is technically not edge-triggering. 
+ if opts.WaitIndex != 0 { + log.Printf("[TRACE] %s: long polling for %s", d, CatalogDatacentersQuerySleepTime) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(CatalogDatacentersQuerySleepTime): + } + } + + result, err := clients.Consul().Catalog().Datacenters() + if err != nil { + return nil, nil, errors.Wrapf(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(result)) + + sort.Strings(result) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *CatalogDatacentersQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogDatacentersQuery) String() string { + return "catalog.datacenters" +} + +// Stop terminates this dependency's fetch. +func (d *CatalogDatacentersQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogDatacentersQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go index 74e358aa962d..d2e10b162cfe 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go @@ -2,29 +2,44 @@ package dependency import ( "encoding/gob" - "errors" "fmt" "log" + "net/url" "regexp" "sort" - "sync" - "github.com/hashicorp/consul/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogNodeQuery)(nil) + + // CatalogNodeQueryRe is the regular expression to use. + CatalogNodeQueryRe = regexp.MustCompile(`\A` + nameRe + dcRe + `\z`) ) func init() { - gob.Register([]*NodeDetail{}) - gob.Register([]*NodeService{}) + gob.Register([]*CatalogNode{}) + gob.Register([]*CatalogNodeService{}) +} + +// CatalogNodeQuery represents a single node from the Consul catalog. +type CatalogNodeQuery struct { + stopCh chan struct{} + + dc string + name string } -// NodeDetail is a wrapper around the node and its services. -type NodeDetail struct { +// CatalogNode is a wrapper around the node and its services. +type CatalogNode struct { Node *Node - Services NodeServiceList + Services []*CatalogNodeService } -// NodeService is a service on a single node. -type NodeService struct { +// CatalogNodeService is a service on a single node. +type CatalogNodeService struct { ID string Service string Tags ServiceTags @@ -33,82 +48,71 @@ type NodeService struct { EnableTagOverride bool } -// CatalogNode represents a single node from the Consul catalog. -type CatalogNode struct { - sync.Mutex +// NewCatalogNodeQuery parses the given string into a dependency. If the name is +// empty then the name of the local agent is used. +func NewCatalogNodeQuery(s string) (*CatalogNodeQuery, error) { + if s != "" && !CatalogNodeQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.node: invalid format: %q", s) + } - rawKey string - dataCenter string - stopped bool - stopCh chan struct{} + m := regexpMatch(CatalogNodeQueryRe, s) + return &CatalogNodeQuery{ + dc: m["dc"], + name: m["name"], + stopCh: make(chan struct{}, 1), + }, nil } // Fetch queries the Consul API defined by the given client and returns a -// of NodeDetail object. -func (d *CatalogNode) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() +// of CatalogNode object. 
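// Illustrative sketch (assumes it sits in this package): the polling pattern
// used above for endpoints that do not support blocking queries. The loop
// sleeps for a fixed interval between queries but stays responsive to Stop()
// via the dependency's stop channel. The poll callback and 15s interval are
// hypothetical stand-ins.
func examplePollLoop(stopCh <-chan struct{}, poll func() error) error {
	for {
		if err := poll(); err != nil {
			return err
		}
		select {
		case <-stopCh:
			return ErrStopped // same sentinel the dependencies return
		case <-time.After(15 * time.Second):
			// interval elapsed; fall through and poll again
		}
	}
}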
+func (d *CatalogNodeQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: return nil, nil, ErrStopped + default: } - d.Unlock() - if opts == nil { - opts = &QueryOptions{} - } + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) - consulOpts := opts.consulQueryOptions() - if d.dataCenter != "" { - consulOpts.Datacenter = d.dataCenter - } + // Grab the name + name := d.name - consul, err := clients.Consul() - if err != nil { - return nil, nil, fmt.Errorf("catalog node: error getting client: %s", err) - } - - nodeName := d.rawKey - if nodeName == "" { - log.Printf("[DEBUG] (%s) getting local agent name", d.Display()) - nodeName, err = consul.Agent().NodeName() + if name == "" { + log.Printf("[TRACE] %s: getting local agent name", d) + var err error + name, err = clients.Consul().Agent().NodeName() if err != nil { - return nil, nil, fmt.Errorf("catalog node: error getting local agent: %s", err) + return nil, nil, errors.Wrapf(err, d.String()) } } - var n *api.CatalogNode - var qm *api.QueryMeta - dataCh := make(chan struct{}) - go func() { - log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) - n, qm, err = consul.Catalog().Node(nodeName, consulOpts) - close(dataCh) - }() - - select { - case <-d.stopCh: - return nil, nil, ErrStopped - case <-dataCh: - } - + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/node/" + name, + RawQuery: opts.String(), + }) + node, qm, err := clients.Consul().Catalog().Node(name, opts.ToConsulOpts()) if err != nil { - return nil, nil, fmt.Errorf("catalog node: error fetching: %s", err) + return nil, nil, errors.Wrap(err, d.String()) } + log.Printf("[TRACE] %s: returned response", d) + rm := &ResponseMetadata{ LastIndex: qm.LastIndex, LastContact: qm.LastContact, } - if n == nil { - log.Printf("[WARN] (%s) could not find node by that name", d.Display()) - var node *NodeDetail - return node, rm, nil + if node == nil { + log.Printf("[WARN] %s: no node exists with the name %q", d, name) + var node CatalogNode + return &node, rm, nil } - services := make(NodeServiceList, 0, len(n.Services)) - for _, v := range n.Services { - services = append(services, &NodeService{ + services := make([]*CatalogNodeService, 0, len(node.Services)) + for _, v := range node.Services { + services = append(services, &CatalogNodeService{ ID: v.ID, Service: v.Service, Tags: ServiceTags(deepCopyAndSortTags(v.Tags)), @@ -117,107 +121,54 @@ func (d *CatalogNode) Fetch(clients *ClientSet, opts *QueryOptions) (interface{} EnableTagOverride: v.EnableTagOverride, }) } - sort.Stable(services) + sort.Stable(ByService(services)) - node := &NodeDetail{ + detail := &CatalogNode{ Node: &Node{ - Node: n.Node.Node, - Address: n.Node.Address, - TaggedAddresses: n.Node.TaggedAddresses, + Node: node.Node.Node, + Address: node.Node.Address, + TaggedAddresses: node.Node.TaggedAddresses, }, Services: services, } - return node, rm, nil + return detail, rm, nil } // CanShare returns a boolean if this dependency is shareable. -func (d *CatalogNode) CanShare() bool { +func (d *CatalogNodeQuery) CanShare() bool { return false } -// HashCode returns a unique identifier. -func (d *CatalogNode) HashCode() string { - if d.dataCenter != "" { - return fmt.Sprintf("NodeDetail|%s@%s", d.rawKey, d.dataCenter) +// String returns the human-friendly version of this dependency. 
+func (d *CatalogNodeQuery) String() string { + name := d.name + if d.dc != "" { + name = name + "@" + d.dc } - return fmt.Sprintf("NodeDetail|%s", d.rawKey) -} -// Display prints the human-friendly output. -func (d *CatalogNode) Display() string { - if d.dataCenter != "" { - return fmt.Sprintf("node(%s@%s)", d.rawKey, d.dataCenter) + if name == "" { + return "catalog.node" } - return fmt.Sprintf(`"node(%s)"`, d.rawKey) + return fmt.Sprintf("catalog.node(%s)", name) } // Stop halts the dependency's fetch function. -func (d *CatalogNode) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } +func (d *CatalogNodeQuery) Stop() { + close(d.stopCh) } -// ParseCatalogNode parses a name name and optional datacenter value. -// If the name is empty or not provided then the current agent is used. -func ParseCatalogNode(s ...string) (*CatalogNode, error) { - switch len(s) { - case 0: - cn := &CatalogNode{stopCh: make(chan struct{})} - return cn, nil - case 1: - cn := &CatalogNode{ - rawKey: s[0], - stopCh: make(chan struct{}), - } - return cn, nil - case 2: - dc := s[1] - - re := regexp.MustCompile(`\A` + - `(@(?P[[:word:]\.\-]+))?` + - `\z`) - names := re.SubexpNames() - match := re.FindAllStringSubmatch(dc, -1) - - if len(match) == 0 { - return nil, errors.New("invalid node dependency format") - } - - r := match[0] - - m := map[string]string{} - for i, n := range r { - if names[i] != "" { - m[names[i]] = n - } - } - - nd := &CatalogNode{ - rawKey: s[0], - dataCenter: m["datacenter"], - stopCh: make(chan struct{}), - } - - return nd, nil - default: - return nil, fmt.Errorf("expected 0, 1, or 2 arguments, got %d", len(s)) - } +// Type returns the type of this dependency. +func (d *CatalogNodeQuery) Type() Type { + return TypeConsul } -// Sorting - -// NodeServiceList is a sortable list of node service names. -type NodeServiceList []*NodeService +// ByService is a sorter of node services by their service name and then ID. +type ByService []*CatalogNodeService -func (s NodeServiceList) Len() int { return len(s) } -func (s NodeServiceList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s NodeServiceList) Less(i, j int) bool { +func (s ByService) Len() int { return len(s) } +func (s ByService) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByService) Less(i, j int) bool { if s[i].Service == s[j].Service { return s[i].ID <= s[j].ID } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go index ad1d435b3a2c..059c5d0dc702 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go @@ -2,14 +2,21 @@ package dependency import ( "encoding/gob" - "errors" "fmt" "log" + "net/url" "regexp" "sort" - "sync" - "github.com/hashicorp/consul/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogNodesQuery)(nil) + + // CatalogNodesQueryRe is the regular expression to use. + CatalogNodesQueryRe = regexp.MustCompile(`\A` + dcRe + nearRe + `\z`) ) func init() { @@ -23,60 +30,53 @@ type Node struct { TaggedAddresses map[string]string } -// CatalogNodes is the representation of all registered nodes in Consul. -type CatalogNodes struct { - sync.Mutex +// CatalogNodesQuery is the representation of all registered nodes in Consul. 
+type CatalogNodesQuery struct { + stopCh chan struct{} - rawKey string - DataCenter string - stopped bool - stopCh chan struct{} + dc string + near string } -// Fetch queries the Consul API defined by the given client and returns a slice -// of Node objects -func (d *CatalogNodes) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped - } - d.Unlock() - - if opts == nil { - opts = &QueryOptions{} - } - - consulOpts := opts.consulQueryOptions() - if d.DataCenter != "" { - consulOpts.Datacenter = d.DataCenter - } - - consul, err := clients.Consul() - if err != nil { - return nil, nil, fmt.Errorf("catalog nodes: error getting client: %s", err) +// NewCatalogNodesQuery parses the given string into a dependency. If the name is +// empty then the name of the local agent is used. +func NewCatalogNodesQuery(s string) (*CatalogNodesQuery, error) { + if !CatalogNodesQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.nodes: invalid format: %q", s) } - var n []*api.Node - var qm *api.QueryMeta - dataCh := make(chan struct{}) - go func() { - log.Printf("[DEBUG] (%s) querying Consul with %+v", d.Display(), consulOpts) - n, qm, err = consul.Catalog().Nodes(consulOpts) - close(dataCh) - }() + m := regexpMatch(CatalogNodesQueryRe, s) + return &CatalogNodesQuery{ + dc: m["dc"], + near: m["near"], + stopCh: make(chan struct{}, 1), + }, nil +} +// Fetch queries the Consul API defined by the given client and returns a slice +// of Node objects +func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { select { case <-d.stopCh: return nil, nil, ErrStopped - case <-dataCh: + default: } + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/nodes", + RawQuery: opts.String(), + }) + n, qm, err := clients.Consul().Catalog().Nodes(opts.ToConsulOpts()) if err != nil { - return nil, nil, fmt.Errorf("catalog nodes: error fetching: %s", err) + return nil, nil, errors.Wrap(err, d.String()) } - log.Printf("[DEBUG] (%s) Consul returned %d nodes", d.Display(), len(n)) + log.Printf("[TRACE] %s: returned %d results", d, len(n)) nodes := make([]*Node, 0, len(n)) for _, node := range n { @@ -86,7 +86,7 @@ func (d *CatalogNodes) Fetch(clients *ClientSet, opts *QueryOptions) (interface{ TaggedAddresses: node.TaggedAddresses, }) } - sort.Stable(NodeList(nodes)) + sort.Stable(ByNode(nodes)) rm := &ResponseMetadata{ LastIndex: qm.LastIndex, @@ -97,84 +97,42 @@ func (d *CatalogNodes) Fetch(clients *ClientSet, opts *QueryOptions) (interface{ } // CanShare returns a boolean if this dependency is shareable. -func (d *CatalogNodes) CanShare() bool { +func (d *CatalogNodesQuery) CanShare() bool { return true } -// HashCode returns a unique identifier. -func (d *CatalogNodes) HashCode() string { - return fmt.Sprintf("CatalogNodes|%s", d.rawKey) -} - -// Display prints the human-friendly output. -func (d *CatalogNodes) Display() string { - if d.rawKey == "" { - return fmt.Sprintf(`"nodes"`) +// String returns the human-friendly version of this dependency. 
+func (d *CatalogNodesQuery) String() string { + name := "" + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near } - return fmt.Sprintf(`"nodes(%s)"`, d.rawKey) + if name == "" { + return "catalog.nodes" + } + return fmt.Sprintf("catalog.nodes(%s)", name) } // Stop halts the dependency's fetch function. -func (d *CatalogNodes) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } +func (d *CatalogNodesQuery) Stop() { + close(d.stopCh) } -// ParseCatalogNodes parses a string of the format @dc. -func ParseCatalogNodes(s ...string) (*CatalogNodes, error) { - switch len(s) { - case 0: - cn := &CatalogNodes{ - rawKey: "", - stopCh: make(chan struct{}), - } - return cn, nil - case 1: - dc := s[0] - - re := regexp.MustCompile(`\A` + - `(@(?P[[:word:]\.\-]+))?` + - `\z`) - names := re.SubexpNames() - match := re.FindAllStringSubmatch(dc, -1) - - if len(match) == 0 { - return nil, errors.New("invalid node dependency format") - } - - r := match[0] - - m := map[string]string{} - for i, n := range r { - if names[i] != "" { - m[names[i]] = n - } - } - - cn := &CatalogNodes{ - rawKey: dc, - DataCenter: m["datacenter"], - stopCh: make(chan struct{}), - } - - return cn, nil - default: - return nil, fmt.Errorf("expected 0 or 1 arguments, got %d", len(s)) - } +// Type returns the type of this dependency. +func (d *CatalogNodesQuery) Type() Type { + return TypeConsul } -// NodeList is a sortable list of node objects by name and then IP address. -type NodeList []*Node +// ByNode is a sortable list of nodes by name and then IP address. +type ByNode []*Node -func (s NodeList) Len() int { return len(s) } -func (s NodeList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s NodeList) Less(i, j int) bool { +func (s ByNode) Len() int { return len(s) } +func (s ByNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByNode) Less(i, j int) bool { if s[i].Node == s[j].Node { return s[i].Address <= s[j].Address } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go new file mode 100644 index 000000000000..b284fb6fb31b --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go @@ -0,0 +1,146 @@ +package dependency + +import ( + "encoding/gob" + "fmt" + "log" + "net/url" + "regexp" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogServiceQuery)(nil) + + // CatalogServiceQueryRe is the regular expression to use. + CatalogServiceQueryRe = regexp.MustCompile(`\A` + tagRe + nameRe + dcRe + nearRe + `\z`) +) + +func init() { + gob.Register([]*CatalogSnippet{}) +} + +// CatalogService is a catalog entry in Consul. +type CatalogService struct { + Node string + Address string + TaggedAddresses map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags ServiceTags + ServicePort int +} + +// CatalogServiceQuery is the representation of a requested catalog services +// dependency from inside a template. +type CatalogServiceQuery struct { + stopCh chan struct{} + + dc string + name string + near string + tag string +} + +// NewCatalogServiceQuery parses a string into a CatalogServiceQuery. 
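The ByNode and ByService sorters above follow the standard sort.Interface pattern; a standalone sketch, where Node is a stand-in carrying only the two fields the comparison uses:

package main

import (
	"fmt"
	"sort"
)

type Node struct{ Node, Address string }

// ByNode sorts by node name, then by address, matching the vendored sorter.
type ByNode []*Node

func (s ByNode) Len() int      { return len(s) }
func (s ByNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByNode) Less(i, j int) bool {
	if s[i].Node == s[j].Node {
		return s[i].Address <= s[j].Address
	}
	return s[i].Node < s[j].Node
}

func main() {
	nodes := []*Node{{"web2", "10.0.0.2"}, {"web1", "10.0.0.9"}, {"web1", "10.0.0.1"}}
	sort.Stable(ByNode(nodes))
	for _, n := range nodes {
		fmt.Println(n.Node, n.Address) // web1 10.0.0.1, web1 10.0.0.9, web2 10.0.0.2
	}
}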
+func NewCatalogServiceQuery(s string) (*CatalogServiceQuery, error) { + if !CatalogServiceQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.service: invalid format: %q", s) + } + + m := regexpMatch(CatalogServiceQueryRe, s) + return &CatalogServiceQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + name: m["name"], + near: m["near"], + tag: m["tag"], + }, nil +} + +// Fetch queries the Consul API defined by the given client and returns a slice +// of CatalogService objects. +func (d *CatalogServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) + + u := &url.URL{ + Path: "/v1/catalog/service/" + d.name, + RawQuery: opts.String(), + } + if d.tag != "" { + q := u.Query() + q.Set("tag", d.tag) + u.RawQuery = q.Encode() + } + log.Printf("[TRACE] %s: GET %s", d, u) + + entries, qm, err := clients.Consul().Catalog().Service(d.name, d.tag, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + + var list []*CatalogService + for _, s := range entries { + list = append(list, &CatalogService{ + Node: s.Node, + Address: s.Address, + TaggedAddresses: s.TaggedAddresses, + ServiceID: s.ServiceID, + ServiceName: s.ServiceName, + ServiceAddress: s.ServiceAddress, + ServiceTags: ServiceTags(deepCopyAndSortTags(s.ServiceTags)), + ServicePort: s.ServicePort, + }) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return list, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *CatalogServiceQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *CatalogServiceQuery) String() string { + name := d.name + if d.tag != "" { + name = d.tag + "." + name + } + if d.dc != "" { + name = name + "@" + d.dc + } + if d.near != "" { + name = name + "~" + d.near + } + return fmt.Sprintf("catalog.service(%s)", name) +} + +// Stop halts the dependency's fetch function. +func (d *CatalogServiceQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *CatalogServiceQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go index 33dc1527e3d1..06ce03a77be4 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go @@ -2,94 +2,88 @@ package dependency import ( "encoding/gob" - "errors" "fmt" "log" + "net/url" "regexp" "sort" - "sync" - "github.com/hashicorp/consul/api" + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*CatalogServicesQuery)(nil) + + // CatalogServicesQueryRe is the regular expression to use for CatalogNodesQuery. + CatalogServicesQueryRe = regexp.MustCompile(`\A` + dcRe + `\z`) ) func init() { - gob.Register([]*CatalogService{}) + gob.Register([]*CatalogSnippet{}) } -// CatalogService is a catalog entry in Consul. -type CatalogService struct { +// CatalogSnippet is a catalog entry in Consul. 
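A usage sketch, assuming the vendored package is importable at the path shown in the file headers; it shows how the tag.name@dc~near form round-trips through String():

package main

import (
	"fmt"

	dep "github.com/hashicorp/consul-template/dependency"
)

func main() {
	d, err := dep.NewCatalogServiceQuery("v1.redis@dc1~_agent")
	if err != nil {
		panic(err)
	}
	// The String() form doubles as the dependency's identity in the watcher.
	fmt.Println(d.String()) // catalog.service(v1.redis@dc1~_agent)
}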
+type CatalogSnippet struct { Name string Tags ServiceTags } -// CatalogServices is the representation of a requested catalog service +// CatalogServicesQuery is the representation of a requested catalog service // dependency from inside a template. -type CatalogServices struct { - sync.Mutex - - rawKey string - Name string - Tags []string - DataCenter string - stopped bool - stopCh chan struct{} -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of CatalogService objects. -func (d *CatalogServices) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped - } - d.Unlock() - - if opts == nil { - opts = &QueryOptions{} - } +type CatalogServicesQuery struct { + stopCh chan struct{} - consulOpts := opts.consulQueryOptions() - if d.DataCenter != "" { - consulOpts.Datacenter = d.DataCenter - } + dc string +} - consul, err := clients.Consul() - if err != nil { - return nil, nil, fmt.Errorf("catalog services: error getting client: %s", err) +// NewCatalogServicesQuery parses a string of the format @dc. +func NewCatalogServicesQuery(s string) (*CatalogServicesQuery, error) { + if !CatalogServicesQueryRe.MatchString(s) { + return nil, fmt.Errorf("catalog.services: invalid format: %q", s) } - var entries map[string][]string - var qm *api.QueryMeta - dataCh := make(chan struct{}) - go func() { - log.Printf("[DEBUG] (%s) querying Consul with %+v", d.Display(), consulOpts) - entries, qm, err = consul.Catalog().Services(consulOpts) - close(dataCh) - }() + m := regexpMatch(CatalogServicesQueryRe, s) + return &CatalogServicesQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + }, nil +} +// Fetch queries the Consul API defined by the given client and returns a slice +// of CatalogService objects. +func (d *CatalogServicesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { select { case <-d.stopCh: return nil, nil, ErrStopped - case <-dataCh: + default: } + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/catalog/services", + RawQuery: opts.String(), + }) + + entries, qm, err := clients.Consul().Catalog().Services(opts.ToConsulOpts()) if err != nil { - return nil, nil, fmt.Errorf("catalog services: error fetching: %s", err) + return nil, nil, errors.Wrap(err, d.String()) } - log.Printf("[DEBUG] (%s) Consul returned %d catalog services", d.Display(), len(entries)) + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) - var catalogServices []*CatalogService + var catalogServices []*CatalogSnippet for name, tags := range entries { - tags = deepCopyAndSortTags(tags) - catalogServices = append(catalogServices, &CatalogService{ + catalogServices = append(catalogServices, &CatalogSnippet{ Name: name, - Tags: ServiceTags(tags), + Tags: ServiceTags(deepCopyAndSortTags(tags)), }) } - sort.Stable(CatalogServicesList(catalogServices)) + sort.Stable(ByName(catalogServices)) rm := &ResponseMetadata{ LastIndex: qm.LastIndex, @@ -100,86 +94,34 @@ func (d *CatalogServices) Fetch(clients *ClientSet, opts *QueryOptions) (interfa } // CanShare returns a boolean if this dependency is shareable. -func (d *CatalogServices) CanShare() bool { +func (d *CatalogServicesQuery) CanShare() bool { return true } -// HashCode returns a unique identifier. 
-func (d *CatalogServices) HashCode() string { - return fmt.Sprintf("CatalogServices|%s", d.rawKey) -} - -// Display prints the human-friendly output. -func (d *CatalogServices) Display() string { - if d.rawKey == "" { - return fmt.Sprintf(`"services"`) +// String returns the human-friendly version of this dependency. +func (d *CatalogServicesQuery) String() string { + if d.dc != "" { + return fmt.Sprintf("catalog.services(@%s)", d.dc) } - - return fmt.Sprintf(`"services(%s)"`, d.rawKey) + return "catalog.services" } // Stop halts the dependency's fetch function. -func (d *CatalogServices) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } +func (d *CatalogServicesQuery) Stop() { + close(d.stopCh) } -// ParseCatalogServices parses a string of the format @dc. -func ParseCatalogServices(s ...string) (*CatalogServices, error) { - switch len(s) { - case 0: - cs := &CatalogServices{ - rawKey: "", - stopCh: make(chan struct{}), - } - return cs, nil - case 1: - dc := s[0] - - re := regexp.MustCompile(`\A` + - `(@(?P[[:word:]\.\-]+))?` + - `\z`) - names := re.SubexpNames() - match := re.FindAllStringSubmatch(dc, -1) - - if len(match) == 0 { - return nil, errors.New("invalid catalog service dependency format") - } - - r := match[0] - - m := map[string]string{} - for i, n := range r { - if names[i] != "" { - m[names[i]] = n - } - } - - nd := &CatalogServices{ - rawKey: dc, - DataCenter: m["datacenter"], - stopCh: make(chan struct{}), - } - - return nd, nil - default: - return nil, fmt.Errorf("expected 0 or 1 arguments, got %d", len(s)) - } +// Type returns the type of this dependency. +func (d *CatalogServicesQuery) Type() Type { + return TypeConsul } -/// --- Sorting - -// CatalogServicesList is a sortable slice of CatalogService structs. -type CatalogServicesList []*CatalogService +// ByName is a sortable slice of CatalogService structs. +type ByName []*CatalogSnippet -func (s CatalogServicesList) Len() int { return len(s) } -func (s CatalogServicesList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s CatalogServicesList) Less(i, j int) bool { +func (s ByName) Len() int { return len(s) } +func (s ByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByName) Less(i, j int) bool { if s[i].Name <= s[j].Name { return true } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go index 0dbe14f7cd09..7487baf35820 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go @@ -71,26 +71,17 @@ func NewClientSet() *ClientSet { // CreateConsulClient creates a new Consul API client from the given input. 
func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { - log.Printf("[INFO] (clients) creating consul/api client") - - // Generate the default config consulConfig := consulapi.DefaultConfig() - // Set the address if i.Address != "" { - log.Printf("[DEBUG] (clients) setting consul address to %q", i.Address) consulConfig.Address = i.Address } - // Configure the token if i.Token != "" { - log.Printf("[DEBUG] (clients) setting consul token") consulConfig.Token = i.Token } - // Add basic auth if i.AuthEnabled { - log.Printf("[DEBUG] (clients) setting basic auth") consulConfig.HttpAuth = &consulapi.HttpBasicAuth{ Username: i.AuthUsername, Password: i.AuthPassword, @@ -102,7 +93,6 @@ func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { // Configure SSL if i.SSLEnabled { - log.Printf("[DEBUG] (clients) enabling consul SSL") consulConfig.Scheme = "https" var tlsConfig tls.Config @@ -140,7 +130,6 @@ func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { if i.ServerName != "" { tlsConfig.ServerName = i.ServerName tlsConfig.InsecureSkipVerify = false - log.Printf("[DEBUG] (clients) using explicit consul TLS server host name: %s", tlsConfig.ServerName) } if !i.SSLVerify { log.Printf("[WARN] (clients) disabling consul SSL verification") @@ -161,23 +150,20 @@ func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { } // Save the data on ourselves + c.Lock() c.consul = &consulClient{ client: client, httpClient: consulConfig.HttpClient, } + c.Unlock() return nil } func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { - log.Printf("[INFO] (clients) creating vault/api client") - - // Generate the default config vaultConfig := vaultapi.DefaultConfig() - // Set the address if i.Address != "" { - log.Printf("[DEBUG] (clients) setting vault address to %q", i.Address) vaultConfig.Address = i.Address } @@ -186,7 +172,6 @@ func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { // Configure SSL if i.SSLEnabled { - log.Printf("[DEBUG] (clients) enabling vault SSL") var tlsConfig tls.Config // Custom certificate or certificate and key @@ -222,7 +207,6 @@ func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { if i.ServerName != "" { tlsConfig.ServerName = i.ServerName tlsConfig.InsecureSkipVerify = false - log.Printf("[DEBUG] (clients) using explicit vault TLS server host name: %s", tlsConfig.ServerName) } if !i.SSLVerify { log.Printf("[WARN] (clients) disabling vault SSL verification") @@ -244,13 +228,11 @@ func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { // Set the token if given if i.Token != "" { - log.Printf("[DEBUG] (clients) setting vault token") client.SetToken(i.Token) } // Check if we are unwrapping if i.UnwrapToken { - log.Printf("[INFO] (clients) unwrapping vault token") secret, err := client.Logical().Unwrap(i.Token) if err != nil { return fmt.Errorf("client set: vault unwrap: %s", err) @@ -272,40 +254,28 @@ func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { } // Save the data on ourselves + c.Lock() c.vault = &vaultClient{ client: client, httpClient: vaultConfig.HttpClient, } + c.Unlock() return nil } -// Consul returns the Consul client for this clientset, or an error if no -// Consul client has been set. -func (c *ClientSet) Consul() (*consulapi.Client, error) { +// Consul returns the Consul client for this set. 
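A sketch of how a caller is expected to build the client set before any Fetch runs; the input fields used here are the ones referenced in the function body above, and the address value is illustrative only:

package main

import (
	"log"

	dep "github.com/hashicorp/consul-template/dependency"
)

func main() {
	clients := dep.NewClientSet()
	if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{
		Address: "127.0.0.1:8500",
	}); err != nil {
		log.Fatal(err)
	}

	// The accessor below no longer returns an error, so it will panic with a
	// nil dereference if CreateConsulClient was never called first.
	_ = clients.Consul()
}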
+func (c *ClientSet) Consul() *consulapi.Client { c.RLock() defer c.RUnlock() - - if c.consul == nil { - return nil, fmt.Errorf("clientset: missing consul client") - } - cp := new(consulapi.Client) - *cp = *c.consul.client - return cp, nil + return c.consul.client } -// Vault returns the Vault client for this clientset, or an error if no -// Vault client has been set. -func (c *ClientSet) Vault() (*vaultapi.Client, error) { +// Vault returns the Consul client for this set. +func (c *ClientSet) Vault() *vaultapi.Client { c.RLock() defer c.RUnlock() - - if c.vault == nil { - return nil, fmt.Errorf("clientset: missing vault client") - } - cp := new(vaultapi.Client) - *cp = *c.vault.client - return cp, nil + return c.vault.client } // Stop closes all idle connections for any attached clients. diff --git a/vendor/github.com/hashicorp/consul-template/dependency/datacenters.go b/vendor/github.com/hashicorp/consul-template/dependency/datacenters.go deleted file mode 100644 index 25cc28e90261..000000000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/datacenters.go +++ /dev/null @@ -1,118 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "sort" - "sync" - "time" -) - -var sleepTime = 15 * time.Second - -// Datacenters is the dependency to query all datacenters -type Datacenters struct { - sync.Mutex - - rawKey string - - stopped bool - stopCh chan struct{} -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of strings representing the datacenters -func (d *Datacenters) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped - } - d.Unlock() - - if opts == nil { - opts = &QueryOptions{} - } - - log.Printf("[DEBUG] (%s) querying Consul with %+v", d.Display(), opts) - - // This is pretty ghetto, but the datacenters endpoint does not support - // blocking queries, so we are going to "fake it until we make it". When we - // first query, the LastIndex will be "0", meaning we should immediately - // return data, but future calls will include a LastIndex. If we have a - // LastIndex in the query metadata, sleep for 15 seconds before asking Consul - // again. - // - // This is probably okay given the frequency in which datacenters actually - // change, but is technically not edge-triggering. - if opts.WaitIndex != 0 { - log.Printf("[DEBUG] (%s) pretending to long-poll", d.Display()) - select { - case <-d.stopCh: - log.Printf("[DEBUG] (%s) received interrupt", d.Display()) - return nil, nil, ErrStopped - case <-time.After(sleepTime): - } - } - - consul, err := clients.Consul() - if err != nil { - return nil, nil, fmt.Errorf("datacenters: error getting client: %s", err) - } - - catalog := consul.Catalog() - result, err := catalog.Datacenters() - if err != nil { - return nil, nil, fmt.Errorf("datacenters: error fetching: %s", err) - } - - log.Printf("[DEBUG] (%s) Consul returned %d datacenters", d.Display(), len(result)) - sort.Strings(result) - - return respWithMetadata(result) -} - -// CanShare returns if this dependency is shareable. -func (d *Datacenters) CanShare() bool { - return true -} - -// HashCode returns the hash code for this dependency. -func (d *Datacenters) HashCode() string { - return fmt.Sprintf("Datacenters|%s", d.rawKey) -} - -// Display returns a string that should be displayed to the user in output (for -// example). 
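A sketch of the resulting calling convention inside Fetch implementations; fetchNodes and the hard-coded datacenter are illustrative only, not part of the vendored code:

package main

import (
	consulapi "github.com/hashicorp/consul/api"
	dep "github.com/hashicorp/consul-template/dependency"
)

// fetchNodes shows the new pattern: no (client, error) unpacking before the
// Consul API call, since the accessor now returns the client directly.
func fetchNodes(clients *dep.ClientSet, opts *dep.QueryOptions) ([]*consulapi.Node, error) {
	opts = opts.Merge(&dep.QueryOptions{Datacenter: "dc1"})
	n, _, err := clients.Consul().Catalog().Nodes(opts.ToConsulOpts())
	return n, err
}

func main() {}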
-func (d *Datacenters) Display() string {
-	if d.rawKey == "" {
-		return fmt.Sprintf(`"datacenters"`)
-	}
-
-	return fmt.Sprintf(`"datacenters(%s)"`, d.rawKey)
-}
-
-// Stop terminates this dependency's execution early.
-func (d *Datacenters) Stop() {
-	d.Lock()
-	defer d.Unlock()
-
-	if !d.stopped {
-		close(d.stopCh)
-		d.stopped = true
-	}
-}
-
-// ParseDatacenters creates a new datacenter dependency.
-func ParseDatacenters(s ...string) (*Datacenters, error) {
-	switch len(s) {
-	case 0:
-		dcs := &Datacenters{
-			rawKey: "",
-			stopCh: make(chan struct{}, 0),
-		}
-		return dcs, nil
-	default:
-		return nil, fmt.Errorf("expected 0 arguments, got %d", len(s))
-	}
-}
diff --git a/vendor/github.com/hashicorp/consul-template/dependency/dependency.go b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go
index f750257f17b1..1285d818c76d 100644
--- a/vendor/github.com/hashicorp/consul-template/dependency/dependency.go
+++ b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go
@@ -1,66 +1,52 @@
 package dependency
 
 import (
-	"errors"
-	"fmt"
+	"log"
+	"net/url"
+	"regexp"
 	"sort"
+	"strconv"
 	"time"
 
 	consulapi "github.com/hashicorp/consul/api"
 )
 
-// ErrStopped is a special error that is returned when a dependency is
-// prematurely stopped, usually due to a configuration reload or a process
-// interrupt.
-var ErrStopped = errors.New("dependency stopped")
+const (
+	dcRe     = `(@(?P<dc>[[:word:]\.\-\_]+))?`
+	keyRe    = `/?(?P<key>[^@]+)`
+	filterRe = `(\|(?P<filter>[[:word:]\,]+))?`
+	nameRe   = `(?P<name>[[:word:]\-\_]+)`
+	nearRe   = `(~(?P<near>[[:word:]\.\-\_]+))?`
+	prefixRe = `/?(?P<prefix>[^@]+)`
+	tagRe    = `((?P<tag>[[:word:]\.\-\_]+)\.)?`
+)
+
+type Type int
+
+const (
+	TypeConsul Type = iota
+	TypeVault
+	TypeLocal
+)
 
 // Dependency is an interface for a dependency that Consul Template is capable
 // of watching.
 type Dependency interface {
 	Fetch(*ClientSet, *QueryOptions) (interface{}, *ResponseMetadata, error)
 	CanShare() bool
-	HashCode() string
-	Display() string
+	String() string
 	Stop()
-}
-
-// FetchError is a special kind of error returned by the Fetch method that
-// contains additional metadata which informs the caller how to respond. This
-// error implements the standard Error interface, so it can be passed as a
-// regular error down the stack.
-type FetchError struct {
-	originalError error
-	shouldExit    bool
-}
-
-func (e *FetchError) Error() string {
-	return e.originalError.Error()
-}
-
-func (e *FetchError) OriginalError() error {
-	return e.originalError
-}
-
-func (e *FetchError) ShouldExit() bool {
-	return e.shouldExit
-}
-
-func ErrWithExit(err error) *FetchError {
-	return &FetchError{
-		originalError: err,
-		shouldExit:    true,
-	}
-}
-
-func ErrWithExitf(s string, i ...interface{}) *FetchError {
-	return ErrWithExit(fmt.Errorf(s, i...))
+	Type() Type
 }
 
 // ServiceTags is a slice of tags assigned to a Service
 type ServiceTags []string
 
 // Contains returns true if the tags exists in the ServiceTags slice.
+// This is deprecated and should not be used.
 func (t ServiceTags) Contains(s string) bool {
+	log.Printf("[WARN] .Tags.Contains is deprecated. Use the built-in\n" +
+		"functions 'in' or 'contains' with a pipe instead.")
 	for _, v := range t {
 		if v == s {
 			return true
@@ -73,18 +59,97 @@ func (t ServiceTags) Contains(s string) bool {
 // client-agnostic, and the dependency determines which, if any, of the options
 // to use.
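A minimal sketch of what the reworked interface requires from an implementation; StaticQuery is hypothetical and only meant to show the String and Type methods that replace HashCode and Display:

package main

import (
	"time"

	dep "github.com/hashicorp/consul-template/dependency"
)

// StaticQuery is a hypothetical dependency that always returns the same value.
type StaticQuery struct{ stopCh chan struct{} }

// Compile-time check that the sketch satisfies the revised interface.
var _ dep.Dependency = (*StaticQuery)(nil)

func NewStaticQuery() *StaticQuery {
	return &StaticQuery{stopCh: make(chan struct{}, 1)}
}

func (d *StaticQuery) Fetch(clients *dep.ClientSet, opts *dep.QueryOptions) (interface{}, *dep.ResponseMetadata, error) {
	return "hello", &dep.ResponseMetadata{LastIndex: uint64(time.Now().Unix())}, nil
}
func (d *StaticQuery) CanShare() bool { return false }
func (d *StaticQuery) String() string { return "static(hello)" }
func (d *StaticQuery) Stop()          { close(d.stopCh) }
func (d *StaticQuery) Type() dep.Type { return dep.TypeLocal }

func main() {}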
type QueryOptions struct { - AllowStale bool - WaitIndex uint64 - WaitTime time.Duration + AllowStale bool + Datacenter string + Near string + RequireConsistent bool + WaitIndex uint64 + WaitTime time.Duration } -// Converts the query options to Consul API ready query options. -func (r *QueryOptions) consulQueryOptions() *consulapi.QueryOptions { +func (q *QueryOptions) Merge(o *QueryOptions) *QueryOptions { + var r QueryOptions + + if q == nil { + if o == nil { + return &QueryOptions{} + } + r = *o + return &r + } + + r = *q + + if o == nil { + return &r + } + + if o.AllowStale != false { + r.AllowStale = o.AllowStale + } + + if o.Datacenter != "" { + r.Datacenter = o.Datacenter + } + + if o.Near != "" { + r.Near = o.Near + } + + if o.RequireConsistent != false { + r.RequireConsistent = o.RequireConsistent + } + + if o.WaitIndex != 0 { + r.WaitIndex = o.WaitIndex + } + + if o.WaitTime != 0 { + r.WaitTime = o.WaitTime + } + + return &r +} + +func (q *QueryOptions) ToConsulOpts() *consulapi.QueryOptions { return &consulapi.QueryOptions{ - AllowStale: r.AllowStale, - WaitIndex: r.WaitIndex, - WaitTime: r.WaitTime, + AllowStale: q.AllowStale, + Datacenter: q.Datacenter, + Near: q.Near, + RequireConsistent: q.RequireConsistent, + WaitIndex: q.WaitIndex, + WaitTime: q.WaitTime, + } +} + +func (q *QueryOptions) String() string { + u := &url.Values{} + + if q.AllowStale { + u.Add("stale", strconv.FormatBool(q.AllowStale)) + } + + if q.Datacenter != "" { + u.Add("dc", q.Datacenter) + } + + if q.Near != "" { + u.Add("near", q.Near) } + + if q.RequireConsistent { + u.Add("consistent", strconv.FormatBool(q.RequireConsistent)) + } + + if q.WaitIndex != 0 { + u.Add("index", strconv.FormatUint(q.WaitIndex, 10)) + } + + if q.WaitTime != 0 { + u.Add("wait", q.WaitTime.String()) + } + + return u.Encode() } // ResponseMetadata is a struct that contains metadata about the response. This @@ -92,6 +157,7 @@ func (r *QueryOptions) consulQueryOptions() *consulapi.QueryOptions { type ResponseMetadata struct { LastIndex uint64 LastContact time.Duration + Block bool } // deepCopyAndSortTags deep copies the tags in the given string slice and then @@ -113,3 +179,23 @@ func respWithMetadata(i interface{}) (interface{}, *ResponseMetadata, error) { LastIndex: uint64(time.Now().Unix()), }, nil } + +// regexpMatch matches the given regexp and extracts the match groups into a +// named map. +func regexpMatch(re *regexp.Regexp, q string) map[string]string { + names := re.SubexpNames() + match := re.FindAllStringSubmatch(q, -1) + + if len(match) == 0 { + return map[string]string{} + } + + m := map[string]string{} + for i, n := range match[0] { + if names[i] != "" { + m[names[i]] = n + } + } + + return m +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/env.go b/vendor/github.com/hashicorp/consul-template/dependency/env.go new file mode 100644 index 000000000000..ec84980cfe94 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/env.go @@ -0,0 +1,84 @@ +package dependency + +import ( + "fmt" + "log" + "os" + "strings" + "time" +) + +var ( + // Ensure implements + _ Dependency = (*EnvQuery)(nil) + + // EnvQuerySleepTime is the amount of time to sleep between queries. Since + // it's not supporting to change a running processes' environment, this can + // be a fairly large value. + EnvQuerySleepTime = 5 * time.Minute +) + +// EnvQuery represents a local file dependency. 
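A sketch of the Merge and String semantics defined above: non-zero fields of the argument win, and String renders the options as the query string seen in the TRACE log lines:

package main

import (
	"fmt"
	"time"

	dep "github.com/hashicorp/consul-template/dependency"
)

func main() {
	base := &dep.QueryOptions{WaitIndex: 100, WaitTime: 5 * time.Minute}
	merged := base.Merge(&dep.QueryOptions{Datacenter: "dc1"})

	// url.Values.Encode sorts keys, so the rendered query string is stable.
	fmt.Println(merged.String()) // dc=dc1&index=100&wait=5m0s
}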
+type EnvQuery struct { + stopCh chan struct{} + + key string + stat os.FileInfo +} + +// NewEnvQuery creates a file dependency from the given key. +func NewEnvQuery(s string) (*EnvQuery, error) { + s = strings.TrimSpace(s) + if s == "" { + return nil, fmt.Errorf("env: invalid format: %q", s) + } + + return &EnvQuery{ + key: s, + stopCh: make(chan struct{}, 1), + }, nil +} + +// Fetch retrieves this dependency and returns the result or any errors that +// occur in the process. +func (d *EnvQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + opts = opts.Merge(&QueryOptions{}) + + log.Printf("[TRACE] %s: ENV %s", d, d.key) + + if opts.WaitIndex != 0 { + log.Printf("[TRACE] %s: long polling for %s", d, EnvQuerySleepTime) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(EnvQuerySleepTime): + } + } + + result := os.Getenv(d.key) + + log.Printf("[TRACE] %s: returned result", d) + + return respWithMetadata(result) +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *EnvQuery) CanShare() bool { + return false +} + +// Stop halts the dependency's fetch function. +func (d *EnvQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *EnvQuery) String() string { + return fmt.Sprintf("env(%s)", d.key) +} + +// Type returns the type of this dependency. +func (d *EnvQuery) Type() Type { + return TypeLocal +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/errors.go b/vendor/github.com/hashicorp/consul-template/dependency/errors.go new file mode 100644 index 000000000000..2857ac9b685a --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/errors.go @@ -0,0 +1,11 @@ +package dependency + +import "errors" + +// ErrStopped is a special error that is returned when a dependency is +// prematurely stopped, usually due to a configuration reload or a process +// interrupt. +var ErrStopped = errors.New("dependency stopped") + +// ErrContinue is a special error which says to continue (retry) on error. +var ErrContinue = errors.New("dependency continue") diff --git a/vendor/github.com/hashicorp/consul-template/dependency/file.go b/vendor/github.com/hashicorp/consul-template/dependency/file.go index c9fbb1160c35..3f9fb52e8d19 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/file.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/file.go @@ -1,135 +1,129 @@ package dependency import ( - "errors" "fmt" "io/ioutil" "log" "os" - "sync" + "strings" "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*FileQuery)(nil) + + // FileQuerySleepTime is the amount of time to sleep between queries, since + // the fsnotify library is not compatible with solaris and other OSes yet. + FileQuerySleepTime = 2 * time.Second ) -// File represents a local file dependency. -type File struct { - sync.Mutex - mutex sync.RWMutex - rawKey string - lastStat os.FileInfo - stopped bool - stopCh chan struct{} +// FileQuery represents a local file dependency. +type FileQuery struct { + stopCh chan struct{} + + path string + stat os.FileInfo } -// Fetch retrieves this dependency and returns the result or any errors that -// occur in the process. -func (d *File) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped +// NewFileQuery creates a file dependency from the given path. 
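A sketch of the simulated long-poll: the first Fetch returns immediately because WaitIndex is zero, and passing the returned LastIndex back makes the next Fetch sleep for EnvQuerySleepTime before re-reading the variable (shortened here purely for the demo):

package main

import (
	"fmt"
	"os"
	"time"

	dep "github.com/hashicorp/consul-template/dependency"
)

func main() {
	os.Setenv("GREETING", "hello")

	d, err := dep.NewEnvQuery("GREETING")
	if err != nil {
		panic(err)
	}

	// Shorten the simulated poll interval for the demo only.
	dep.EnvQuerySleepTime = 1 * time.Second

	val, rm, err := d.Fetch(nil, nil) // first call returns immediately
	if err != nil {
		panic(err)
	}
	fmt.Println(val) // hello

	val, _, _ = d.Fetch(nil, &dep.QueryOptions{WaitIndex: rm.LastIndex}) // sleeps ~1s, then re-reads
	fmt.Println(val) // hello
}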
+func NewFileQuery(s string) (*FileQuery, error) { + s = strings.TrimSpace(s) + if s == "" { + return nil, fmt.Errorf("file: invalid format: %q", s) } - d.Unlock() - var err error - var newStat os.FileInfo - var data []byte + return &FileQuery{ + stopCh: make(chan struct{}, 1), + path: s, + }, nil +} - dataCh := make(chan struct{}) - go func() { - log.Printf("[DEBUG] (%s) querying file", d.Display()) - newStat, err = d.watch() - close(dataCh) - }() +// Fetch retrieves this dependency and returns the result or any errors that +// occur in the process. +func (d *FileQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + log.Printf("[TRACE] %s: READ %s", d, d.path) select { case <-d.stopCh: - return nil, nil, ErrStopped - case <-dataCh: - } + log.Printf("[TRACE] %s: stopped", d) + return "", nil, ErrStopped + case r := <-d.watch(d.stat): + if r.err != nil { + return "", nil, errors.Wrap(r.err, d.String()) + } - if err != nil { - return nil, nil, fmt.Errorf("file: error watching: %s", err) - } + log.Printf("[TRACE] %s: reported change", d) - d.mutex.Lock() - defer d.mutex.Unlock() - d.lastStat = newStat + data, err := ioutil.ReadFile(d.path) + if err != nil { + return "", nil, errors.Wrap(err, d.String()) + } - if data, err = ioutil.ReadFile(d.rawKey); err == nil { + d.stat = r.stat return respWithMetadata(string(data)) } - return nil, nil, fmt.Errorf("file: error reading: %s", err) } // CanShare returns a boolean if this dependency is shareable. -func (d *File) CanShare() bool { +func (d *FileQuery) CanShare() bool { return false } -// HashCode returns a unique identifier. -func (d *File) HashCode() string { - return fmt.Sprintf("StoreKeyPrefix|%s", d.rawKey) +// Stop halts the dependency's fetch function. +func (d *FileQuery) Stop() { + close(d.stopCh) } -// Display prints the human-friendly output. -func (d *File) Display() string { - return fmt.Sprintf(`"file(%s)"`, d.rawKey) +// String returns the human-friendly version of this dependency. +func (d *FileQuery) String() string { + return fmt.Sprintf("file(%s)", d.path) } -// Stop halts the dependency's fetch function. -func (d *File) Stop() { - d.Lock() - defer d.Unlock() +// Type returns the type of this dependency. +func (d *FileQuery) Type() Type { + return TypeLocal +} - if !d.stopped { - close(d.stopCh) - d.stopped = true - } +type watchResult struct { + stat os.FileInfo + err error } // watch watchers the file for changes -func (d *File) watch() (os.FileInfo, error) { - for { - stat, err := os.Stat(d.rawKey) - if err != nil { - return nil, err - } - - changed := func(d *File, stat os.FileInfo) bool { - d.mutex.RLock() - defer d.mutex.RUnlock() - - if d.lastStat == nil { - return true - } - if d.lastStat.Size() != stat.Size() { - return true +func (d *FileQuery) watch(lastStat os.FileInfo) <-chan *watchResult { + ch := make(chan *watchResult, 1) + + go func(lastStat os.FileInfo) { + for { + stat, err := os.Stat(d.path) + if err != nil { + select { + case <-d.stopCh: + return + case ch <- &watchResult{err: err}: + return + } } - if d.lastStat.ModTime() != stat.ModTime() { - return true + changed := lastStat == nil || + lastStat.Size() != stat.Size() || + lastStat.ModTime() != stat.ModTime() + + if changed { + select { + case <-d.stopCh: + return + case ch <- &watchResult{stat: stat}: + return + } } - return false - }(d, stat) - - if changed { - return stat, nil + time.Sleep(FileQuerySleepTime) } - time.Sleep(3 * time.Second) - } -} - -// ParseFile creates a file dependency from the given path. 
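A sketch of the polling behaviour: the first Fetch always returns because there is no previous stat to compare against, while later fetches wait for a size or modification-time change:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	dep "github.com/hashicorp/consul-template/dependency"
)

func main() {
	f, err := ioutil.TempFile("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if err := ioutil.WriteFile(f.Name(), []byte("v1"), 0644); err != nil {
		panic(err)
	}

	d, err := dep.NewFileQuery(f.Name())
	if err != nil {
		panic(err)
	}

	// First fetch returns right away; a second Fetch would block until the
	// file's size or mtime changes.
	data, _, err := d.Fetch(nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(data) // v1
}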
-func ParseFile(s string) (*File, error) { - if len(s) == 0 { - return nil, errors.New("cannot specify empty file dependency") - } - - kd := &File{ - rawKey: s, - stopCh: make(chan struct{}), - } + }(lastStat) - return kd, nil + return ch } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go index ac6541f6120b..44b31f6c2606 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go @@ -2,27 +2,21 @@ package dependency import ( "encoding/gob" - "errors" "fmt" "log" + "net/url" "regexp" "sort" "strings" - "sync" "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" ) -func init() { - gob.Register([]*HealthService{}) -} - const ( HealthAny = "any" HealthPassing = "passing" HealthWarning = "warning" - HealthUnknown = "unknown" HealthCritical = "critical" HealthMaint = "maintenance" @@ -30,6 +24,18 @@ const ( ServiceMaint = "_service_maintenance:" ) +var ( + // Ensure implements + _ Dependency = (*HealthServiceQuery)(nil) + + // HealthServiceQueryRe is the regular expression to use. + HealthServiceQueryRe = regexp.MustCompile(`\A` + tagRe + nameRe + dcRe + nearRe + filterRe + `\z`) +) + +func init() { + gob.Register([]*HealthService{}) +} + // HealthService is a service entry in Consul. type HealthService struct { Node string @@ -40,371 +46,188 @@ type HealthService struct { Tags ServiceTags Checks []*api.HealthCheck Status string - Port uint64 + Port int +} + +// HealthServiceQuery is the representation of all a service query in Consul. +type HealthServiceQuery struct { + stopCh chan struct{} + + dc string + filters []string + name string + near string + tag string } -// HealthServices is the struct that is formed from the dependency inside a -// template. -type HealthServices struct { - sync.Mutex - - rawKey string - Name string - Tag string - DataCenter string - StatusFilter ServiceStatusFilter - stopped bool - stopCh chan struct{} +// NewHealthServiceQuery processes the strings to build a service dependency. +func NewHealthServiceQuery(s string) (*HealthServiceQuery, error) { + if !HealthServiceQueryRe.MatchString(s) { + return nil, fmt.Errorf("health.service: invalid format: %q", s) + } + + m := regexpMatch(HealthServiceQueryRe, s) + + var filters []string + if filter := m["filter"]; filter != "" { + split := strings.Split(filter, ",") + for _, f := range split { + f = strings.TrimSpace(f) + switch f { + case HealthAny, + HealthPassing, + HealthWarning, + HealthCritical, + HealthMaint: + filters = append(filters, f) + case "": + default: + return nil, fmt.Errorf("health.service: invalid filter: %q in %q", f, s) + } + } + sort.Strings(filters) + } else { + filters = []string{HealthPassing} + } + + return &HealthServiceQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + filters: filters, + name: m["name"], + near: m["near"], + tag: m["tag"], + }, nil } // Fetch queries the Consul API defined by the given client and returns a slice // of HealthService objects. 
-func (d *HealthServices) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() +func (d *HealthServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: return nil, nil, ErrStopped + default: } - d.Unlock() - if opts == nil { - opts = &QueryOptions{} - } + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + Near: d.near, + }) - consulOpts := opts.consulQueryOptions() - if d.DataCenter != "" { - consulOpts.Datacenter = d.DataCenter + u := &url.URL{ + Path: "/v1/health/service/" + d.name, + RawQuery: opts.String(), } - - onlyHealthy := false - if d.StatusFilter == nil { - onlyHealthy = true + if d.tag != "" { + q := u.Query() + q.Set("tag", d.tag) + u.RawQuery = q.Encode() } + log.Printf("[TRACE] %s: GET %s", d, u) - consul, err := clients.Consul() - if err != nil { - return nil, nil, fmt.Errorf("health services: error getting client: %s", err) - } - - var entries []*api.ServiceEntry - var qm *api.QueryMeta - dataCh := make(chan struct{}) - go func() { - log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) - entries, qm, err = consul.Health().Service(d.Name, d.Tag, onlyHealthy, consulOpts) - close(dataCh) - }() - - select { - case <-d.stopCh: - return nil, nil, ErrStopped - case <-dataCh: - } + // Check if a user-supplied filter was given. If so, we may be querying for + // more than healthy services, so we need to implement client-side filtering. + passingOnly := len(d.filters) == 1 && d.filters[0] == HealthPassing + entries, qm, err := clients.Consul().Health().Service(d.name, d.tag, passingOnly, opts.ToConsulOpts()) if err != nil { - return nil, nil, fmt.Errorf("health services: error fetching: %s", err) + return nil, nil, errors.Wrap(err, d.String()) } - log.Printf("[DEBUG] (%s) Consul returned %d services", d.Display(), len(entries)) - - services := make([]*HealthService, 0, len(entries)) + log.Printf("[TRACE] %s: returned %d results", d, len(entries)) + list := make([]*HealthService, 0, len(entries)) for _, entry := range entries { // Get the status of this service from its checks. - status, err := statusFromChecks(entry.Checks) - if err != nil { - return nil, nil, fmt.Errorf("health services: "+ - "error getting status from checks: %s", err) - } + status := entry.Checks.AggregatedStatus() // If we are not checking only healthy services, filter out services that do // not match the given filter. - if d.StatusFilter != nil && !d.StatusFilter.Accept(status) { + if !acceptStatus(d.filters, status) { continue } - // Sort the tags. - tags := deepCopyAndSortTags(entry.Service.Tags) - // Get the address of the service, falling back to the address of the node. 
- var address string - if entry.Service.Address != "" { - address = entry.Service.Address - } else { + address := entry.Service.Address + if address == "" { address = entry.Node.Address } - services = append(services, &HealthService{ + list = append(list, &HealthService{ Node: entry.Node.Node, NodeAddress: entry.Node.Address, Address: address, ID: entry.Service.ID, Name: entry.Service.Service, - Tags: tags, + Tags: ServiceTags(deepCopyAndSortTags(entry.Service.Tags)), Status: status, Checks: entry.Checks, - Port: uint64(entry.Service.Port), + Port: entry.Service.Port, }) } - log.Printf("[DEBUG] (%s) %d services after health check status filtering", d.Display(), len(services)) + log.Printf("[TRACE] %s: returned %d results after filtering", d, len(list)) - sort.Stable(HealthServiceList(services)) + sort.Stable(ByNodeThenID(list)) rm := &ResponseMetadata{ LastIndex: qm.LastIndex, LastContact: qm.LastContact, } - return services, rm, nil + return list, rm, nil } // CanShare returns a boolean if this dependency is shareable. -func (d *HealthServices) CanShare() bool { +func (d *HealthServiceQuery) CanShare() bool { return true } -// HashCode returns a unique identifier. -func (d *HealthServices) HashCode() string { - return fmt.Sprintf("HealthServices|%s", d.rawKey) -} - -// Display prints the human-friendly output. -func (d *HealthServices) Display() string { - return fmt.Sprintf(`"service(%s)"`, d.rawKey) -} - // Stop halts the dependency's fetch function. -func (d *HealthServices) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } +func (d *HealthServiceQuery) Stop() { + close(d.stopCh) } -// ParseHealthServices processes the incoming strings to build a service dependency. -// -// Supported arguments -// ParseHealthServices("service_id") -// ParseHealthServices("service_id", "health_check") -// -// Where service_id is in the format of service(.tag(@datacenter)) -// and health_check is either "any" or "passing". -// -// If no health_check is provided then its the same as "passing". -func ParseHealthServices(s ...string) (*HealthServices, error) { - var query string - var filter ServiceStatusFilter - var err error - - switch len(s) { - case 1: - query = s[0] - filter, err = NewServiceStatusFilter("") - if err != nil { - return nil, err - } - case 2: - query = s[0] - filter, err = NewServiceStatusFilter(s[1]) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("expected 1 or 2 arguments, got %d", len(s)) +// String returns the human-friendly version of this dependency. +func (d *HealthServiceQuery) String() string { + name := d.name + if d.tag != "" { + name = d.tag + "." 
+ name } - - if len(query) == 0 { - return nil, errors.New("cannot specify empty health service dependency") + if d.dc != "" { + name = name + "@" + d.dc } - - re := regexp.MustCompile(`\A` + - `((?P[[:word:]\-.]+)\.)?` + - `((?P[[:word:]\-/_]+))` + - `(@(?P[[:word:]\.\-]+))?(:(?P[0-9]+))?` + - `\z`) - names := re.SubexpNames() - match := re.FindAllStringSubmatch(query, -1) - - if len(match) == 0 { - return nil, errors.New("invalid health service dependency format") + if d.near != "" { + name = name + "~" + d.near } - - r := match[0] - - m := map[string]string{} - for i, n := range r { - if names[i] != "" { - m[names[i]] = n - } - } - - tag, name, datacenter, port := m["tag"], m["name"], m["datacenter"], m["port"] - - if name == "" { - return nil, errors.New("name part is required") - } - - if port != "" { - log.Printf("[WARN] specifying a port in a 'service' query is not "+ - "supported - please remove the port from the query %q", query) - } - - var key string - if filter == nil { - key = query - } else { - key = fmt.Sprintf("%s %s", query, filter) - } - - sd := &HealthServices{ - rawKey: key, - Name: name, - Tag: tag, - DataCenter: datacenter, - StatusFilter: filter, - stopCh: make(chan struct{}), - } - - return sd, nil -} - -// statusFromChecks accepts a list of checks and returns the most likely status -// given those checks. Any "critical" statuses will automatically mark the -// service as critical. After that, any "unknown" statuses will mark as -// "unknown". If any warning checks exist, the status will be marked as -// "warning", and finally "passing". If there are no checks, the service will be -// marked as "passing". -func statusFromChecks(checks []*api.HealthCheck) (string, error) { - var passing, warning, unknown, critical, maintenance bool - for _, check := range checks { - if check.CheckID == NodeMaint || strings.HasPrefix(check.CheckID, ServiceMaint) { - maintenance = true - continue - } - - switch check.Status { - case "passing": - passing = true - case "warning": - warning = true - case "unknown": - unknown = true - case "critical": - critical = true - default: - return "", fmt.Errorf("unknown status: %q", check.Status) - } - } - - switch { - case maintenance: - return HealthMaint, nil - case critical: - return HealthCritical, nil - case unknown: - return HealthUnknown, nil - case warning: - return HealthWarning, nil - case passing: - return HealthPassing, nil - default: - // No checks? - return HealthPassing, nil + if len(d.filters) > 0 { + name = name + "|" + strings.Join(d.filters, ",") } + return fmt.Sprintf("health.service(%s)", name) } -// ServiceStatusFilter is used to specify a list of service statuses that you want filter by. -type ServiceStatusFilter []string - -// String returns the string representation of this status filter -func (f ServiceStatusFilter) String() string { - return fmt.Sprintf("[%s]", strings.Join(f, ",")) +// Type returns the type of this dependency. +func (d *HealthServiceQuery) Type() Type { + return TypeConsul } -// NewServiceStatusFilter creates a status filter from the given string in the -// format `[key[,key[,key...]]]`. Each status is split on the comma character -// and must match one of the valid status names. -// -// If the empty string is given, it is assumed only "passing" statuses are to -// be returned. -// -// If the user specifies "any" with other keys, an error will be returned. -func NewServiceStatusFilter(s string) (ServiceStatusFilter, error) { - // If no statuses were given, use the default status of "all passing". 
- if len(s) == 0 || len(strings.TrimSpace(s)) == 0 { - return nil, nil - } - - var errs *multierror.Error - var hasAny bool - - raw := strings.Split(s, ",") - trimmed := make(ServiceStatusFilter, 0, len(raw)) - for _, r := range raw { - trim := strings.TrimSpace(r) - - // Ignore the empty string. - if len(trim) == 0 { - continue - } - - // Record the case where we have the "any" status - it will be used later. - if trim == HealthAny { - hasAny = true - } - - // Validate that the service is actually a valid name. - switch trim { - case HealthAny, HealthUnknown, HealthPassing, HealthWarning, HealthCritical, HealthMaint: - trimmed = append(trimmed, trim) - default: - errs = multierror.Append(errs, fmt.Errorf("service filter: invalid filter %q", trim)) - } - } - - // If the user specified "any" with additional keys, that is invalid. - if hasAny && len(trimmed) != 1 { - errs = multierror.Append(errs, fmt.Errorf("service filter: cannot specify extra keys when using %q", "any")) - } - - return trimmed, errs.ErrorOrNil() -} - -// Accept allows us to check if a slice of health checks pass this filter. -func (f ServiceStatusFilter) Accept(s string) bool { - // If the any filter is activated, pass everything. - if f.any() { - return true - } - - // Iterate over each status and see if the given status is any of those - // statuses. - for _, status := range f { - if status == s { +// acceptStatus allows us to check if a slice of health checks pass this filter. +func acceptStatus(list []string, s string) bool { + for _, status := range list { + if status == s || status == HealthAny { return true } } - return false } -// any is a helper method to determine if this is an "any" service status -// filter. If "any" was given, it must be the only item in the list. -func (f ServiceStatusFilter) any() bool { - return len(f) == 1 && f[0] == HealthAny -} - -// HealthServiceList is a sortable slice of Service -type HealthServiceList []*HealthService +// ByNodeThenID is a sortable slice of Service +type ByNodeThenID []*HealthService // Len, Swap, and Less are used to implement the sort.Sort interface. -func (s HealthServiceList) Len() int { return len(s) } -func (s HealthServiceList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s HealthServiceList) Less(i, j int) bool { +func (s ByNodeThenID) Len() int { return len(s) } +func (s ByNodeThenID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByNodeThenID) Less(i, j int) bool { if s[i].Node < s[j].Node { return true } else if s[i].Node == s[j].Node { diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go new file mode 100644 index 000000000000..a075ea5dfcb5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go @@ -0,0 +1,112 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "regexp" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVGetQuery)(nil) + + // KVGetQueryRe is the regular expression to use. + KVGetQueryRe = regexp.MustCompile(`\A` + keyRe + dcRe + `\z`) +) + +// KVGetQuery queries the KV store for a single key. +type KVGetQuery struct { + stopCh chan struct{} + + dc string + key string + block bool +} + +// NewKVGetQuery parses a string into a dependency. 
+func NewKVGetQuery(s string) (*KVGetQuery, error) { + if s != "" && !KVGetQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.get: invalid format: %q", s) + } + + m := regexpMatch(KVGetQueryRe, s) + return &KVGetQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + key: m["key"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. +func (d *KVGetQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.key, + RawQuery: opts.String(), + }) + + pair, qm, err := clients.Consul().KV().Get(d.key, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + Block: d.block, + } + + if pair == nil { + log.Printf("[TRACE] %s: returned nil", d) + return nil, rm, nil + } + + value := string(pair.Value) + log.Printf("[TRACE] %s: returned %q", d, value) + return value, rm, nil +} + +// EnableBlocking turns this into a blocking KV query. +func (d *KVGetQuery) EnableBlocking() { + d.block = true +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVGetQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVGetQuery) String() string { + key := d.key + if d.dc != "" { + key = key + "@" + d.dc + } + + if d.block { + return fmt.Sprintf("kv.block(%s)", key) + } + return fmt.Sprintf("kv.get(%s)", key) +} + +// Stop halts the dependency's fetch function. +func (d *KVGetQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *KVGetQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go new file mode 100644 index 000000000000..60e1ef7e4a1b --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go @@ -0,0 +1,104 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVKeysQuery)(nil) + + // KVKeysQueryRe is the regular expression to use. + KVKeysQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) +) + +// KVKeysQuery queries the KV store for a single key. +type KVKeysQuery struct { + stopCh chan struct{} + + dc string + prefix string +} + +// NewKVKeysQuery parses a string into a dependency. +func NewKVKeysQuery(s string) (*KVKeysQuery, error) { + if s != "" && !KVKeysQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.keys: invalid format: %q", s) + } + + m := regexpMatch(KVKeysQueryRe, s) + return &KVKeysQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + prefix: m["prefix"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. 
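A sketch of the key query: a missing key yields nil rather than an error, and EnableBlocking changes both the String() identity and the Block flag consumed upstream:

package main

import (
	"fmt"

	dep "github.com/hashicorp/consul-template/dependency"
)

func main() {
	d, err := dep.NewKVGetQuery("config/redis/maxconns@dc1")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // kv.get(config/redis/maxconns@dc1)

	// The blocking variant reports itself under a different name.
	d.EnableBlocking()
	fmt.Println(d.String()) // kv.block(config/redis/maxconns@dc1)
}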
+func (d *KVKeysQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.prefix, + RawQuery: opts.String(), + }) + + list, qm, err := clients.Consul().KV().Keys(d.prefix, "", opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + keys := make([]string, len(list)) + for i, v := range list { + v = strings.TrimPrefix(v, d.prefix) + v = strings.TrimLeft(v, "/") + keys[i] = v + } + + log.Printf("[TRACE] %s: returned %d results", d, len(list)) + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return keys, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVKeysQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVKeysQuery) String() string { + prefix := d.prefix + if d.dc != "" { + prefix = prefix + "@" + d.dc + } + return fmt.Sprintf("kv.keys(%s)", prefix) +} + +// Stop halts the dependency's fetch function. +func (d *KVKeysQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *KVKeysQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go new file mode 100644 index 000000000000..8f63dcfbf7ec --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go @@ -0,0 +1,128 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*KVListQuery)(nil) + + // KVListQueryRe is the regular expression to use. + KVListQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) +) + +// KeyPair is a simple Key-Value pair +type KeyPair struct { + Path string + Key string + Value string + + // Lesser-used, but still valuable keys from api.KV + CreateIndex uint64 + ModifyIndex uint64 + LockIndex uint64 + Flags uint64 + Session string +} + +// KVListQuery queries the KV store for a single key. +type KVListQuery struct { + stopCh chan struct{} + + dc string + prefix string +} + +// NewKVListQuery parses a string into a dependency. +func NewKVListQuery(s string) (*KVListQuery, error) { + if s != "" && !KVListQueryRe.MatchString(s) { + return nil, fmt.Errorf("kv.list: invalid format: %q", s) + } + + m := regexpMatch(KVListQueryRe, s) + return &KVListQuery{ + stopCh: make(chan struct{}, 1), + dc: m["dc"], + prefix: m["prefix"], + }, nil +} + +// Fetch queries the Consul API defined by the given client. 
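The returned keys are made relative to the queried prefix; a standalone sketch of the TrimPrefix/TrimLeft step used above:

package main

import (
	"fmt"
	"strings"
)

func main() {
	prefix := "services/web"
	raw := []string{"services/web/port", "services/web/tls/cert"}

	// Strip the prefix and any leading slash, as the KV queries do.
	keys := make([]string, len(raw))
	for i, v := range raw {
		v = strings.TrimPrefix(v, prefix)
		v = strings.TrimLeft(v, "/")
		keys[i] = v
	}
	fmt.Println(keys) // [port tls/cert]
}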
+func (d *KVListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{ + Datacenter: d.dc, + }) + + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/kv/" + d.prefix, + RawQuery: opts.String(), + }) + + list, qm, err := clients.Consul().KV().List(d.prefix, opts.ToConsulOpts()) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + log.Printf("[TRACE] %s: returned %d pairs", d, len(list)) + + pairs := make([]*KeyPair, 0, len(list)) + for _, pair := range list { + key := strings.TrimPrefix(pair.Key, d.prefix) + key = strings.TrimLeft(key, "/") + + pairs = append(pairs, &KeyPair{ + Path: pair.Key, + Key: key, + Value: string(pair.Value), + CreateIndex: pair.CreateIndex, + ModifyIndex: pair.ModifyIndex, + LockIndex: pair.LockIndex, + Flags: pair.Flags, + Session: pair.Session, + }) + } + + rm := &ResponseMetadata{ + LastIndex: qm.LastIndex, + LastContact: qm.LastContact, + } + + return pairs, rm, nil +} + +// CanShare returns a boolean if this dependency is shareable. +func (d *KVListQuery) CanShare() bool { + return true +} + +// String returns the human-friendly version of this dependency. +func (d *KVListQuery) String() string { + prefix := d.prefix + if d.dc != "" { + prefix = prefix + "@" + d.dc + } + return fmt.Sprintf("kv.list(%s)", prefix) +} + +// Stop halts the dependency's fetch function. +func (d *KVListQuery) Stop() { + close(d.stopCh) +} + +// Type returns the type of this dependency. +func (d *KVListQuery) Type() Type { + return TypeConsul +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/set.go b/vendor/github.com/hashicorp/consul-template/dependency/set.go new file mode 100644 index 000000000000..d3a5df3ab95f --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/set.go @@ -0,0 +1,72 @@ +package dependency + +import ( + "strings" + "sync" +) + +// Set is a dependency-specific set implementation. Relative ordering is +// preserved. +type Set struct { + once sync.Once + sync.RWMutex + list []string + set map[string]Dependency +} + +// Add adds a new element to the set if it does not already exist. +func (s *Set) Add(d Dependency) bool { + s.init() + s.Lock() + defer s.Unlock() + if _, ok := s.set[d.String()]; !ok { + s.list = append(s.list, d.String()) + s.set[d.String()] = d + return true + } + return false +} + +// Get retrieves a single element from the set by name. +func (s *Set) Get(v string) Dependency { + s.RLock() + defer s.RUnlock() + return s.set[v] +} + +// List returns the insertion-ordered list of dependencies. +func (s *Set) List() []Dependency { + s.RLock() + defer s.RUnlock() + r := make([]Dependency, len(s.list)) + for i, k := range s.list { + r[i] = s.set[k] + } + return r +} + +// Len is the size of the set. +func (s *Set) Len() int { + s.RLock() + defer s.RUnlock() + return len(s.list) +} + +// String is a string representation of the set. 
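A sketch of the new Set: it deduplicates on String() and preserves insertion order, which is why every query type now implements String() as its identity:

package main

import (
	"fmt"

	dep "github.com/hashicorp/consul-template/dependency"
)

func main() {
	var s dep.Set

	a, _ := dep.NewKVGetQuery("config/a")
	b, _ := dep.NewKVGetQuery("config/b")

	fmt.Println(s.Add(a))   // true
	fmt.Println(s.Add(b))   // true
	fmt.Println(s.Add(a))   // false, already present under "kv.get(config/a)"
	fmt.Println(s.Len())    // 2
	fmt.Println(s.String()) // kv.get(config/a), kv.get(config/b)
}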
+func (s *Set) String() string { + s.RLock() + defer s.RUnlock() + return strings.Join(s.list, ", ") +} + +func (s *Set) init() { + s.once.Do(func() { + if s.list == nil { + s.list = make([]string, 0, 8) + } + + if s.set == nil { + s.set = make(map[string]Dependency) + } + }) +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/store_key.go b/vendor/github.com/hashicorp/consul-template/dependency/store_key.go deleted file mode 100644 index 9b1549fb0413..000000000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/store_key.go +++ /dev/null @@ -1,199 +0,0 @@ -package dependency - -import ( - "errors" - "fmt" - "log" - "regexp" - "sync" - - api "github.com/hashicorp/consul/api" -) - -// StoreKey represents a single item in Consul's KV store. -type StoreKey struct { - sync.Mutex - - rawKey string - Path string - DataCenter string - - defaultValue string - defaultGiven bool - existenceCheck bool - - stopped bool - stopCh chan struct{} -} - -// kvGetResponse is a wrapper around the Consul API response. -type kvGetResponse struct { - pair *api.KVPair - meta *api.QueryMeta - err error -} - -// Fetch queries the Consul API defined by the given client and returns string -// of the value to Path. -func (d *StoreKey) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped - } - d.Unlock() - - if opts == nil { - opts = &QueryOptions{} - } - - consulOpts := opts.consulQueryOptions() - if d.DataCenter != "" { - consulOpts.Datacenter = d.DataCenter - } - - consul, err := clients.Consul() - if err != nil { - return nil, nil, fmt.Errorf("store key: error getting client: %s", err) - } - - dataCh := make(chan *kvGetResponse, 1) - - go func() { - log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) - pair, meta, err := consul.KV().Get(d.Path, consulOpts) - resp := &kvGetResponse{pair: pair, meta: meta, err: err} - - select { - case dataCh <- resp: - case <-d.stopCh: - } - }() - - select { - case <-d.stopCh: - return nil, nil, ErrStopped - case resp := <-dataCh: - if resp.err != nil { - return "", nil, fmt.Errorf("store key: error fetching: %s", resp.err) - } - - rm := &ResponseMetadata{ - LastIndex: resp.meta.LastIndex, - LastContact: resp.meta.LastContact, - } - - if d.existenceCheck { - return (resp.pair != nil), rm, nil - } - - if resp.pair == nil { - if d.defaultGiven { - log.Printf("[DEBUG] (%s) Consul returned no data (using default of %q)", - d.Display(), d.defaultValue) - return d.defaultValue, rm, nil - } - return nil, rm, nil - } - - log.Printf("[DEBUG] (%s) Consul returned %s", d.Display(), resp.pair.Value) - - return string(resp.pair.Value), rm, nil - } -} - -// SetExistenceCheck sets this keys as an existence check instead of a value -// check. -func (d *StoreKey) SetExistenceCheck(b bool) { - d.existenceCheck = true -} - -// SetDefault is used to set the default value. -func (d *StoreKey) SetDefault(s string) { - d.defaultGiven = true - d.defaultValue = s -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *StoreKey) CanShare() bool { - return true -} - -// HashCode returns a unique identifier. -func (d *StoreKey) HashCode() string { - if d.existenceCheck { - return fmt.Sprintf("StoreKeyExists|%s", d.rawKey) - } - - if d.defaultGiven { - return fmt.Sprintf("StoreKey|%s|%s", d.rawKey, d.defaultValue) - } - - return fmt.Sprintf("StoreKey|%s", d.rawKey) -} - -// Display prints the human-friendly output. 
-func (d *StoreKey) Display() string { - if d.existenceCheck { - return fmt.Sprintf(`"key_exists(%s)"`, d.rawKey) - } - - if d.defaultGiven { - return fmt.Sprintf(`"key_or_default(%s, %q)"`, d.rawKey, d.defaultValue) - } - - return fmt.Sprintf(`"key(%s)"`, d.rawKey) -} - -// Stop halts the dependency's fetch function. -func (d *StoreKey) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } -} - -// ParseStoreKey parses a string of the format a(/b(/c...)) -func ParseStoreKey(s string) (*StoreKey, error) { - if len(s) == 0 { - return nil, errors.New("cannot specify empty key dependency") - } - - re := regexp.MustCompile(`\A` + - `(?P[^@]+)` + - `(@(?P.+))?` + - `\z`) - names := re.SubexpNames() - match := re.FindAllStringSubmatch(s, -1) - - if len(match) == 0 { - return nil, errors.New("invalid key dependency format") - } - - r := match[0] - - m := map[string]string{} - for i, n := range r { - if names[i] != "" { - m[names[i]] = n - } - } - - key, datacenter := m["key"], m["datacenter"] - - if key == "" { - return nil, errors.New("key part is required") - } - - kd := &StoreKey{ - rawKey: s, - Path: key, - DataCenter: datacenter, - stopCh: make(chan struct{}), - } - - return kd, nil -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/store_key_prefix.go b/vendor/github.com/hashicorp/consul-template/dependency/store_key_prefix.go deleted file mode 100644 index 14f2cf6ed46c..000000000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/store_key_prefix.go +++ /dev/null @@ -1,184 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "errors" - "fmt" - "log" - "regexp" - "strings" - "sync" - - "github.com/hashicorp/consul/api" -) - -func init() { - gob.Register([]*KeyPair{}) -} - -// KeyPair is a simple Key-Value pair -type KeyPair struct { - Path string - Key string - Value string - - // Lesser-used, but still valuable keys from api.KV - CreateIndex uint64 - ModifyIndex uint64 - LockIndex uint64 - Flags uint64 - Session string -} - -// StoreKeyPrefix is the representation of a requested key dependency -// from inside a template. 
-type StoreKeyPrefix struct { - sync.Mutex - - rawKey string - Prefix string - DataCenter string - stopped bool - stopCh chan struct{} -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of KeyPair objects -func (d *StoreKeyPrefix) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped - } - d.Unlock() - - if opts == nil { - opts = &QueryOptions{} - } - - consulOpts := opts.consulQueryOptions() - if d.DataCenter != "" { - consulOpts.Datacenter = d.DataCenter - } - - consul, err := clients.Consul() - if err != nil { - return nil, nil, fmt.Errorf("store key prefix: error getting client: %s", err) - } - - var prefixes api.KVPairs - var qm *api.QueryMeta - dataCh := make(chan struct{}) - go func() { - log.Printf("[DEBUG] (%s) querying consul with %+v", d.Display(), consulOpts) - prefixes, qm, err = consul.KV().List(d.Prefix, consulOpts) - close(dataCh) - }() - - select { - case <-d.stopCh: - return nil, nil, ErrStopped - case <-dataCh: - } - - if err != nil { - return nil, nil, fmt.Errorf("store key prefix: error fetching: %s", err) - } - - log.Printf("[DEBUG] (%s) Consul returned %d key pairs", d.Display(), len(prefixes)) - - keyPairs := make([]*KeyPair, 0, len(prefixes)) - for _, pair := range prefixes { - key := strings.TrimPrefix(pair.Key, d.Prefix) - key = strings.TrimLeft(key, "/") - - keyPairs = append(keyPairs, &KeyPair{ - Path: pair.Key, - Key: key, - Value: string(pair.Value), - CreateIndex: pair.CreateIndex, - ModifyIndex: pair.ModifyIndex, - LockIndex: pair.LockIndex, - Flags: pair.Flags, - Session: pair.Session, - }) - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return keyPairs, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *StoreKeyPrefix) CanShare() bool { - return true -} - -// HashCode returns a unique identifier. -func (d *StoreKeyPrefix) HashCode() string { - return fmt.Sprintf("StoreKeyPrefix|%s", d.rawKey) -} - -// Display prints the human-friendly output. -func (d *StoreKeyPrefix) Display() string { - return fmt.Sprintf(`"storeKeyPrefix(%s)"`, d.rawKey) -} - -// Stop halts the dependency's fetch function. 
-func (d *StoreKeyPrefix) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } -} - -// ParseStoreKeyPrefix parses a string of the format a(/b(/c...)) -func ParseStoreKeyPrefix(s string) (*StoreKeyPrefix, error) { - // a(/b(/c))(@datacenter) - re := regexp.MustCompile(`\A` + - `(?P[[:word:],\.\:\-\/]+)?` + - `(@(?P[[:word:]\.\-]+))?` + - `\z`) - names := re.SubexpNames() - match := re.FindAllStringSubmatch(s, -1) - - if len(match) == 0 { - return nil, errors.New("invalid key prefix dependency format") - } - - r := match[0] - - m := map[string]string{} - for i, n := range r { - if names[i] != "" { - m[names[i]] = n - } - } - - prefix, datacenter := m["prefix"], m["datacenter"] - - // Empty prefix or nil prefix should default to "/" - if len(prefix) == 0 { - prefix = "/" - } - - // Remove leading slash - if len(prefix) > 1 && prefix[0] == '/' { - prefix = prefix[1:len(prefix)] - } - - kpd := &StoreKeyPrefix{ - rawKey: s, - Prefix: prefix, - DataCenter: datacenter, - stopCh: make(chan struct{}), - } - - return kpd, nil -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/test.go b/vendor/github.com/hashicorp/consul-template/dependency/test.go deleted file mode 100644 index d3f40313a78a..000000000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/test.go +++ /dev/null @@ -1,126 +0,0 @@ -package dependency - -import ( - "fmt" - "sync" - "time" -) - -// Test is a special dependency that does not actually speaks to a server. -type Test struct { - Name string -} - -func (d *Test) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - time.Sleep(10 * time.Millisecond) - data := "this is some data" - rm := &ResponseMetadata{LastIndex: 1} - return data, rm, nil -} - -func (d *Test) CanShare() bool { - return true -} - -func (d *Test) HashCode() string { - return fmt.Sprintf("Test|%s", d.Name) -} - -func (d *Test) Display() string { return "fakedep" } - -func (d *Test) Stop() {} - -// TestStale is a special dependency that can be used to test what happens when -// stale data is permitted. -type TestStale struct { - Name string -} - -// Fetch is used to implement the dependency interface. -func (d *TestStale) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - time.Sleep(10 * time.Millisecond) - - if opts == nil { - opts = &QueryOptions{} - } - - if opts.AllowStale { - data := "this is some stale data" - rm := &ResponseMetadata{LastIndex: 1, LastContact: 50 * time.Millisecond} - return data, rm, nil - } else { - data := "this is some fresh data" - rm := &ResponseMetadata{LastIndex: 1} - return data, rm, nil - } -} - -func (d *TestStale) CanShare() bool { - return true -} - -func (d *TestStale) HashCode() string { - return fmt.Sprintf("TestStale|%s", d.Name) -} - -func (d *TestStale) Display() string { return "fakedep" } - -func (d *TestStale) Stop() {} - -// TestFetchError is a special dependency that returns an error while fetching. 
-type TestFetchError struct { - Name string -} - -func (d *TestFetchError) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - time.Sleep(10 * time.Millisecond) - return nil, nil, fmt.Errorf("failed to contact server") -} - -func (d *TestFetchError) CanShare() bool { - return true -} - -func (d *TestFetchError) HashCode() string { - return fmt.Sprintf("TestFetchError|%s", d.Name) -} - -func (d *TestFetchError) Display() string { return "fakedep" } - -func (d *TestFetchError) Stop() {} - -// TestRetry is a special dependency that errors on the first fetch and -// succeeds on subsequent fetches. -type TestRetry struct { - sync.Mutex - Name string - retried bool -} - -func (d *TestRetry) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - time.Sleep(10 * time.Millisecond) - - d.Lock() - defer d.Unlock() - - if d.retried { - data := "this is some data" - rm := &ResponseMetadata{LastIndex: 1} - return data, rm, nil - } else { - d.retried = true - return nil, nil, fmt.Errorf("failed to contact server (try again)") - } -} - -func (d *TestRetry) CanShare() bool { - return true -} - -func (d *TestRetry) HashCode() string { - return fmt.Sprintf("TestRetry|%s", d.Name) -} - -func (d *TestRetry) Display() string { return "fakedep" } - -func (d *TestRetry) Stop() {} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go new file mode 100644 index 000000000000..17c2413b2691 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go @@ -0,0 +1,28 @@ +package dependency + +import "time" + +var ( + // VaultDefaultLeaseDuration is the default lease duration in seconds. + VaultDefaultLeaseDuration = 5 * time.Minute +) + +// Secret is a vault secret. +type Secret struct { + RequestID string + LeaseID string + LeaseDuration int + Renewable bool + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} +} + +// leaseDurationOrDefault returns a value or the default lease duration. +func leaseDurationOrDefault(d int) int { + if d == 0 { + return int(VaultDefaultLeaseDuration.Nanoseconds() / 1000000000) + } + return d +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go new file mode 100644 index 000000000000..3e80fd293fe6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go @@ -0,0 +1,126 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "sort" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultListQuery)(nil) +) + +// VaultListQuery is the dependency to Vault for a secret +type VaultListQuery struct { + stopCh chan struct{} + + path string +} + +// NewVaultListQuery creates a new datacenter dependency. 
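+//
+// A minimal sketch (the path is a placeholder): surrounding whitespace and
+// slashes are trimmed, and an empty path is an error.
+//
+//	q, err := NewVaultListQuery("/secret/my-app/")
+//	// q.String() == "vault.list(secret/my-app)"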
+func NewVaultListQuery(s string) (*VaultListQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.list: invalid format: %q", s) + } + + return &VaultListQuery{ + stopCh: make(chan struct{}, 1), + path: s, + }, nil +} + +// Fetch queries the Vault API +func (d *VaultListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{}) + + // If this is not the first query, poll to simulate blocking-queries. + if opts.WaitIndex != 0 { + dur := VaultDefaultLeaseDuration + log.Printf("[TRACE] %s: long polling for %s", d, dur) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(dur): + } + } + + // If we got this far, we either didn't have a secret to renew, the secret was + // not renewable, or the renewal failed, so attempt a fresh list. + log.Printf("[TRACE] %s: LIST %s", d, &url.URL{ + Path: "/v1/" + d.path, + RawQuery: opts.String(), + }) + secret, err := clients.Vault().Logical().List(d.path) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + var result []string + + // The secret could be nil if it does not exist. + if secret == nil || secret.Data == nil { + log.Printf("[TRACE] %s: no data", d) + return respWithMetadata(result) + } + + // This is a weird thing that happened once... + keys, ok := secret.Data["keys"] + if !ok { + log.Printf("[TRACE] %s: no keys", d) + return respWithMetadata(result) + } + + list, ok := keys.([]interface{}) + if !ok { + log.Printf("[TRACE] %s: not list", d) + return nil, nil, fmt.Errorf("%s: unexpected response", d) + } + + for _, v := range list { + typed, ok := v.(string) + if !ok { + return nil, nil, fmt.Errorf("%s: non-string in list", d) + } + result = append(result, typed) + } + sort.Strings(result) + + log.Printf("[TRACE] %s: returned %d results", d, len(result)) + + return respWithMetadata(result) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultListQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultListQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultListQuery) String() string { + return fmt.Sprintf("vault.list(%s)", d.path) +} + +// Type returns the type of this dependency. +func (d *VaultListQuery) Type() Type { + return TypeVault +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go new file mode 100644 index 000000000000..f33524c62b82 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go @@ -0,0 +1,146 @@ +package dependency + +import ( + "fmt" + "log" + "net/url" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultReadQuery)(nil) +) + +// VaultReadQuery is the dependency to Vault for a secret +type VaultReadQuery struct { + stopCh chan struct{} + + path string + secret *Secret +} + +// NewVaultReadQuery creates a new datacenter dependency. 
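+//
+// A minimal sketch, given a *ClientSet named clients (the path is a
+// placeholder): Fetch returns a *Secret, and renewable leases are renewed on
+// later fetches instead of being re-read.
+//
+//	q, _ := NewVaultReadQuery("secret/my-app")
+//	raw, _, _ := q.Fetch(clients, &QueryOptions{})
+//	secret := raw.(*Secret)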
+func NewVaultReadQuery(s string) (*VaultReadQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.read: invalid format: %q", s) + } + + return &VaultReadQuery{ + stopCh: make(chan struct{}, 1), + path: s, + }, nil +} + +// Fetch queries the Vault API +func (d *VaultReadQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{}) + + // If this is not the first query and we have a lease duration, sleep until we + // try to renew. + if opts.WaitIndex != 0 && d.secret != nil && d.secret.LeaseDuration != 0 { + dur := time.Duration(d.secret.LeaseDuration/2.0) * time.Second + if dur == 0 { + dur = VaultDefaultLeaseDuration + } + + log.Printf("[TRACE] %s: long polling for %s", d, dur) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(dur): + } + } + + // Attempt to renew the secret. If we do not have a secret or if that secret + // is not renewable, we will attempt a (re-)read later. + if d.secret != nil && d.secret.LeaseID != "" && d.secret.Renewable { + log.Printf("[TRACE] %s: PUT %s", d, &url.URL{ + Path: "/v1/sys/renew/" + d.secret.LeaseID, + RawQuery: opts.String(), + }) + + renewal, err := clients.Vault().Sys().Renew(d.secret.LeaseID, 0) + if err == nil { + log.Printf("[TRACE] %s: successfully renewed %s", d, d.secret.LeaseID) + + secret := &Secret{ + RequestID: renewal.RequestID, + LeaseID: renewal.LeaseID, + LeaseDuration: d.secret.LeaseDuration, + Renewable: renewal.Renewable, + Data: d.secret.Data, + } + d.secret = secret + + return respWithMetadata(secret) + } + + // The renewal failed for some reason. + log.Printf("[WARN] %s: failed to renew %s: %s", d, d.secret.LeaseID, err) + } + + // If we got this far, we either didn't have a secret to renew, the secret was + // not renewable, or the renewal failed, so attempt a fresh read. + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/" + d.path, + RawQuery: opts.String(), + }) + vaultSecret, err := clients.Vault().Logical().Read(d.path) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + // The secret could be nil if it does not exist. + if vaultSecret == nil { + return nil, nil, fmt.Errorf("%s: no secret exists at %s", d, d.path) + } + + // Print any warnings. + for _, w := range vaultSecret.Warnings { + log.Printf("[WARN] %s: %s", d, w) + } + + // Create our cloned secret. + secret := &Secret{ + LeaseID: vaultSecret.LeaseID, + LeaseDuration: leaseDurationOrDefault(vaultSecret.LeaseDuration), + Renewable: vaultSecret.Renewable, + Data: vaultSecret.Data, + } + d.secret = secret + + return respWithMetadata(secret) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultReadQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultReadQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultReadQuery) String() string { + return fmt.Sprintf("vault.read(%s)", d.path) +} + +// Type returns the type of this dependency. 
+func (d *VaultReadQuery) Type() Type { + return TypeVault +} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_secret.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_secret.go deleted file mode 100644 index 9c024e1bd7ae..000000000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_secret.go +++ /dev/null @@ -1,197 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "strings" - "sync" - "time" - - vaultapi "github.com/hashicorp/vault/api" -) - -// Secret is a vault secret. -type Secret struct { - LeaseID string - LeaseDuration int - Renewable bool - - // Data is the actual contents of the secret. The format of the data - // is arbitrary and up to the secret backend. - Data map[string]interface{} -} - -// VaultSecret is the dependency to Vault for a secret -type VaultSecret struct { - sync.Mutex - - Path string - data map[string]interface{} - secret *Secret - - stopped bool - stopCh chan struct{} -} - -// Fetch queries the Vault API -func (d *VaultSecret) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped - } - d.Unlock() - - if opts == nil { - opts = &QueryOptions{} - } - - log.Printf("[DEBUG] (%s) querying vault with %+v", d.Display(), opts) - - // If this is not the first query and we have a lease duration, sleep until we - // try to renew. - if opts.WaitIndex != 0 && d.secret != nil && d.secret.LeaseDuration != 0 { - duration := time.Duration(d.secret.LeaseDuration/2.0) * time.Second - log.Printf("[DEBUG] (%s) pretending to long-poll for %q", - d.Display(), duration) - select { - case <-d.stopCh: - log.Printf("[DEBUG] (%s) received interrupt", d.Display()) - return nil, nil, ErrStopped - case <-time.After(duration): - } - } - - // Grab the vault client - vault, err := clients.Vault() - if err != nil { - return nil, nil, ErrWithExitf("vault secret: %s", err) - } - - // Attempt to renew the secret. If we do not have a secret or if that secret - // is not renewable, we will attempt a (re-)read later. - if d.secret != nil && d.secret.LeaseID != "" && d.secret.Renewable { - renewal, err := vault.Sys().Renew(d.secret.LeaseID, 0) - if err == nil { - log.Printf("[DEBUG] (%s) successfully renewed", d.Display()) - - log.Printf("[DEBUG] (%s) %#v", d.Display(), renewal) - - secret := &Secret{ - LeaseID: renewal.LeaseID, - LeaseDuration: d.secret.LeaseDuration, - Renewable: renewal.Renewable, - Data: d.secret.Data, - } - - d.Lock() - d.secret = secret - d.Unlock() - - return respWithMetadata(secret) - } - - // The renewal failed for some reason. - log.Printf("[WARN] (%s) failed to renew, re-obtaining: %s", d.Display(), err) - } - - // If we got this far, we either didn't have a secret to renew, the secret was - // not renewable, or the renewal failed, so attempt a fresh read. - var vaultSecret *vaultapi.Secret - if len(d.data) == 0 { - vaultSecret, err = vault.Logical().Read(d.Path) - } else { - vaultSecret, err = vault.Logical().Write(d.Path, d.data) - } - if err != nil { - return nil, nil, ErrWithExitf("error obtaining from vault: %s", err) - } - - // The secret could be nil (maybe it does not exist yet). This is not an error - // to Vault, but it is an error to Consul Template, so return an error - // instead. 
- if vaultSecret == nil { - return nil, nil, fmt.Errorf("no secret exists at path %q", d.Display()) - } - - // Create our cloned secret - secret := &Secret{ - LeaseID: vaultSecret.LeaseID, - LeaseDuration: leaseDurationOrDefault(vaultSecret.LeaseDuration), - Renewable: vaultSecret.Renewable, - Data: vaultSecret.Data, - } - - d.Lock() - d.secret = secret - d.Unlock() - - log.Printf("[DEBUG] (%s) vault returned the secret", d.Display()) - - return respWithMetadata(secret) -} - -// CanShare returns if this dependency is shareable. -func (d *VaultSecret) CanShare() bool { - return false -} - -// HashCode returns the hash code for this dependency. -func (d *VaultSecret) HashCode() string { - return fmt.Sprintf("VaultSecret|%s", d.Path) -} - -// Display returns a string that should be displayed to the user in output (for -// example). -func (d *VaultSecret) Display() string { - return fmt.Sprintf(`"secret(%s)"`, d.Path) -} - -// Stop halts the given dependency's fetch. -func (d *VaultSecret) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } -} - -// ParseVaultSecret creates a new datacenter dependency. -func ParseVaultSecret(s ...string) (*VaultSecret, error) { - if len(s) == 0 { - return nil, fmt.Errorf("expected 1 or more arguments, got %d", len(s)) - } - - path, rest := s[0], s[1:len(s)] - - if len(path) == 0 { - return nil, fmt.Errorf("vault path must be at least one character") - } - - data := make(map[string]interface{}) - for _, str := range rest { - parts := strings.SplitN(str, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid value %q - must be key=value", str) - } - - k, v := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) - data[k] = v - } - - vs := &VaultSecret{ - Path: path, - data: data, - stopCh: make(chan struct{}), - } - return vs, nil -} - -func leaseDurationOrDefault(d int) int { - if d == 0 { - return 5 * 60 - } - return d -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_secrets.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_secrets.go deleted file mode 100644 index b4173351e440..000000000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_secrets.go +++ /dev/null @@ -1,134 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "sort" - "sync" - "time" -) - -// VaultSecrets is the dependency to list secrets in Vault. -type VaultSecrets struct { - sync.Mutex - - Path string - - stopped bool - stopCh chan struct{} -} - -// Fetch queries the Vault API -func (d *VaultSecrets) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() - return nil, nil, ErrStopped - } - d.Unlock() - - if opts == nil { - opts = &QueryOptions{} - } - - log.Printf("[DEBUG] (%s) querying vault with %+v", d.Display(), opts) - - // If this is not the first query and we have a lease duration, sleep until we - // try to renew. 
- if opts.WaitIndex != 0 { - log.Printf("[DEBUG] (%s) pretending to long-poll", d.Display()) - select { - case <-d.stopCh: - return nil, nil, ErrStopped - case <-time.After(sleepTime): - } - } - - // Grab the vault client - vault, err := clients.Vault() - if err != nil { - return nil, nil, ErrWithExitf("vault secrets: %s", err) - } - - // Get the list as a secret - vaultSecret, err := vault.Logical().List(d.Path) - if err != nil { - return nil, nil, ErrWithExitf("error listing secrets from vault: %s", err) - } - - // If the secret or data data is nil, return an empty list of strings. - if vaultSecret == nil || vaultSecret.Data == nil { - return respWithMetadata(make([]string, 0)) - } - - // If there are no keys at that path, return the empty list. - keys, ok := vaultSecret.Data["keys"] - if !ok { - return respWithMetadata(make([]string, 0)) - } - - // Convert the interface into a list of interfaces. - list, ok := keys.([]interface{}) - if !ok { - return nil, nil, ErrWithExitf("vault returned an unexpected payload for %q", d.Display()) - } - - // Pull each item out of the list and safely cast to a string. - result := make([]string, len(list)) - for i, v := range list { - typed, ok := v.(string) - if !ok { - return nil, nil, ErrWithExitf("vault returned a non-string when listing secrets for %q", d.Display()) - } - result[i] = typed - } - sort.Strings(result) - - log.Printf("[DEBUG] (%s) vault listed %d secrets(s)", d.Display(), len(result)) - - return respWithMetadata(result) -} - -// CanShare returns if this dependency is shareable. -func (d *VaultSecrets) CanShare() bool { - return false -} - -// HashCode returns the hash code for this dependency. -func (d *VaultSecrets) HashCode() string { - return fmt.Sprintf("VaultSecrets|%s", d.Path) -} - -// Display returns a string that should be displayed to the user in output (for -// example). -func (d *VaultSecrets) Display() string { - return fmt.Sprintf(`"secrets(%s)"`, d.Path) -} - -// Stop halts the dependency's fetch function. -func (d *VaultSecrets) Stop() { - d.Lock() - defer d.Unlock() - - if !d.stopped { - close(d.stopCh) - d.stopped = true - } -} - -// ParseVaultSecrets creates a new datacenter dependency. -func ParseVaultSecrets(s string) (*VaultSecrets, error) { - // Ensure a trailing slash, always. - if len(s) == 0 { - s = "/" - } - if s[len(s)-1] != '/' { - s = fmt.Sprintf("%s/", s) - } - - vs := &VaultSecrets{ - Path: s, - stopCh: make(chan struct{}), - } - return vs, nil -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go index 7c1ca20a3691..e930b0b01506 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go @@ -2,64 +2,67 @@ package dependency import ( "log" - "sync" + "net/url" "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultTokenQuery)(nil) ) -// VaultToken is the dependency to Vault for a secret -type VaultToken struct { - sync.Mutex +// VaultTokenQuery is the dependency to Vault for a secret +type VaultTokenQuery struct { + stopCh chan struct{} leaseID string leaseDuration int +} - stopped bool - stopCh chan struct{} +// NewVaultTokenQuery creates a new dependency. 
+func NewVaultTokenQuery() (*VaultTokenQuery, error) { + return &VaultTokenQuery{ + stopCh: make(chan struct{}, 1), + }, nil } // Fetch queries the Vault API -func (d *VaultToken) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - d.Lock() - if d.stopped { - defer d.Unlock() +func (d *VaultTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: return nil, nil, ErrStopped + default: } - d.Unlock() - if opts == nil { - opts = &QueryOptions{} - } + opts = opts.Merge(&QueryOptions{}) - log.Printf("[DEBUG] (%s) renewing vault token", d.Display()) + log.Printf("[TRACE] %s: GET %s", d, &url.URL{ + Path: "/v1/auth/token/renew-self", + RawQuery: opts.String(), + }) // If this is not the first query and we have a lease duration, sleep until we // try to renew. if opts.WaitIndex != 0 && d.leaseDuration != 0 { - duration := time.Duration(d.leaseDuration/2.0) * time.Second - - if duration < 1*time.Second { - log.Printf("[DEBUG] (%s) increasing sleep to 1s (was %q)", - d.Display(), duration) - duration = 1 * time.Second + dur := time.Duration(d.leaseDuration/2.0) * time.Second + if dur == 0 { + dur = VaultDefaultLeaseDuration } - log.Printf("[DEBUG] (%s) sleeping for %q", d.Display(), duration) + log.Printf("[TRACE] %s: long polling for %s", d, dur) + select { case <-d.stopCh: return nil, nil, ErrStopped - case <-time.After(duration): + case <-time.After(dur): } } - // Grab the vault client - vault, err := clients.Vault() - if err != nil { - return nil, nil, ErrWithExitf("vault_token: %s", err) - } - - token, err := vault.Auth().Token().RenewSelf(0) + token, err := clients.Vault().Auth().Token().RenewSelf(0) if err != nil { - return nil, nil, ErrWithExitf("error renewing vault token: %s", err) + return nil, nil, errors.Wrap(err, d.String()) } // Create our cloned secret @@ -70,50 +73,30 @@ func (d *VaultToken) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, Data: token.Data, } - leaseDuration := token.Auth.LeaseDuration - if leaseDuration == 0 { - log.Printf("[WARN] (%s) lease duration is 0, setting to 5s", d.Display()) - leaseDuration = 5 - } - - d.Lock() d.leaseID = secret.LeaseID - d.leaseDuration = leaseDuration - d.Unlock() + d.leaseDuration = secret.LeaseDuration - log.Printf("[DEBUG] (%s) successfully renewed token", d.Display()) + log.Printf("[DEBUG] %s: renewed token", d) return respWithMetadata(secret) } // CanShare returns if this dependency is shareable. -func (d *VaultToken) CanShare() bool { +func (d *VaultTokenQuery) CanShare() bool { return false } -// HashCode returns the hash code for this dependency. -func (d *VaultToken) HashCode() string { - return "VaultToken" -} - -// Display returns a string that should be displayed to the user in output (for -// example). -func (d *VaultToken) Display() string { - return "vault_token" -} - // Stop halts the dependency's fetch function. -func (d *VaultToken) Stop() { - d.Lock() - defer d.Unlock() +func (d *VaultTokenQuery) Stop() { + close(d.stopCh) +} - if !d.stopped { - close(d.stopCh) - d.stopped = true - } +// String returns the human-friendly version of this dependency. +func (d *VaultTokenQuery) String() string { + return "vault.token" } -// ParseVaultToken creates a new VaultToken dependency. -func ParseVaultToken() (*VaultToken, error) { - return &VaultToken{stopCh: make(chan struct{})}, nil +// Type returns the type of this dependency. 
+func (d *VaultTokenQuery) Type() Type { + return TypeVault } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go new file mode 100644 index 000000000000..1bbd80b329f4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go @@ -0,0 +1,172 @@ +package dependency + +import ( + "crypto/sha1" + "fmt" + "io" + "log" + "net/url" + "sort" + "strings" + "time" + + "github.com/pkg/errors" +) + +var ( + // Ensure implements + _ Dependency = (*VaultWriteQuery)(nil) +) + +// VaultWriteQuery is the dependency to Vault for a secret +type VaultWriteQuery struct { + stopCh chan struct{} + + path string + data map[string]interface{} + dataHash string + secret *Secret +} + +// NewVaultWriteQuery creates a new datacenter dependency. +func NewVaultWriteQuery(s string, d map[string]interface{}) (*VaultWriteQuery, error) { + s = strings.TrimSpace(s) + s = strings.Trim(s, "/") + if s == "" { + return nil, fmt.Errorf("vault.write: invalid format: %q", s) + } + + return &VaultWriteQuery{ + stopCh: make(chan struct{}, 1), + path: s, + data: d, + dataHash: sha1Map(d), + }, nil +} + +// Fetch queries the Vault API +func (d *VaultWriteQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { + select { + case <-d.stopCh: + return nil, nil, ErrStopped + default: + } + + opts = opts.Merge(&QueryOptions{}) + + // If this is not the first query and we have a lease duration, sleep until we + // try to renew. + if opts.WaitIndex != 0 && d.secret != nil && d.secret.LeaseDuration != 0 { + dur := time.Duration(d.secret.LeaseDuration/2.0) * time.Second + if dur == 0 { + dur = VaultDefaultLeaseDuration + } + + log.Printf("[TRACE] %s: long polling for %s", d, dur) + + select { + case <-d.stopCh: + return nil, nil, ErrStopped + case <-time.After(dur): + } + } + + // Attempt to renew the secret. If we do not have a secret or if that secret + // is not renewable, we will attempt a (re-)write later. + if d.secret != nil && d.secret.LeaseID != "" && d.secret.Renewable { + log.Printf("[TRACE] %s: PUT %s", d, &url.URL{ + Path: "/v1/sys/renew/" + d.secret.LeaseID, + RawQuery: opts.String(), + }) + + renewal, err := clients.Vault().Sys().Renew(d.secret.LeaseID, 0) + if err == nil { + log.Printf("[TRACE] %s: successfully renewed %s", d, d.secret.LeaseID) + + secret := &Secret{ + RequestID: renewal.RequestID, + LeaseID: renewal.LeaseID, + LeaseDuration: d.secret.LeaseDuration, + Renewable: renewal.Renewable, + Data: d.secret.Data, + } + d.secret = secret + + return respWithMetadata(secret) + } + + // The renewal failed for some reason. + log.Printf("[WARN] %s: failed to renew %s: %s", d, d.secret.LeaseID, err) + } + + // If we got this far, we either didn't have a secret to renew, the secret was + // not renewable, or the renewal failed, so attempt a fresh write. + log.Printf("[TRACE] %s: PUT %s", d, &url.URL{ + Path: "/v1/" + d.path, + RawQuery: opts.String(), + }) + + vaultSecret, err := clients.Vault().Logical().Write(d.path, d.data) + if err != nil { + return nil, nil, errors.Wrap(err, d.String()) + } + + // The secret could be nil if it does not exist. + if vaultSecret == nil { + return nil, nil, fmt.Errorf("%s: no secret exists at %s", d, d.path) + } + + // Print any warnings. + for _, w := range vaultSecret.Warnings { + log.Printf("[WARN] %s: %s", d, w) + } + + // Create our cloned secret. 
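+	// (LeaseDuration falls back to VaultDefaultLeaseDuration, via
+	// leaseDurationOrDefault, when Vault reports a zero lease duration.)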
+ secret := &Secret{ + LeaseID: vaultSecret.LeaseID, + LeaseDuration: leaseDurationOrDefault(vaultSecret.LeaseDuration), + Renewable: vaultSecret.Renewable, + Data: vaultSecret.Data, + } + d.secret = secret + + return respWithMetadata(secret) +} + +// CanShare returns if this dependency is shareable. +func (d *VaultWriteQuery) CanShare() bool { + return false +} + +// Stop halts the given dependency's fetch. +func (d *VaultWriteQuery) Stop() { + close(d.stopCh) +} + +// String returns the human-friendly version of this dependency. +func (d *VaultWriteQuery) String() string { + return fmt.Sprintf("vault.write(%s -> %s)", d.path, d.dataHash) +} + +// Type returns the type of this dependency. +func (d *VaultWriteQuery) Type() Type { + return TypeVault +} + +// sha1Map returns the sha1 hash of the data in the map. The reason this data is +// hashed is because it appears in the output and could contain sensitive +// information. +func sha1Map(m map[string]interface{}) string { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + h := sha1.New() + for _, k := range keys { + io.WriteString(h, fmt.Sprintf("%s=%q", k, m[k])) + } + + return fmt.Sprintf("%.4x", h.Sum(nil)) +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/dedup.go b/vendor/github.com/hashicorp/consul-template/manager/dedup.go index dc79015d0719..9b5c548fc6c3 100644 --- a/vendor/github.com/hashicorp/consul-template/manager/dedup.go +++ b/vendor/github.com/hashicorp/consul-template/manager/dedup.go @@ -57,8 +57,8 @@ type templateData struct { // path for a total of 100. // type DedupManager struct { - // config is the consul-template configuration - config *config.Config + // config is the deduplicate configuration + config *config.DedupConfig // clients is used to access the underlying clinets clients *dep.ClientSet @@ -89,7 +89,7 @@ type DedupManager struct { } // NewDedupManager creates a new Dedup manager -func NewDedupManager(config *config.Config, clients *dep.ClientSet, brain *template.Brain, templates []*template.Template) (*DedupManager, error) { +func NewDedupManager(config *config.DedupConfig, clients *dep.ClientSet, brain *template.Brain, templates []*template.Template) (*DedupManager, error) { d := &DedupManager{ config: config, clients: clients, @@ -107,10 +107,7 @@ func NewDedupManager(config *config.Config, clients *dep.ClientSet, brain *templ func (d *DedupManager) Start() error { log.Printf("[INFO] (dedup) starting de-duplication manager") - client, err := d.clients.Consul() - if err != nil { - return err - } + client := d.clients.Consul() go d.createSession(client) // Start to watch each template @@ -141,7 +138,7 @@ START: log.Printf("[INFO] (dedup) attempting to create session") session := client.Session() sessionCh := make(chan struct{}) - ttl := fmt.Sprintf("%ds", d.config.Deduplicate.TTL/time.Second) + ttl := fmt.Sprintf("%.6fs", float64(*d.config.TTL)/float64(time.Second)) se := &consulapi.SessionEntry{ Name: "Consul-Template de-duplication", Behavior: "delete", @@ -196,7 +193,7 @@ func (d *DedupManager) IsLeader(tmpl *template.Template) bool { // UpdateDeps is used to update the values of the dependencies for a template func (d *DedupManager) UpdateDeps(t *template.Template, deps []dep.Dependency) error { // Calculate the path to write updates to - dataPath := path.Join(d.config.Deduplicate.Prefix, t.HexMD5, "data") + dataPath := path.Join(*d.config.Prefix, t.ID(), "data") // Package up the dependency data td := templateData{ @@ 
-211,7 +208,7 @@ func (d *DedupManager) UpdateDeps(t *template.Template, deps []dep.Dependency) e // Pull the current value from the brain val, ok := d.brain.Recall(dp) if ok { - td.Data[dp.HashCode()] = val + td.Data[dp.String()] = val } } @@ -241,10 +238,7 @@ func (d *DedupManager) UpdateDeps(t *template.Template, deps []dep.Dependency) e Value: buf.Bytes(), Flags: templateDataFlag, } - client, err := d.clients.Consul() - if err != nil { - return fmt.Errorf("failed to get consul client: %v", err) - } + client := d.clients.Consul() if _, err := client.KV().Put(&kvPair, nil); err != nil { return fmt.Errorf("failed to write '%s': %v", dataPath, err) } @@ -286,12 +280,12 @@ func (d *DedupManager) setLeader(tmpl *template.Template, lockCh <-chan struct{} } func (d *DedupManager) watchTemplate(client *consulapi.Client, t *template.Template) { - log.Printf("[INFO] (dedup) starting watch for template hash %s", t.HexMD5) - path := path.Join(d.config.Deduplicate.Prefix, t.HexMD5, "data") + log.Printf("[INFO] (dedup) starting watch for template hash %s", t.ID()) + path := path.Join(*d.config.Prefix, t.ID(), "data") // Determine if stale queries are allowed var allowStale bool - if d.config.MaxStale != 0 { + if *d.config.MaxStale != 0 { allowStale = true } @@ -323,7 +317,7 @@ START: } // Block for updates on the data key - log.Printf("[INFO] (dedup) listing data for template hash %s", t.HexMD5) + log.Printf("[INFO] (dedup) listing data for template hash %s", t.ID()) pair, meta, err := client.KV().Get(path, opts) if err != nil { log.Printf("[ERR] (dedup) failed to get '%s': %v", path, err) @@ -337,14 +331,14 @@ START: opts.WaitIndex = meta.LastIndex // If we've exceeded the maximum staleness, retry without stale - if allowStale && meta.LastContact > d.config.MaxStale { + if allowStale && meta.LastContact > *d.config.MaxStale { allowStale = false log.Printf("[DEBUG] (dedup) %s stale data (last contact exceeded max_stale)", path) goto START } // Re-enable stale queries if allowed - if d.config.MaxStale != 0 { + if *d.config.MaxStale > 0 { allowStale = true } @@ -408,8 +402,8 @@ func (d *DedupManager) parseData(path string, raw []byte) { func (d *DedupManager) attemptLock(client *consulapi.Client, session string, sessionCh chan struct{}, t *template.Template) { defer d.wg.Done() START: - log.Printf("[INFO] (dedup) attempting lock for template hash %s", t.HexMD5) - basePath := path.Join(d.config.Deduplicate.Prefix, t.HexMD5) + log.Printf("[INFO] (dedup) attempting lock for template hash %s", t.ID()) + basePath := path.Join(*d.config.Prefix, t.ID()) lopts := &consulapi.LockOptions{ Key: path.Join(basePath, "lock"), Session: session, diff --git a/vendor/github.com/hashicorp/consul-template/manager/renderer.go b/vendor/github.com/hashicorp/consul-template/manager/renderer.go new file mode 100644 index 000000000000..24655f4e9d98 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/manager/renderer.go @@ -0,0 +1,146 @@ +package manager + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +type RenderInput struct { + Backup bool + Contents []byte + Dry bool + DryStream io.Writer + Path string + Perms os.FileMode +} + +type RenderResult struct { + DidRender bool + WouldRender bool +} + +// Render atomically renders a file contents to disk, returning a result of +// whether it would have rendered and actually did render. 
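+//
+// A minimal usage sketch (the path and contents are placeholders):
+//
+//	result, err := Render(&RenderInput{
+//		Contents: []byte("rendered contents"),
+//		Path:     "/tmp/example.out",
+//		Perms:    0644,
+//	})
+//	// result.WouldRender is true on success; result.DidRender is false when
+//	// the file already holds identical contents.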
+func Render(i *RenderInput) (*RenderResult, error) { + existing, err := ioutil.ReadFile(i.Path) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "failed reading file") + } + + if bytes.Equal(existing, i.Contents) { + return &RenderResult{ + DidRender: false, + WouldRender: true, + }, nil + } + + if i.Dry { + fmt.Fprintf(i.DryStream, "> %s\n%s", i.Path, i.Contents) + } else { + if err := AtomicWrite(i.Path, i.Contents, i.Perms, i.Backup); err != nil { + return nil, errors.Wrap(err, "failed writing file") + } + } + + return &RenderResult{ + DidRender: true, + WouldRender: true, + }, nil +} + +// AtomicWrite accepts a destination path and the template contents. It writes +// the template contents to a TempFile on disk, returning if any errors occur. +// +// If the parent destination directory does not exist, it will be created +// automatically with permissions 0755. To use a different permission, create +// the directory first or use `chmod` in a Command. +// +// If the destination path exists, all attempts will be made to preserve the +// existing file permissions. If those permissions cannot be read, an error is +// returned. If the file does not exist, it will be created automatically with +// permissions 0644. To use a different permission, create the destination file +// first or use `chmod` in a Command. +// +// If no errors occur, the Tempfile is "renamed" (moved) to the destination +// path. +func AtomicWrite(path string, contents []byte, perms os.FileMode, backup bool) error { + if path == "" { + return fmt.Errorf("missing destination") + } + + parent := filepath.Dir(path) + if _, err := os.Stat(parent); os.IsNotExist(err) { + if err := os.MkdirAll(parent, 0755); err != nil { + return err + } + } + + f, err := ioutil.TempFile(parent, "") + if err != nil { + return err + } + defer os.Remove(f.Name()) + + if _, err := f.Write(contents); err != nil { + return err + } + + if err := f.Sync(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + if err := os.Chmod(f.Name(), perms); err != nil { + return err + } + + // If we got this far, it means we are about to save the file. Copy the + // current contents of the file onto disk (if it exists) so we have a backup. + if backup { + if _, err := os.Stat(path); !os.IsNotExist(err) { + if err := copyFile(path, path+".bak"); err != nil { + return err + } + } + } + + if err := os.Rename(f.Name(), path); err != nil { + return err + } + + return nil +} + +// copyFile copies the file at src to the path at dst. Any errors that occur +// are returned. 
+func copyFile(src, dst string) error { + s, err := os.Open(src) + if err != nil { + return err + } + defer s.Close() + + stat, err := s.Stat() + if err != nil { + return err + } + + d, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, stat.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(d, s); err != nil { + d.Close() + return err + } + return d.Close() +} diff --git a/vendor/github.com/hashicorp/consul-template/manager/runner.go b/vendor/github.com/hashicorp/consul-template/manager/runner.go index 304e0583d7b5..6ff6a2611112 100644 --- a/vendor/github.com/hashicorp/consul-template/manager/runner.go +++ b/vendor/github.com/hashicorp/consul-template/manager/runner.go @@ -1,18 +1,12 @@ package manager import ( - "bytes" "encoding/json" "fmt" "io" - "io/ioutil" "log" "os" - "os/exec" - "path/filepath" - "runtime" "strconv" - "strings" "sync" "time" @@ -23,6 +17,7 @@ import ( "github.com/hashicorp/consul-template/watch" "github.com/hashicorp/go-multierror" "github.com/mattn/go-shellwords" + "github.com/pkg/errors" ) const ( @@ -54,9 +49,9 @@ type Runner struct { outStream, errStream io.Writer inStream io.Reader - // ctemplatesMap is a map of each template ID to the ConfigTemplates + // ctemplatesMap is a map of each template ID to the TemplateConfigs // that made it. - ctemplatesMap map[string][]*config.ConfigTemplate + ctemplatesMap map[string]config.TemplateConfigs // templates is the list of calculated templates. templates []*template.Template @@ -73,6 +68,9 @@ type Runner struct { // dependencies is the list of dependencies this runner is watching. dependencies map[string]dep.Dependency + // dependenciesLock is a lock around touching the dependencies map. + dependenciesLock sync.Mutex + // watcher is the watcher this runner is using. watcher *watch.Watcher @@ -94,19 +92,69 @@ type Runner struct { // dedup is the deduplication manager if enabled dedup *DedupManager + + // Env represents a custom set of environment variables to populate the + // template and command runtime with. These environment variables will be + // available in both the command's environment as well as the template's + // environment. + Env map[string]string + + // stopLock is the lock around checking if the runner can be stopped + stopLock sync.Mutex + + // stopped is a boolean of whether the runner is stopped + stopped bool } // RenderEvent captures the time and events that occurred for a template // rendering. type RenderEvent struct { + // Missing is the list of dependencies that we do not yet have data for, but + // are contained in the watcher. This is different from unwatched dependencies, + // which includes dependencies the watcher has not yet started querying for + // data. + MissingDeps *dep.Set + + // Template is the template attempting to be rendered. + Template *template.Template + + // TemplateConfigs is the list of template configs that correspond to this + // template. + TemplateConfigs []*config.TemplateConfig + + // Unwatched is the list of dependencies that are not present in the watcher. + // This value may change over time due to the n-pass evaluation. + UnwatchedDeps *dep.Set + + // UpdatedAt is the last time this render event was updated. + UpdatedAt time.Time + + // Used is the full list of dependencies seen in the template. Because of + // the n-pass evaluation, this number can change over time. The dependecnies + // in this list may or may not have data. This just contains the list of all + // dependencies parsed out of the template with the current data. 
+ UsedDeps *dep.Set + + // WouldRender determines if the template would have been rendered. A template + // would have been rendered if all the dependencies are satisfied, but may + // not have actually rendered if the file was already present or if an error + // occurred when trying to write the file. + WouldRender bool + // LastWouldRender marks the last time the template would have rendered. LastWouldRender time.Time + // DidRender determines if the Template was actually written to disk. In dry + // mode, this will always be false, since templates are not written to disk + // in dry mode. A template is only rendered to disk if all dependencies are + // satisfied and the template is not already in place with the same contents. + DidRender bool + // LastDidRender marks the last time the template was written to disk. LastDidRender time.Time } -// NewRunner accepts a slice of ConfigTemplates and returns a pointer to the new +// NewRunner accepts a slice of TemplateConfigs and returns a pointer to the new // Runner and any error that occurred during creation. func NewRunner(config *config.Config, dry, once bool) (*Runner, error) { log.Printf("[INFO] (runner) creating new runner (dry: %v, once: %v)", dry, once) @@ -167,19 +215,19 @@ func (r *Runner) Start() { continue NEXT_Q } - for _, c := range r.configTemplatesFor(t) { - if c.Wait.IsActive() { + for _, c := range r.templateConfigsFor(t) { + if *c.Wait.Enabled { log.Printf("[DEBUG] (runner) enabling template-specific quiescence for %q", t.ID()) r.quiescenceMap[t.ID()] = newQuiescence( - r.quiescenceCh, c.Wait.Min, c.Wait.Max, t) + r.quiescenceCh, *c.Wait.Min, *c.Wait.Max, t) continue NEXT_Q } } - if r.config.Wait.IsActive() { + if *r.config.Wait.Enabled { log.Printf("[DEBUG] (runner) enabling global quiescence for %q", t.ID()) r.quiescenceMap[t.ID()] = newQuiescence( - r.quiescenceCh, r.config.Wait.Min, r.config.Wait.Max, t) + r.quiescenceCh, *r.config.Wait.Min, *r.config.Wait.Max, t) continue NEXT_Q } } @@ -189,20 +237,41 @@ func (r *Runner) Start() { log.Printf("[WARN] (runner) watching %d dependencies - watching this "+ "many dependencies could DDoS your consul cluster", r.watcher.Size()) } else { - log.Printf("[INFO] (runner) watching %d dependencies", r.watcher.Size()) + log.Printf("[DEBUG] (runner) watching %d dependencies", r.watcher.Size()) } if r.allTemplatesRendered() { // If an exec command was given and a command is not currently running, // spawn the child process for supervision. - if r.config.Exec.Command != "" { + if config.StringPresent(r.config.Exec.Command) { + // Lock the child because we are about to check if it exists. + r.childLock.Lock() + if r.child == nil { - if err := r.spawnChild(); err != nil { + env := r.config.Exec.Env.Copy() + env.Custom = append(r.childEnv(), env.Custom...) + child, err := spawnChild(&spawnChildInput{ + Stdin: r.inStream, + Stdout: r.outStream, + Stderr: r.errStream, + Command: config.StringVal(r.config.Exec.Command), + Env: env.Env(), + ReloadSignal: config.SignalVal(r.config.Exec.ReloadSignal), + KillSignal: config.SignalVal(r.config.Exec.KillSignal), + KillTimeout: config.TimeDurationVal(r.config.Exec.KillTimeout), + Splay: config.TimeDurationVal(r.config.Exec.Splay), + }) + if err != nil { r.ErrCh <- err + r.childLock.Unlock() return } + r.child = child } + // Unlock the child, we are done now. + r.childLock.Unlock() + // It's possible that we didn't start a process, in which case no // channel is returned. 
If we did get a new exitCh, that means a child // was spawned, so we need to watch a new exitCh. It is also possible @@ -240,9 +309,9 @@ func (r *Runner) Start() { OUTER: select { - case view := <-r.watcher.DataCh: + case view := <-r.watcher.DataCh(): // Receive this update - r.Receive(view.Dependency, view.Data()) + r.Receive(view.Dependency(), view.Data()) // Drain all dependency data. Given a large number of dependencies, it is // feasible that we have data for more than one of them. Instead of @@ -255,8 +324,8 @@ func (r *Runner) Start() { // more information about this optimization and the entire backstory. for { select { - case view := <-r.watcher.DataCh: - r.Receive(view.Dependency, view.Data()) + case view := <-r.watcher.DataCh(): + r.Receive(view.Dependency(), view.Data()) default: break OUTER } @@ -269,34 +338,16 @@ func (r *Runner) Start() { log.Printf("[INFO] (runner) watcher triggered by de-duplication manager") break OUTER - case err := <-r.watcher.ErrCh: - // If this is our own internal error, see if we should hard exit. - if derr, ok := err.(*dep.FetchError); ok { - log.Printf("[DEBUG] (runner) detected custom error type") - if derr.ShouldExit() { - log.Printf("[DEBUG] (runner) custom error asked for hard exit") - r.ErrCh <- derr.OriginalError() - return - } - } - - // Intentionally do not send the error back up to the runner. Eventually, - // once Consul API implements errwrap and multierror, we can check the - // "type" of error and conditionally alert back. - // - // if err.Contains(Something) { - // errCh <- err - // } + case err := <-r.watcher.ErrCh(): + // Push the error back up the stack log.Printf("[ERR] (runner) watcher reported error: %s", err) - if r.once { - r.ErrCh <- err - return - } + r.ErrCh <- err + return case tmpl := <-r.quiescenceCh: // Remove the quiescence for this template from the map. This will force // the upcoming Run call to actually evaluate and render the template. - log.Printf("[INFO] (runner) received template %q from quiescence", tmpl.ID()) + log.Printf("[DEBUG] (runner) received template %q from quiescence", tmpl.ID()) delete(r.quiescenceMap, tmpl.ID()) case c := <-childExitCh: @@ -309,8 +360,8 @@ func (r *Runner) Start() { return } - // If we got this far, that means we got new data or one of the timers fired, - // so attempt to re-render. + // If we got this far, that means we got new data or one of the timers + // fired, so attempt to re-render. if err := r.Run(); err != nil { r.ErrCh <- err return @@ -320,6 +371,13 @@ func (r *Runner) Start() { // Stop halts the execution of this runner and its subprocesses. 
func (r *Runner) Stop() { + r.stopLock.Lock() + defer r.stopLock.Unlock() + + if r.stopped { + return + } + log.Printf("[INFO] (runner) stopping") r.stopDedup() r.stopWatcher() @@ -330,6 +388,8 @@ func (r *Runner) Stop() { r.config.PidFile, err) } + r.stopped = true + close(r.DoneCh) } @@ -356,8 +416,6 @@ func (r *Runner) stopDedup() { if r.dedup != nil { log.Printf("[DEBUG] (runner) stopping de-duplication manager") r.dedup.Stop() - } else { - log.Printf("[DEBUG] (runner) de-duplication manager is not running") } } @@ -365,8 +423,6 @@ func (r *Runner) stopWatcher() { if r.watcher != nil { log.Printf("[DEBUG] (runner) stopping watcher") r.watcher.Stop() - } else { - log.Printf("[DEBUG] (runner) watcher is not running") } } @@ -377,8 +433,6 @@ func (r *Runner) stopChild() { if r.child != nil { log.Printf("[DEBUG] (runner) stopping child process") r.child.Stop() - } else { - log.Printf("[DEBUG] (runner) child is not running") } } @@ -387,6 +441,9 @@ func (r *Runner) stopChild() { // is "renderable" (i.e. all its Dependencies have been downloaded at least // once). func (r *Runner) Receive(d dep.Dependency, data interface{}) { + r.dependenciesLock.Lock() + defer r.dependenciesLock.Unlock() + // Just because we received data, it does not mean that we are actually // watching for that data. How is that possible you may ask? Well, this // Runner's data channel is pooled, meaning it accepts multiple data views @@ -400,8 +457,8 @@ func (r *Runner) Receive(d dep.Dependency, data interface{}) { // https://github.com/hashicorp/consul-template/issues/198 // // and by "little" bug, I mean really big bug. - if _, ok := r.dependencies[d.HashCode()]; ok { - log.Printf("[DEBUG] (runner) receiving dependency %s", d.Display()) + if _, ok := r.dependencies[d.String()]; ok { + log.Printf("[DEBUG] (runner) receiving dependency %s", d) r.brain.Remember(d, data) } } @@ -425,15 +482,29 @@ func (r *Runner) Signal(s os.Signal) error { // Please note that all templates are rendered **and then** any commands are // executed. func (r *Runner) Run() error { - log.Printf("[INFO] (runner) running") + log.Printf("[INFO] (runner) initiating run") var wouldRenderAny, renderedAny bool - var commands []*config.ConfigTemplate + var commands []*config.TemplateConfig depsMap := make(map[string]dep.Dependency) for _, tmpl := range r.templates { log.Printf("[DEBUG] (runner) checking template %s", tmpl.ID()) + // Grab the last event + lastEvent := r.renderEvents[tmpl.ID()] + + // Create the event + event := &RenderEvent{ + Template: tmpl, + TemplateConfigs: r.templateConfigsFor(tmpl), + } + + if lastEvent != nil { + event.LastWouldRender = lastEvent.LastWouldRender + event.LastDidRender = lastEvent.LastDidRender + } + // Check if we are currently the leader instance isLeader := true if r.dedup != nil { @@ -456,38 +527,44 @@ func (r *Runner) Run() error { // Attempt to render the template, returning any missing dependencies and // the rendered contents. If there are any missing dependencies, the // contents cannot be rendered or trusted! - used, missing, contents, err := tmpl.Execute(r.brain) + result, err := tmpl.Execute(&template.ExecuteInput{ + Brain: r.brain, + Env: r.childEnv(), + }) if err != nil { - return err + return errors.Wrap(err, tmpl.Source()) } + // Grab the list of used and missing dependencies. + missing, used := result.Missing, result.Used + // Add the dependency to the list of dependencies for this runner. 
- for _, d := range used { + for _, d := range used.List() { // If we've taken over leadership for a template, we may have data // that is cached, but not have the watcher. We must treat this as // missing so that we create the watcher and re-run the template. if isLeader && !r.watcher.Watching(d) { - missing = append(missing, d) + missing.Add(d) } - if _, ok := depsMap[d.HashCode()]; !ok { - depsMap[d.HashCode()] = d + if _, ok := depsMap[d.String()]; !ok { + depsMap[d.String()] = d } } // Diff any missing dependencies the template reported with dependencies // the watcher is watching. - var unwatched []dep.Dependency - for _, d := range missing { + unwatched := new(dep.Set) + for _, d := range missing.List() { if !r.watcher.Watching(d) { - unwatched = append(unwatched, d) + unwatched.Add(d) } } // If there are unwatched dependencies, start the watcher and move onto the // next one. - if len(unwatched) > 0 { - log.Printf("[INFO] (runner) was not watching %d dependencies", len(unwatched)) - for _, d := range unwatched { + if l := unwatched.Len(); l > 0 { + log.Printf("[DEBUG] (runner) was not watching %d dependencies", l) + for _, d := range unwatched.List() { // If we are deduplicating, we must still handle non-sharable // dependencies, since those will be ignored. if isLeader || !d.CanShare() { @@ -499,18 +576,23 @@ func (r *Runner) Run() error { // If the template is missing data for some dependencies then we are not // ready to render and need to move on to the next one. - if len(missing) > 0 { - log.Printf("[INFO] (runner) missing data for %d dependencies", len(missing)) + if l := missing.Len(); l > 0 { + log.Printf("[DEBUG] (runner) missing data for %d dependencies", l) continue } // Trigger an update of the de-duplicaiton manager if r.dedup != nil && isLeader { - if err := r.dedup.UpdateDeps(tmpl, used); err != nil { + if err := r.dedup.UpdateDeps(tmpl, used.List()); err != nil { log.Printf("[ERR] (runner) failed to update dependency data for de-duplication: %v", err) } } + // Update event information with dependencies. + event.MissingDeps = missing + event.UnwatchedDeps = unwatched + event.UsedDeps = used + // If quiescence is activated, start/update the timers and loop back around. // We do not want to render the templates yet. if q, ok := r.quiescenceMap[tmpl.ID()]; ok { @@ -518,29 +600,35 @@ func (r *Runner) Run() error { continue } - // For each configuration template that is tied to this template, attempt to + // For each template configuration that is tied to this template, attempt to // render it to disk and accumulate commands for later use. 
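Throughout this hunk the old slices and maps keyed by HashCode() give way to a Set value exposing Add, Len, and List. A hypothetical minimal analogue, just to show the shape of that API (this is not the real dep.Set):

package main

import "fmt"

// stringSet is a toy stand-in for a dependency set: membership is keyed by a
// string form of the dependency, and duplicate Adds are no-ops.
type stringSet struct {
	m map[string]struct{}
}

func newStringSet() *stringSet { return &stringSet{m: make(map[string]struct{})} }

func (s *stringSet) Add(v string) { s.m[v] = struct{}{} }

func (s *stringSet) Len() int { return len(s.m) }

func (s *stringSet) List() []string {
	out := make([]string, 0, len(s.m))
	for k := range s.m {
		out = append(out, k)
	}
	return out
}

func main() {
	missing := newStringSet()
	missing.Add(`kv.get("service/config")`)
	missing.Add(`kv.get("service/config")`) // duplicate, ignored
	fmt.Println(missing.Len(), missing.List())
}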
- for _, ctemplate := range r.configTemplatesFor(tmpl) { - log.Printf("[DEBUG] (runner) checking ctemplate %+v", ctemplate) + for _, templateConfig := range r.templateConfigsFor(tmpl) { + log.Printf("[DEBUG] (runner) rendering %s", templateConfig.Display()) // Render the template, taking dry mode into account - wouldRender, didRender, err := r.render(contents, ctemplate.Destination, ctemplate.Perms, ctemplate.Backup) + result, err := Render(&RenderInput{ + Backup: config.BoolVal(templateConfig.Backup), + Contents: result.Output, + Dry: r.dry, + DryStream: r.outStream, + Path: config.StringVal(templateConfig.Destination), + Perms: config.FileModeVal(templateConfig.Perms), + }) if err != nil { - log.Printf("[DEBUG] (runner) error rendering %s", tmpl.ID()) - return err + return errors.Wrap(err, "error rendering "+templateConfig.Display()) } - log.Printf("[DEBUG] (runner) wouldRender: %t, didRender: %t", wouldRender, didRender) + renderTime := time.Now().UTC() // If we would have rendered this template (but we did not because the // contents were the same or something), we should consider this template // rendered even though the contents on disk have not been updated. We // will not fire commands unless the template was _actually_ rendered to // disk though. - if wouldRender { - // Make a note that we have rendered this template (required for once - // mode and just generally nice for debugging purposes). - r.markRenderTime(tmpl.ID(), false) + if result.WouldRender { + // This event would have rendered + event.WouldRender = true + event.LastWouldRender = renderTime // Record that at least one template would have been rendered. wouldRenderAny = true @@ -548,13 +636,16 @@ func (r *Runner) Run() error { // If we _actually_ rendered the template to disk, we want to run the // appropriate commands. - if didRender { + if result.DidRender { + log.Printf("[INFO] (runner) rendered %s", templateConfig.Display()) + + // This event did render + event.DidRender = true + event.LastDidRender = renderTime + // Record that at least one template was rendered. renderedAny = true - // Store the render time - r.markRenderTime(tmpl.ID(), true) - if !r.dry { // If the template was rendered (changed) and we are not in dry-run mode, // aggregate commands, ignoring previously known commands @@ -562,16 +653,30 @@ func (r *Runner) Run() error { // Future-self Q&A: Why not use a map for the commands instead of an // array with an expensive lookup option? Well I'm glad you asked that // future-self! One of the API promises is that commands are executed - // in the order in which they are provided in the ConfigTemplate + // in the order in which they are provided in the TemplateConfig // definitions. If we inserted commands into a map, we would lose that // relative ordering and people would be unhappy. 
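The Render call above reports both WouldRender and DidRender, keeping the distinction the old render() drew: a template can be fully resolvable yet skip the disk write because the contents are unchanged, and only an actual write should fire commands. A simplified stand-in for that decision (no perms, backup, or dry-mode handling):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// render writes contents to dest only when they differ from what is already
// on disk. wouldRender is true whenever the data was complete enough to try;
// didRender is true only when the file actually changed.
func render(dest string, contents []byte) (wouldRender, didRender bool, err error) {
	existing, err := ioutil.ReadFile(dest)
	if err != nil && !os.IsNotExist(err) {
		return false, false, err
	}
	if bytes.Equal(existing, contents) {
		return true, false, nil // already up to date: no write, no commands
	}
	if err := ioutil.WriteFile(dest, contents, 0644); err != nil {
		return false, false, err
	}
	return true, true, nil
}

func main() {
	dest := filepath.Join(os.TempDir(), "ct-render-example.conf")
	would, did, _ := render(dest, []byte("port = 8080\n"))
	fmt.Println(would, did) // first run: true true
	would, did, _ = render(dest, []byte("port = 8080\n"))
	fmt.Println(would, did) // unchanged contents: true false
}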
- if ctemplate.Command != "" && !commandExists(ctemplate, commands) { - log.Printf("[DEBUG] (runner) appending command: %s", ctemplate.Command) - commands = append(commands, ctemplate) + // if config.StringPresent(ctemplate.Command) + if c := config.StringVal(templateConfig.Exec.Command); c != "" { + existing := findCommand(templateConfig, commands) + if existing != nil { + log.Printf("[DEBUG] (runner) skipping command %q from %s (already appended from %s)", + c, templateConfig.Display(), existing.Display()) + } else { + log.Printf("[DEBUG] (runner) appending command %q from %s", + c, templateConfig.Display()) + commands = append(commands, templateConfig) + } } } } } + + // Send updated render event + r.renderEventsLock.Lock() + event.UpdatedAt = time.Now().UTC() + r.renderEvents[tmpl.ID()] = event + r.renderEventsLock.Unlock() } // Check if we need to deliver any rendered signals @@ -590,11 +695,24 @@ func (r *Runner) Run() error { // ensures all commands execute at least once. var errs []error for _, t := range commands { - log.Printf("[DEBUG] (runner) running command: `%s`, timeout: %s", - t.Command, t.CommandTimeout) - if err := r.execute(t.Command, t.CommandTimeout); err != nil { - log.Printf("[ERR] (runner) error running command: %s", err) - errs = append(errs, err) + command := config.StringVal(t.Exec.Command) + log.Printf("[INFO] (runner) executing command %q from %s", command, t.Display()) + env := t.Exec.Env.Copy() + env.Custom = append(r.childEnv(), env.Custom...) + if _, err := spawnChild(&spawnChildInput{ + Stdin: r.inStream, + Stdout: r.outStream, + Stderr: r.errStream, + Command: command, + Env: env.Env(), + Timeout: config.TimeDurationVal(t.Exec.Timeout), + ReloadSignal: config.SignalVal(t.Exec.ReloadSignal), + KillSignal: config.SignalVal(t.Exec.KillSignal), + KillTimeout: config.TimeDurationVal(t.Exec.KillTimeout), + Splay: config.TimeDurationVal(t.Exec.Splay), + }); err != nil { + s := fmt.Sprintf("failed to execute command %q from %s", command, t.Display()) + errs = append(errs, errors.Wrap(err, s)) } } @@ -624,18 +742,16 @@ func (r *Runner) Run() error { // init() creates the Runner's underlying data structures and returns an error // if any problems occur. func (r *Runner) init() error { - // Ensure we have default vaults - conf := config.DefaultConfig() - conf.Merge(r.config) - r.config = conf + // Ensure default configuration values + r.config = config.DefaultConfig().Merge(r.config) + r.config.Finalize() // Print the final config for debugging - result, err := json.MarshalIndent(r.config, "", " ") + result, err := json.Marshal(r.config) if err != nil { return err } - log.Printf("[DEBUG] (runner) final config (tokens suppressed):\n\n%s\n\n", - result) + log.Printf("[DEBUG] (runner) final config: %s", result) // Create the clientset clients, err := newClientSet(r.config) @@ -650,16 +766,21 @@ func (r *Runner) init() error { } r.watcher = watcher - numTemplates := len(r.config.ConfigTemplates) + numTemplates := len(*r.config.Templates) templates := make([]*template.Template, 0, numTemplates) - ctemplatesMap := make(map[string][]*config.ConfigTemplate) + ctemplatesMap := make(map[string]config.TemplateConfigs) - // Iterate over each ConfigTemplate, creating a new Template resource for each + // Iterate over each TemplateConfig, creating a new Template resource for each // entry. Templates are parsed and saved, and a map of templates to their // config templates is kept so templates can lookup their commands and output // destinations. 
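findCommand replaces the old boolean commandExists so the runner can log which earlier template configuration already queued the same command; either way, commands remain in a slice because execution order must follow the order the configurations were defined in. The order-preserving dedupe on its own:

package main

import "fmt"

// appendCommand keeps commands in definition order and skips duplicates by a
// linear scan, mirroring why a slice (not a map) holds the pending commands.
func appendCommand(commands []string, c string) []string {
	for _, existing := range commands {
		if existing == c {
			return commands // already queued; keep the earlier position
		}
	}
	return append(commands, c)
}

func main() {
	var commands []string
	for _, c := range []string{"systemctl reload nginx", "consul reload", "systemctl reload nginx"} {
		commands = appendCommand(commands, c)
	}
	fmt.Println(commands)
}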
- for _, ctmpl := range r.config.ConfigTemplates { - tmpl, err := template.NewTemplate(ctmpl.Source, ctmpl.EmbeddedTemplate, ctmpl.LeftDelim, ctmpl.RightDelim) + for _, ctmpl := range *r.config.Templates { + tmpl, err := template.NewTemplate(&template.NewTemplateInput{ + Source: config.StringVal(ctmpl.Source), + Contents: config.StringVal(ctmpl.Contents), + LeftDelim: config.StringVal(ctmpl.LeftDelim), + RightDelim: config.StringVal(ctmpl.RightDelim), + }) if err != nil { return err } @@ -669,7 +790,7 @@ func (r *Runner) init() error { } if _, ok := ctemplatesMap[tmpl.ID()]; !ok { - ctemplatesMap[tmpl.ID()] = make([]*config.ConfigTemplate, 0, 1) + ctemplatesMap[tmpl.ID()] = make([]*config.TemplateConfig, 0, 1) } ctemplatesMap[tmpl.ID()] = append(ctemplatesMap[tmpl.ID()], ctmpl) } @@ -695,12 +816,11 @@ func (r *Runner) init() error { r.quiescenceMap = make(map[string]*quiescence) r.quiescenceCh = make(chan *template.Template) - // Setup the dedup manager if needed. This is - if r.config.Deduplicate.Enabled { + if *r.config.Dedup.Enabled { if r.once { log.Printf("[INFO] (runner) disabling de-duplication in once mode") } else { - r.dedup, err = NewDedupManager(r.config, clients, r.brain, r.templates) + r.dedup, err = NewDedupManager(r.config.Dedup, clients, r.brain, r.templates) if err != nil { return err } @@ -716,34 +836,37 @@ func (r *Runner) init() error { // At the end of this function, the given depsMap is converted to a slice and // stored on the runner. func (r *Runner) diffAndUpdateDeps(depsMap map[string]dep.Dependency) { + r.dependenciesLock.Lock() + defer r.dependenciesLock.Unlock() + // Diff and up the list of dependencies, stopping any unneeded watchers. - log.Printf("[INFO] (runner) diffing and updating dependencies") + log.Printf("[DEBUG] (runner) diffing and updating dependencies") for key, d := range r.dependencies { if _, ok := depsMap[key]; !ok { - log.Printf("[DEBUG] (runner) %s is no longer needed", d.Display()) + log.Printf("[DEBUG] (runner) %s is no longer needed", d) r.watcher.Remove(d) r.brain.Forget(d) } else { - log.Printf("[DEBUG] (runner) %s is still needed", d.Display()) + log.Printf("[DEBUG] (runner) %s is still needed", d) } } r.dependencies = depsMap } -// ConfigTemplateFor returns the ConfigTemplate for the given Template -func (r *Runner) configTemplatesFor(tmpl *template.Template) []*config.ConfigTemplate { +// TemplateConfigFor returns the TemplateConfig for the given Template +func (r *Runner) templateConfigsFor(tmpl *template.Template) []*config.TemplateConfig { return r.ctemplatesMap[tmpl.ID()] } -// ConfigTemplateMapping returns a mapping between the template ID and the set -// of ConfigTemplate represented by the template ID -func (r *Runner) ConfigTemplateMapping() map[string][]config.ConfigTemplate { - m := make(map[string][]config.ConfigTemplate, len(r.ctemplatesMap)) +// TemplateConfigMapping returns a mapping between the template ID and the set +// of TemplateConfig represented by the template ID +func (r *Runner) TemplateConfigMapping() map[string][]config.TemplateConfig { + m := make(map[string][]config.TemplateConfig, len(r.ctemplatesMap)) for id, set := range r.ctemplatesMap { - ctmpls := make([]config.ConfigTemplate, len(set)) + ctmpls := make([]config.TemplateConfig, len(set)) m[id] = ctmpls for i, ctmpl := range set { ctmpls[i] = *ctmpl @@ -768,169 +891,65 @@ func (r *Runner) allTemplatesRendered() bool { return true } -// markRenderTime stores the render time for the given template. 
If didRender is -// true, it stores the time for the template having been rendered, otherwise it -// stores it as would have been rendered. -func (r *Runner) markRenderTime(tmplID string, didRender bool) { - r.renderEventsLock.Lock() - defer r.renderEventsLock.Unlock() - - // Get the current time - now := time.Now() - - // Create the event for the template ID if it is the first time - event, ok := r.renderEvents[tmplID] - if !ok { - event = &RenderEvent{} - r.renderEvents[tmplID] = event - } - - if didRender { - event.LastDidRender = now - } else { - event.LastWouldRender = now - } -} - -// Render accepts a Template and a destination on disk. The first return -// parameter is a boolean that indicates if the template would have been -// rendered. Since this function is idempotent (meaning it does not write the -// template if the contents are the same), it is possible that a template is -// renderable, but never actually rendered because the contents are already -// present on disk in the correct state. In this situation, we want to inform -// the parent that the template would have been rendered, but was not. The -// second return value indicates if the template was actually committed to disk. -// By the associative property, if the second return value is true, the first -// return value must also be true (but not necessarily the other direction). The -// second return value indicates whether the caller should take action given a -// template on disk has changed. -// -// No template exists on disk: true, true, nil -// Template exists, but contents are different: true, true, nil -// Template exists, but contents are the same: true, false, nil -func (r *Runner) render(contents []byte, dest string, perms os.FileMode, backup bool) (bool, bool, error) { - existingContents, err := ioutil.ReadFile(dest) - if err != nil && !os.IsNotExist(err) { - return false, false, err - } - - if bytes.Equal(contents, existingContents) { - return true, false, nil - } +// childEnv creates a map of environment variables for child processes to have +// access to configurations in Consul Template's configuration. +func (r *Runner) childEnv() []string { + var m = make(map[string]string) - if r.dry { - fmt.Fprintf(r.outStream, "> %s\n%s", dest, contents) - } else { - if err := atomicWrite(dest, contents, perms, backup); err != nil { - return false, false, err - } + if config.StringPresent(r.config.Consul.Address) { + m["CONSUL_HTTP_ADDR"] = config.StringVal(r.config.Consul.Address) } - return true, true, nil -} - -// execute accepts a command string and runs that command string on the current -// system. -func (r *Runner) execute(command string, timeout time.Duration) error { - var shell, flag string - if runtime.GOOS == "windows" { - shell, flag = "cmd", "/C" - } else { - shell, flag = "/bin/sh", "-c" + if config.BoolVal(r.config.Consul.Auth.Enabled) { + m["CONSUL_HTTP_AUTH"] = r.config.Consul.Auth.String() } - // Copy the current environment as well as some custom environment variables - // that are read by other Consul tools (like Consul's HTTP address). This - // allows the user to specify these values once (in the Consul Template config - // or command line), instead of in multiple places. 
- var customEnv = make(map[string]string) - - if r.config.Consul != "" { - customEnv["CONSUL_HTTP_ADDR"] = r.config.Consul - } - - if r.config.Token != "" { - customEnv["CONSUL_HTTP_TOKEN"] = r.config.Token - } - - if r.config.Auth.Enabled { - customEnv["CONSUL_HTTP_AUTH"] = r.config.Auth.String() - } - - customEnv["CONSUL_HTTP_SSL"] = strconv.FormatBool(r.config.SSL.Enabled) - customEnv["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(r.config.SSL.Verify) - - if r.config.Vault.Address != "" { - customEnv["VAULT_ADDR"] = r.config.Vault.Address - } + m["CONSUL_HTTP_SSL"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Enabled)) + m["CONSUL_HTTP_SSL_VERIFY"] = strconv.FormatBool(config.BoolVal(r.config.Consul.SSL.Verify)) - if !r.config.Vault.SSL.Verify { - customEnv["VAULT_SKIP_VERIFY"] = "true" + if config.StringPresent(r.config.Vault.Address) { + m["VAULT_ADDR"] = config.StringVal(r.config.Vault.Address) } - if r.config.Vault.SSL.Cert != "" { - customEnv["VAULT_CLIENT_CERT"] = r.config.Vault.SSL.Cert + if !config.BoolVal(r.config.Vault.SSL.Verify) { + m["VAULT_SKIP_VERIFY"] = "true" } - if r.config.Vault.SSL.Key != "" { - customEnv["VAULT_CLIENT_KEY"] = r.config.Vault.SSL.Key + if config.StringPresent(r.config.Vault.SSL.Cert) { + m["VAULT_CLIENT_CERT"] = config.StringVal(r.config.Vault.SSL.Cert) } - if r.config.Vault.SSL.CaPath != "" { - customEnv["VAULT_CAPATH"] = r.config.Vault.SSL.CaPath + if config.StringPresent(r.config.Vault.SSL.Key) { + m["VAULT_CLIENT_KEY"] = config.StringVal(r.config.Vault.SSL.Key) } - if r.config.Vault.SSL.CaCert != "" { - customEnv["VAULT_CACERT"] = r.config.Vault.SSL.CaCert + if config.StringPresent(r.config.Vault.SSL.CaPath) { + m["VAULT_CAPATH"] = config.StringVal(r.config.Vault.SSL.CaPath) } - if r.config.Vault.SSL.ServerName != "" { - customEnv["VAULT_TLS_SERVER_NAME"] = r.config.Vault.SSL.ServerName + if config.StringPresent(r.config.Vault.SSL.CaCert) { + m["VAULT_CACERT"] = config.StringVal(r.config.Vault.SSL.CaCert) } - currentEnv := os.Environ() - cmdEnv := make([]string, len(currentEnv), len(currentEnv)+len(customEnv)) - copy(cmdEnv, currentEnv) - for k, v := range customEnv { - cmdEnv = append(cmdEnv, fmt.Sprintf("%s=%s", k, v)) + if config.StringPresent(r.config.Vault.SSL.ServerName) { + m["VAULT_TLS_SERVER_NAME"] = config.StringVal(r.config.Vault.SSL.ServerName) } - // Create and invoke the command - cmd := exec.Command(shell, flag, command) - cmd.Stdout = r.outStream - cmd.Stderr = r.errStream - cmd.Env = cmdEnv - if err := cmd.Start(); err != nil { - return err + // Append runner-supplied env (this is supplied programatically). + for k, v := range r.Env { + m[k] = v } - done := make(chan error, 1) - go func() { - done <- cmd.Wait() - }() - - select { - case <-time.After(timeout): - if cmd.Process != nil { - if err := cmd.Process.Kill(); err != nil { - return fmt.Errorf("failed to kill %q in %s: %s", - command, timeout, err) - } - } - <-done // Allow the goroutine to finish - return fmt.Errorf( - "command %q\n"+ - "did not return for %s - if your command does not return, please\n"+ - "make sure to background it", - command, timeout) - case err := <-done: - return err + e := make([]string, 0, len(m)) + for k, v := range m { + e = append(e, k+"="+v) } + return e } // storePid is used to write out a PID file to disk. 
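childEnv gathers everything into a map first, so later sources (including the runner-supplied Env that the embedding application injects) override earlier ones, and only then flattens to the KEY=value strings a child process expects. A sketch of that two-step construction with hypothetical inputs:

package main

import (
	"fmt"
	"sort"
)

// buildEnv merges base settings with overrides (overrides win), then flattens
// the result into the "KEY=value" form used for a child process environment.
func buildEnv(base, overrides map[string]string) []string {
	m := make(map[string]string, len(base)+len(overrides))
	for k, v := range base {
		m[k] = v
	}
	for k, v := range overrides {
		m[k] = v
	}
	e := make([]string, 0, len(m))
	for k, v := range m {
		e = append(e, k+"="+v)
	}
	sort.Strings(e) // deterministic output for the example only
	return e
}

func main() {
	base := map[string]string{
		"CONSUL_HTTP_ADDR": "127.0.0.1:8500",
		"CONSUL_HTTP_SSL":  "false",
	}
	overrides := map[string]string{"CONSUL_HTTP_SSL": "true"}
	fmt.Println(buildEnv(base, overrides))
}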
func (r *Runner) storePid() error { - path := r.config.PidFile + path := config.StringVal(r.config.PidFile) if path == "" { return nil } @@ -953,7 +972,7 @@ func (r *Runner) storePid() error { // deletePid is used to remove the PID on exit. func (r *Runner) deletePid() error { - path := r.config.PidFile + path := config.StringVal(r.config.PidFile) if path == "" { return nil } @@ -975,39 +994,52 @@ func (r *Runner) deletePid() error { return nil } -// spawnChild creates a new child process and stores it on the runner object. -func (r *Runner) spawnChild() error { - r.childLock.Lock() - defer r.childLock.Unlock() +// spawnChildInput is used as input to spawn a child process. +type spawnChildInput struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Command string + Timeout time.Duration + Env []string + ReloadSignal os.Signal + KillSignal os.Signal + KillTimeout time.Duration + Splay time.Duration +} +// spawnChild spawns a child process with the given inputs and returns the +// resulting child. +func spawnChild(i *spawnChildInput) (*child.Child, error) { p := shellwords.NewParser() p.ParseEnv = true p.ParseBacktick = true - args, err := p.Parse(r.config.Exec.Command) + args, err := p.Parse(i.Command) if err != nil { - return err + return nil, errors.Wrap(err, "failed parsing command") } child, err := child.New(&child.NewInput{ - Stdin: r.inStream, - Stdout: r.outStream, - Stderr: r.errStream, + Stdin: i.Stdin, + Stdout: i.Stdout, + Stderr: i.Stderr, Command: args[0], Args: args[1:], - ReloadSignal: r.config.Exec.ReloadSignal, - KillSignal: r.config.Exec.KillSignal, - KillTimeout: r.config.Exec.KillTimeout, - Splay: r.config.Exec.Splay, + Env: i.Env, + Timeout: i.Timeout, + ReloadSignal: i.ReloadSignal, + KillSignal: i.KillSignal, + KillTimeout: i.KillTimeout, + Splay: i.Splay, }) if err != nil { - return fmt.Errorf("error creating child: %s", err) + return nil, errors.Wrap(err, "error creating child") } - r.child = child - if err := r.child.Start(); err != nil { - return fmt.Errorf("error starting child: %s", err) + if err := child.Start(); err != nil { + return nil, errors.Wrap(err, "child") } - return nil + return child, nil } // quiescence is an internal representation of a single template's quiescence @@ -1062,138 +1094,50 @@ func (q *quiescence) tick() { } } -// atomicWrite accepts a destination path and the template contents. It writes -// the template contents to a TempFile on disk, returning if any errors occur. -// -// If the parent destination directory does not exist, it will be created -// automatically with permissions 0755. To use a different permission, create -// the directory first or use `chmod` in a Command. -// -// If the destination path exists, all attempts will be made to preserve the -// existing file permissions. If those permissions cannot be read, an error is -// returned. If the file does not exist, it will be created automatically with -// permissions 0644. To use a different permission, create the destination file -// first or use `chmod` in a Command. -// -// If no errors occur, the Tempfile is "renamed" (moved) to the destination -// path. 
-func atomicWrite(path string, contents []byte, perms os.FileMode, backup bool) error { - parent := filepath.Dir(path) - if _, err := os.Stat(parent); os.IsNotExist(err) { - if err := os.MkdirAll(parent, 0755); err != nil { - return err - } - } - - f, err := ioutil.TempFile(parent, "") - if err != nil { - return err - } - defer os.Remove(f.Name()) - - if _, err := f.Write(contents); err != nil { - return err - } - - if err := f.Sync(); err != nil { - return err - } - - if err := f.Close(); err != nil { - return err - } - - if err := os.Chmod(f.Name(), perms); err != nil { - return err - } - - // If we got this far, it means we are about to save the file. Copy the - // current contents of the file onto disk (if it exists) so we have a backup. - if backup { - if _, err := os.Stat(path); !os.IsNotExist(err) { - if err := copyFile(path, path+".bak"); err != nil { - return err - } - } - } - - if err := os.Rename(f.Name(), path); err != nil { - return err - } - - return nil -} - -// copyFile copies the file at src to the path at dst. Any errors that occur -// are returned. -func copyFile(src, dst string) error { - s, err := os.Open(src) - if err != nil { - return err - } - defer s.Close() - - stat, err := s.Stat() - if err != nil { - return err - } - - d, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, stat.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(d, s); err != nil { - d.Close() - return err - } - return d.Close() -} - -// Checks if a ConfigTemplate with the given data exists in the list of Config -// Templates. -func commandExists(c *config.ConfigTemplate, templates []*config.ConfigTemplate) bool { - needle := strings.TrimSpace(c.Command) +// findCommand searches the list of template configs for the given command and +// returns it if it exists. +func findCommand(c *config.TemplateConfig, templates []*config.TemplateConfig) *config.TemplateConfig { + needle := config.StringVal(c.Exec.Command) for _, t := range templates { - if needle == strings.TrimSpace(t.Command) { - return true + if needle == config.StringVal(t.Exec.Command) { + return t } } - - return false + return nil } // newClientSet creates a new client set from the given config. 
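The atomicWrite helper removed above documents a technique that is still worth noting: write to a temporary file in the destination's directory, then rename it over the destination so readers never observe a partially written file. A pared-down sketch (no backup or parent-directory creation):

package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// writeFileAtomic writes contents to a temp file beside the destination and
// renames it into place; rename within a filesystem is atomic, so readers see
// either the old file or the complete new one.
func writeFileAtomic(path string, contents []byte, perms os.FileMode) error {
	f, err := ioutil.TempFile(filepath.Dir(path), "tmp-")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name()) // harmless once the rename has succeeded

	if _, err := f.Write(contents); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(f.Name(), perms); err != nil {
		return err
	}
	return os.Rename(f.Name(), path)
}

func main() {
	dest := filepath.Join(os.TempDir(), "ct-atomic-example.conf")
	if err := writeFileAtomic(dest, []byte("port = 8080\n"), 0644); err != nil {
		log.Fatal(err)
	}
}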
-func newClientSet(config *config.Config) (*dep.ClientSet, error) { +func newClientSet(c *config.Config) (*dep.ClientSet, error) { clients := dep.NewClientSet() if err := clients.CreateConsulClient(&dep.CreateConsulClientInput{ - Address: config.Consul, - Token: config.Token, - AuthEnabled: config.Auth.Enabled, - AuthUsername: config.Auth.Username, - AuthPassword: config.Auth.Password, - SSLEnabled: config.SSL.Enabled, - SSLVerify: config.SSL.Verify, - SSLCert: config.SSL.Cert, - SSLKey: config.SSL.Key, - SSLCACert: config.SSL.CaCert, - SSLCAPath: config.SSL.CaPath, - ServerName: config.SSL.ServerName, + Address: config.StringVal(c.Consul.Address), + Token: config.StringVal(c.Consul.Token), + AuthEnabled: config.BoolVal(c.Consul.Auth.Enabled), + AuthUsername: config.StringVal(c.Consul.Auth.Username), + AuthPassword: config.StringVal(c.Consul.Auth.Password), + SSLEnabled: config.BoolVal(c.Consul.SSL.Enabled), + SSLVerify: config.BoolVal(c.Consul.SSL.Verify), + SSLCert: config.StringVal(c.Consul.SSL.Cert), + SSLKey: config.StringVal(c.Consul.SSL.Key), + SSLCACert: config.StringVal(c.Consul.SSL.CaCert), + SSLCAPath: config.StringVal(c.Consul.SSL.CaPath), + ServerName: config.StringVal(c.Consul.SSL.ServerName), }); err != nil { return nil, fmt.Errorf("runner: %s", err) } if err := clients.CreateVaultClient(&dep.CreateVaultClientInput{ - Address: config.Vault.Address, - Token: config.Vault.Token, - UnwrapToken: config.Vault.UnwrapToken, - SSLEnabled: config.Vault.SSL.Enabled, - SSLVerify: config.Vault.SSL.Verify, - SSLCert: config.Vault.SSL.Cert, - SSLKey: config.Vault.SSL.Key, - SSLCACert: config.Vault.SSL.CaCert, - SSLCAPath: config.Vault.SSL.CaPath, - ServerName: config.Vault.SSL.ServerName, + Address: config.StringVal(c.Vault.Address), + Token: config.StringVal(c.Vault.Token), + UnwrapToken: config.BoolVal(c.Vault.UnwrapToken), + SSLEnabled: config.BoolVal(c.Vault.SSL.Enabled), + SSLVerify: config.BoolVal(c.Vault.SSL.Verify), + SSLCert: config.StringVal(c.Vault.SSL.Cert), + SSLKey: config.StringVal(c.Vault.SSL.Key), + SSLCACert: config.StringVal(c.Vault.SSL.CaCert), + SSLCAPath: config.StringVal(c.Vault.SSL.CaPath), + ServerName: config.StringVal(c.Vault.SSL.ServerName), }); err != nil { return nil, fmt.Errorf("runner: %s", err) } @@ -1202,21 +1146,22 @@ func newClientSet(config *config.Config) (*dep.ClientSet, error) { } // newWatcher creates a new watcher. -func newWatcher(config *config.Config, clients *dep.ClientSet, once bool) (*watch.Watcher, error) { - log.Printf("[INFO] (runner) creating Watcher") - - watcher, err := watch.NewWatcher(&watch.WatcherConfig{ - Clients: clients, - Once: once, - MaxStale: config.MaxStale, - RetryFunc: func(current time.Duration) time.Duration { - return config.Retry - }, - RenewVault: config.Vault.Token != "" && config.Vault.RenewToken, +func newWatcher(c *config.Config, clients *dep.ClientSet, once bool) (*watch.Watcher, error) { + log.Printf("[INFO] (runner) creating watcher") + + w, err := watch.NewWatcher(&watch.NewWatcherInput{ + Clients: clients, + MaxStale: config.TimeDurationVal(c.MaxStale), + Once: once, + RenewVault: config.StringPresent(c.Vault.Token) && config.BoolVal(c.Vault.RenewToken), + RetryFuncConsul: watch.RetryFunc(c.Consul.Retry.RetryFunc()), + // TODO: Add a sane default retry - right now this only affects "local" + // dependencies like reading a file from disk. 
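Nearly every field read in the new client and watcher setup goes through config.StringVal, config.BoolVal, or config.TimeDurationVal: the rewritten configuration structs hold pointers so that an unset field is distinguishable from its zero value, and the helpers collapse nil back to a zero value at the call site. A plausible shape for such helpers, shown only to explain the pattern (not the config package's actual code):

package main

import (
	"fmt"
	"time"
)

// Nil-safe accessors: an unset (nil) pointer reads as the zero value.
func stringVal(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

func boolVal(b *bool) bool {
	if b == nil {
		return false
	}
	return *b
}

func timeDurationVal(d *time.Duration) time.Duration {
	if d == nil {
		return 0
	}
	return *d
}

func main() {
	addr := "127.0.0.1:8500"
	var token *string // never set in the config file
	fmt.Println(stringVal(&addr)) // "127.0.0.1:8500"
	fmt.Println(stringVal(token)) // ""
	fmt.Println(boolVal(nil), timeDurationVal(nil))
}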
+ RetryFuncDefault: nil, + RetryFuncVault: watch.RetryFunc(c.Vault.Retry.RetryFunc()), }) if err != nil { - return nil, err + return nil, errors.Wrap(err, "runner") } - - return watcher, err + return w, nil } diff --git a/vendor/github.com/hashicorp/consul-template/signals/signals.go b/vendor/github.com/hashicorp/consul-template/signals/signals.go index a26261e379ed..dacc3e62c1b2 100644 --- a/vendor/github.com/hashicorp/consul-template/signals/signals.go +++ b/vendor/github.com/hashicorp/consul-template/signals/signals.go @@ -7,13 +7,16 @@ import ( "strings" ) +// SIGNIL is the nil signal. var SIGNIL os.Signal = new(NilSignal) +// ValidSignals is the list of all valid signals. This is built at runtime +// because it is OS-dependent. var ValidSignals []string func init() { valid := make([]string, 0, len(SignalLookup)) - for k, _ := range SignalLookup { + for k := range SignalLookup { valid = append(valid, k) } sort.Strings(valid) @@ -26,7 +29,7 @@ func Parse(s string) (os.Signal, error) { sig, ok := SignalLookup[strings.ToUpper(s)] if !ok { return nil, fmt.Errorf("invalid signal %q - valid signals are %q", - sig, ValidSignals) + s, ValidSignals) } return sig, nil } diff --git a/vendor/github.com/hashicorp/consul-template/template/brain.go b/vendor/github.com/hashicorp/consul-template/template/brain.go index ca3f66330300..149fc4f9f3cf 100644 --- a/vendor/github.com/hashicorp/consul-template/template/brain.go +++ b/vendor/github.com/hashicorp/consul-template/template/brain.go @@ -11,8 +11,8 @@ import ( type Brain struct { sync.RWMutex - // data is the map of individual dependencies (by HashCode()) and the most - // recent data for that dependency. + // data is the map of individual dependencies and the most recent data for + // that dependency. data map[string]interface{} // receivedData is an internal tracker of which dependencies have stored data @@ -36,8 +36,8 @@ func (b *Brain) Remember(d dep.Dependency, data interface{}) { b.Lock() defer b.Unlock() - b.data[d.HashCode()] = data - b.receivedData[d.HashCode()] = struct{}{} + b.data[d.String()] = data + b.receivedData[d.String()] = struct{}{} } // Recall gets the current value for the given dependency in the Brain. @@ -46,11 +46,11 @@ func (b *Brain) Recall(d dep.Dependency) (interface{}, bool) { defer b.RUnlock() // If we have not received data for this dependency, return now. 
- if _, ok := b.receivedData[d.HashCode()]; !ok { + if _, ok := b.receivedData[d.String()]; !ok { return nil, false } - return b.data[d.HashCode()], true + return b.data[d.String()], true } // ForceSet is used to force set the value of a dependency @@ -69,6 +69,6 @@ func (b *Brain) Forget(d dep.Dependency) { b.Lock() defer b.Unlock() - delete(b.data, d.HashCode()) - delete(b.receivedData, d.HashCode()) + delete(b.data, d.String()) + delete(b.receivedData, d.String()) } diff --git a/vendor/github.com/hashicorp/consul-template/template/template_functions.go b/vendor/github.com/hashicorp/consul-template/template/funcs.go similarity index 74% rename from vendor/github.com/hashicorp/consul-template/template/template_functions.go rename to vendor/github.com/hashicorp/consul-template/template/funcs.go index bec0488fec39..8aac7fd4e741 100644 --- a/vendor/github.com/hashicorp/consul-template/template/template_functions.go +++ b/vendor/github.com/hashicorp/consul-template/template/funcs.go @@ -2,20 +2,21 @@ package template import ( "bytes" + "encoding/base64" "encoding/json" "fmt" "io/ioutil" - "log" - "os" "os/exec" "reflect" "regexp" "strconv" "strings" + "text/template" "time" "github.com/burntsushi/toml" dep "github.com/hashicorp/consul-template/dependency" + "github.com/pkg/errors" yaml "gopkg.in/yaml.v2" ) @@ -24,105 +25,158 @@ import ( var now = func() time.Time { return time.Now().UTC() } // datacentersFunc returns or accumulates datacenter dependencies. -func datacentersFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(...string) ([]string, error) { - return func(s ...string) ([]string, error) { +func datacentersFunc(b *Brain, used, missing *dep.Set) func() ([]string, error) { + return func() ([]string, error) { result := []string{} - d, err := dep.ParseDatacenters(s...) + d, err := dep.NewCatalogDatacentersQuery() if err != nil { return result, err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { return value.([]string), nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } +// envFunc returns a function which checks the value of an environment variable. +// Invokers can specify their own environment, which takes precedences over any +// real environment variables +func envFunc(b *Brain, used, missing *dep.Set, overrides []string) func(string) (string, error) { + return func(s string) (string, error) { + var result string + + d, err := dep.NewEnvQuery(s) + if err != nil { + return result, err + } + + used.Add(d) + + // Overrides lookup - we have to do this after adding the dependency, + // otherwise dedupe sharing won't work. + for _, e := range overrides { + split := strings.SplitN(e, "=", 2) + k, v := split[0], split[1] + if k == s { + return v, nil + } + } + + if value, ok := b.Recall(d); ok { + return value.(string), nil + } + + missing.Add(d) + + return result, nil + } +} + +// executeTemplateFunc executes the given template in the context of the +// parent. If an argument is specified, it will be used as the context instead. +// This can be used for nested template definitions. 
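The new env template function checks caller-supplied overrides (plain KEY=value strings) before anything else, which is the hook that lets an embedding application inject its own variables into rendering. The precedence rule in isolation, with os.Getenv standing in for the dependency lookup:

package main

import (
	"fmt"
	"os"
	"strings"
)

// lookupEnv returns the value from overrides if the key is present there,
// otherwise it falls back to the real process environment.
func lookupEnv(key string, overrides []string) string {
	for _, e := range overrides {
		parts := strings.SplitN(e, "=", 2)
		if len(parts) == 2 && parts[0] == key {
			return parts[1]
		}
	}
	return os.Getenv(key)
}

func main() {
	os.Setenv("GREETING", "from-the-os")
	fmt.Println(lookupEnv("GREETING", []string{"GREETING=from-overrides"}))
	fmt.Println(lookupEnv("GREETING", nil))
}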
+func executeTemplateFunc(t *template.Template) func(string, ...interface{}) (string, error) { + return func(s string, data ...interface{}) (string, error) { + var dot interface{} + switch len(data) { + case 0: + dot = nil + case 1: + dot = data[0] + default: + return "", fmt.Errorf("executeTemplate: wrong number of arguments, expected 1 or 2"+ + ", but got %d", len(data)+1) + } + var b bytes.Buffer + if err := t.ExecuteTemplate(&b, s, dot); err != nil { + return "", err + } + return b.String(), nil + } +} + // fileFunc returns or accumulates file dependencies. -func fileFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string) (string, error) { +func fileFunc(b *Brain, used, missing *dep.Set) func(string) (string, error) { return func(s string) (string, error) { if len(s) == 0 { return "", nil } - d, err := dep.ParseFile(s) + d, err := dep.NewFileQuery(s) if err != nil { return "", err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { if value == nil { return "", nil } return value.(string), nil } - addDependency(missing, d) + missing.Add(d) return "", nil } } // keyFunc returns or accumulates key dependencies. -func keyFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string) (string, error) { +func keyFunc(b *Brain, used, missing *dep.Set) func(string) (string, error) { return func(s string) (string, error) { if len(s) == 0 { return "", nil } - d, err := dep.ParseStoreKey(s) + d, err := dep.NewKVGetQuery(s) if err != nil { return "", err } + d.EnableBlocking() - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { if value == nil { return "", nil } return value.(string), nil } - addDependency(missing, d) + missing.Add(d) return "", nil } } // keyExistsFunc returns true if a key exists, false otherwise. -func keyExistsFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string) (bool, error) { +func keyExistsFunc(b *Brain, used, missing *dep.Set) func(string) (bool, error) { return func(s string) (bool, error) { if len(s) == 0 { return false, nil } - d, err := dep.ParseStoreKey(s) + d, err := dep.NewKVGetQuery(s) if err != nil { return false, err } - d.SetExistenceCheck(true) - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { - return value.(bool), nil + if value, ok := b.Recall(d); ok { + return value != nil, nil } - addDependency(missing, d) + missing.Add(d) return false, nil } @@ -130,37 +184,34 @@ func keyExistsFunc(brain *Brain, // keyWithDefaultFunc returns or accumulates key dependencies that have a // default value. -func keyWithDefaultFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string, string) (string, error) { +func keyWithDefaultFunc(b *Brain, used, missing *dep.Set) func(string, string) (string, error) { return func(s, def string) (string, error) { if len(s) == 0 { return def, nil } - d, err := dep.ParseStoreKey(s) + d, err := dep.NewKVGetQuery(s) if err != nil { return "", err } - d.SetDefault(def) - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { - if value == nil { + if value, ok := b.Recall(d); ok { + if value == nil || value.(string) == "" { return def, nil } return value.(string), nil } - addDependency(missing, d) + missing.Add(d) return def, nil } } // lsFunc returns or accumulates keyPrefix dependencies. 
-func lsFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) { +func lsFunc(b *Brain, used, missing *dep.Set) func(string) ([]*dep.KeyPair, error) { return func(s string) ([]*dep.KeyPair, error) { result := []*dep.KeyPair{} @@ -168,15 +219,15 @@ func lsFunc(brain *Brain, return result, nil } - d, err := dep.ParseStoreKeyPrefix(s) + d, err := dep.NewKVListQuery(s) if err != nil { return result, err } - addDependency(used, d) + used.Add(d) // Only return non-empty top-level keys - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { for _, pair := range value.([]*dep.KeyPair) { if pair.Key != "" && !strings.Contains(pair.Key, "/") { result = append(result, pair) @@ -185,116 +236,132 @@ func lsFunc(brain *Brain, return result, nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } // nodeFunc returns or accumulates catalog node dependency. -func nodeFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(...string) (*dep.NodeDetail, error) { - return func(s ...string) (*dep.NodeDetail, error) { +func nodeFunc(b *Brain, used, missing *dep.Set) func(...string) (*dep.CatalogNode, error) { + return func(s ...string) (*dep.CatalogNode, error) { - d, err := dep.ParseCatalogNode(s...) + d, err := dep.NewCatalogNodeQuery(strings.Join(s, "")) if err != nil { return nil, err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { - return value.(*dep.NodeDetail), nil + if value, ok := b.Recall(d); ok { + return value.(*dep.CatalogNode), nil } - addDependency(missing, d) + missing.Add(d) return nil, nil } } // nodesFunc returns or accumulates catalog node dependencies. -func nodesFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(...string) ([]*dep.Node, error) { +func nodesFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.Node, error) { return func(s ...string) ([]*dep.Node, error) { result := []*dep.Node{} - d, err := dep.ParseCatalogNodes(s...) + d, err := dep.NewCatalogNodesQuery(strings.Join(s, "")) if err != nil { return nil, err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { return value.([]*dep.Node), nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } // secretFunc returns or accumulates secret dependencies from Vault. -func secretFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(...string) (*dep.Secret, error) { +func secretFunc(b *Brain, used, missing *dep.Set) func(...string) (*dep.Secret, error) { return func(s ...string) (*dep.Secret, error) { - result := &dep.Secret{} + var result *dep.Secret if len(s) == 0 { return result, nil } - d, err := dep.ParseVaultSecret(s...) 
+ // TODO: Refactor into separate template functions + path, rest := s[0], s[1:] + data := make(map[string]interface{}) + for _, str := range rest { + parts := strings.SplitN(str, "=", 2) + if len(parts) != 2 { + return result, fmt.Errorf("not k=v pair %q", str) + } + + k, v := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + data[k] = v + } + + var d dep.Dependency + var err error + + if len(rest) == 0 { + d, err = dep.NewVaultReadQuery(path) + } else { + d, err = dep.NewVaultWriteQuery(path, data) + } + if err != nil { - return result, nil + return nil, err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { result = value.(*dep.Secret) return result, nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } // secretsFunc returns or accumulates a list of secret dependencies from Vault. -func secretsFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string) ([]string, error) { +func secretsFunc(b *Brain, used, missing *dep.Set) func(string) ([]string, error) { return func(s string) ([]string, error) { - result := []string{} + var result []string if len(s) == 0 { return result, nil } - d, err := dep.ParseVaultSecrets(s) + d, err := dep.NewVaultListQuery(s) if err != nil { - return result, nil + return nil, err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { result = value.([]string) return result, nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } // serviceFunc returns or accumulates health service dependencies. -func serviceFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(...string) ([]*dep.HealthService, error) { +func serviceFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.HealthService, error) { return func(s ...string) ([]*dep.HealthService, error) { result := []*dep.HealthService{} @@ -302,49 +369,47 @@ func serviceFunc(brain *Brain, return result, nil } - d, err := dep.ParseHealthServices(s...) + d, err := dep.NewHealthServiceQuery(strings.Join(s, "")) if err != nil { return nil, err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { return value.([]*dep.HealthService), nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } // servicesFunc returns or accumulates catalog services dependencies. -func servicesFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(...string) ([]*dep.CatalogService, error) { - return func(s ...string) ([]*dep.CatalogService, error) { - result := []*dep.CatalogService{} +func servicesFunc(b *Brain, used, missing *dep.Set) func(...string) ([]*dep.CatalogSnippet, error) { + return func(s ...string) ([]*dep.CatalogSnippet, error) { + result := []*dep.CatalogSnippet{} - d, err := dep.ParseCatalogServices(s...) + d, err := dep.NewCatalogServicesQuery(strings.Join(s, "")) if err != nil { return nil, err } - addDependency(used, d) + used.Add(d) - if value, ok := brain.Recall(d); ok { - return value.([]*dep.CatalogService), nil + if value, ok := b.Recall(d); ok { + return value.([]*dep.CatalogSnippet), nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } // treeFunc returns or accumulates keyPrefix dependencies. 
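secretFunc now interprets its own arguments: the first is the Vault path, and any remaining k=v pairs become write data, which is what selects a write query instead of a read query. The argument handling on its own (helper name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// parseSecretArgs splits a path from trailing "k=v" pairs; a non-empty data
// map is the signal that the caller wants a Vault write rather than a read.
func parseSecretArgs(args []string) (string, map[string]interface{}, error) {
	if len(args) == 0 {
		return "", nil, fmt.Errorf("missing path")
	}
	path, rest := args[0], args[1:]
	data := make(map[string]interface{}, len(rest))
	for _, s := range rest {
		parts := strings.SplitN(s, "=", 2)
		if len(parts) != 2 {
			return "", nil, fmt.Errorf("not a k=v pair: %q", s)
		}
		data[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
	}
	return path, data, nil
}

func main() {
	path, data, err := parseSecretArgs([]string{"transit/encrypt/my-key", "plaintext=aGVsbG8="})
	fmt.Println(path, data, len(data) > 0, err) // a write query, since data is non-empty
}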
-func treeFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string) ([]*dep.KeyPair, error) { +func treeFunc(b *Brain, used, missing *dep.Set) func(string) ([]*dep.KeyPair, error) { return func(s string) ([]*dep.KeyPair, error) { result := []*dep.KeyPair{} @@ -352,15 +417,15 @@ func treeFunc(brain *Brain, return result, nil } - d, err := dep.ParseStoreKeyPrefix(s) + d, err := dep.NewKVListQuery(s) if err != nil { return result, err } - addDependency(used, d) + used.Add(d) // Only return non-empty top-level keys - if value, ok := brain.Recall(d); ok { + if value, ok := b.Recall(d); ok { for _, pair := range value.([]*dep.KeyPair) { parts := strings.Split(pair.Key, "/") if parts[len(parts)-1] != "" { @@ -370,20 +435,39 @@ func treeFunc(brain *Brain, return result, nil } - addDependency(missing, d) + missing.Add(d) return result, nil } } -// vaultFunc is deprecated. Use secretFunc instead. -func vaultFunc(brain *Brain, - used, missing map[string]dep.Dependency) func(string) (*dep.Secret, error) { - return func(s string) (*dep.Secret, error) { - log.Printf("[WARN] the `vault' template function has been deprecated. " + - "Please use `secret` instead!") - return secretFunc(brain, used, missing)(s) +// base64Decode decodes the given string as a base64 string, returning an error +// if it fails. +func base64Decode(s string) (string, error) { + v, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", errors.Wrap(err, "base64Decode") + } + return string(v), nil +} + +// base64Encode encodes the given value into a string represented as base64. +func base64Encode(s string) (string, error) { + return base64.StdEncoding.EncodeToString([]byte(s)), nil +} + +// base64URLDecode decodes the given string as a URL-safe base64 string. +func base64URLDecode(s string) (string, error) { + v, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return "", errors.Wrap(err, "base64URLDecode") } + return string(v), nil +} + +// base64URLEncode encodes the given string to be URL-safe. +func base64URLEncode(s string) (string, error) { + return base64.URLEncoding.EncodeToString([]byte(s)), nil } // byKey accepts a slice of KV pairs and returns a map of the top-level @@ -436,12 +520,18 @@ func byTag(in interface{}) (map[string][]interface{}, error) { switch typed := in.(type) { case nil: - case []*dep.CatalogService: + case []*dep.CatalogSnippet: for _, s := range typed { for _, t := range s.Tags { m[t] = append(m[t], s) } } + case []*dep.CatalogService: + for _, s := range typed { + for _, t := range s.ServiceTags { + m[t] = append(m[t], s) + } + } case []*dep.HealthService: for _, s := range typed { for _, t := range s.Tags { @@ -464,9 +554,24 @@ func contains(v, l interface{}) (bool, error) { return in(l, v) } -// env returns the value of the environment variable set -func env(s string) (string, error) { - return os.Getenv(s), nil +// containsSomeFunc returns functions to implement each of the following: +// +// 1. containsAll - true if (∀x ∈ v then x ∈ l); false otherwise +// 2. containsAny - true if (∃x ∈ v such that x ∈ l); false otherwise +// 3. containsNone - true if (∀x ∈ v then x ∉ l); false otherwise +// 2. 
containsNotAll - true if (∃x ∈ v such that x ∉ l); false otherwise +// +// ret_true - return true at end of loop for none/all; false for any/notall +// invert - invert block test for all/notall +func containsSomeFunc(retTrue, invert bool) func([]interface{}, interface{}) (bool, error) { + return func(v []interface{}, l interface{}) (bool, error) { + for i := 0; i < len(v); i++ { + if ok, _ := in(l, v[i]); ok != invert { + return !retTrue, nil + } + } + return retTrue, nil + } } // explode is used to expand a list of keypairs into a deeply-nested hash. @@ -474,7 +579,7 @@ func explode(pairs []*dep.KeyPair) (map[string]interface{}, error) { m := make(map[string]interface{}) for _, pair := range pairs { if err := explodeHelper(m, pair.Key, pair.Value, pair.Key); err != nil { - return nil, err + return nil, errors.Wrap(err, "explode") } } return m, nil @@ -495,16 +600,16 @@ func explodeHelper(m map[string]interface{}, k, v, p string) error { return fmt.Errorf("not a map: %q: %q already has value %q", p, top, m[top]) } return explodeHelper(nest, key, v, k) - } else { - if k != "" { - m[k] = v - } + } + + if k != "" { + m[k] = v } return nil } -// in seaches for a given value in a given interface. +// in searches for a given value in a given interface. func in(l, v interface{}) (bool, error) { lv := reflect.ValueOf(l) vv := reflect.ValueOf(v) @@ -614,7 +719,7 @@ func parseBool(s string) (bool, error) { result, err := strconv.ParseBool(s) if err != nil { - return false, fmt.Errorf("parseBool: %s", err) + return false, errors.Wrap(err, "parseBool") } return result, nil } @@ -627,7 +732,7 @@ func parseFloat(s string) (float64, error) { result, err := strconv.ParseFloat(s, 10) if err != nil { - return 0, fmt.Errorf("parseFloat: %s", err) + return 0, errors.Wrap(err, "parseFloat") } return result, nil } @@ -640,7 +745,7 @@ func parseInt(s string) (int64, error) { result, err := strconv.ParseInt(s, 10, 64) if err != nil { - return 0, fmt.Errorf("parseInt: %s", err) + return 0, errors.Wrap(err, "parseInt") } return result, nil } @@ -666,7 +771,7 @@ func parseUint(s string) (uint64, error) { result, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("parseUint: %s", err) + return 0, errors.Wrap(err, "parseUint") } return result, nil } @@ -704,14 +809,14 @@ func plugin(name string, args ...string) (string, error) { }() select { - case <-time.After(5 * time.Second): + case <-time.After(30 * time.Second): if cmd.Process != nil { if err := cmd.Process.Kill(); err != nil { return "", fmt.Errorf("exec %q: failed to kill", name) } } <-done // Allow the goroutine to exit - return "", fmt.Errorf("exec %q: did not finish", name) + return "", fmt.Errorf("exec %q: did not finishin 30s", name) case err := <-done: if err != nil { return "", fmt.Errorf("exec %q: %s\n\nstdout:\n\n%s\n\nstderr:\n\n%s", @@ -783,7 +888,7 @@ func toLower(s string) (string, error) { func toJSON(i interface{}) (string, error) { result, err := json.Marshal(i) if err != nil { - return "", fmt.Errorf("toJSON: %s", err) + return "", errors.Wrap(err, "toJSON") } return string(bytes.TrimSpace(result)), err } @@ -793,7 +898,7 @@ func toJSON(i interface{}) (string, error) { func toJSONPretty(m map[string]interface{}) (string, error) { result, err := json.MarshalIndent(m, "", " ") if err != nil { - return "", fmt.Errorf("toJSONPretty: %s", err) + return "", errors.Wrap(err, "toJSONPretty") } return string(bytes.TrimSpace(result)), err } @@ -812,7 +917,7 @@ func toUpper(s string) (string, error) { func toYAML(m 
map[string]interface{}) (string, error) { result, err := yaml.Marshal(m) if err != nil { - return "", fmt.Errorf("toYAML: %s", err) + return "", errors.Wrap(err, "toYAML") } return string(bytes.TrimSpace(result)), nil } @@ -822,11 +927,11 @@ func toTOML(m map[string]interface{}) (string, error) { buf := bytes.NewBuffer([]byte{}) enc := toml.NewEncoder(buf) if err := enc.Encode(m); err != nil { - return "", fmt.Errorf("toTOML: %s", err) + return "", errors.Wrap(err, "toTOML") } result, err := ioutil.ReadAll(buf) if err != nil { - return "", fmt.Errorf("toTOML: %s", err) + return "", errors.Wrap(err, "toTOML") } return string(bytes.TrimSpace(result)), nil } @@ -1006,10 +1111,3 @@ func divide(b, a interface{}) (interface{}, error) { return nil, fmt.Errorf("divide: unknown type for %q (%T)", av, a) } } - -// addDependency adds the given Dependency to the map. -func addDependency(m map[string]dep.Dependency, d dep.Dependency) { - if _, ok := m[d.HashCode()]; !ok { - m[d.HashCode()] = d - } -} diff --git a/vendor/github.com/hashicorp/consul-template/template/scratch.go b/vendor/github.com/hashicorp/consul-template/template/scratch.go new file mode 100644 index 000000000000..d26787c1a7a4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul-template/template/scratch.go @@ -0,0 +1,125 @@ +package template + +import ( + "fmt" + "sort" + "sync" +) + +// Scratch is a wrapper around a map which is used by the template. +type Scratch struct { + once sync.Once + sync.RWMutex + values map[string]interface{} +} + +// Key returns a boolean indicating whether the given key exists in the map. +func (s *Scratch) Key(k string) bool { + s.RLock() + defer s.RUnlock() + _, ok := s.values[k] + return ok +} + +// Get returns a value previously set by Add or Set +func (s *Scratch) Get(k string) interface{} { + s.RLock() + defer s.RUnlock() + return s.values[k] +} + +// Set stores the value v at the key k. It will overwrite an existing value +// if present. +func (s *Scratch) Set(k string, v interface{}) string { + s.init() + + s.Lock() + defer s.Unlock() + s.values[k] = v + return "" +} + +// SetX behaves the same as Set, except it will not overwrite existing keys if +// already present. +func (s *Scratch) SetX(k string, v interface{}) string { + s.init() + + s.Lock() + defer s.Unlock() + if _, ok := s.values[k]; !ok { + s.values[k] = v + } + return "" +} + +// MapSet stores the value v into a key mk in the map named k. +func (s *Scratch) MapSet(k, mk string, v interface{}) (string, error) { + s.init() + + s.Lock() + defer s.Unlock() + return s.mapSet(k, mk, v, true) +} + +// MapSetX behaves the same as MapSet, except it will not overwrite the map +// key if it already exists. +func (s *Scratch) MapSetX(k, mk string, v interface{}) (string, error) { + s.init() + + s.Lock() + defer s.Unlock() + return s.mapSet(k, mk, v, false) +} + +// mapSet is sets the value in the map, overwriting if o is true. This function +// does not perform locking; callers should lock before invoking. +func (s *Scratch) mapSet(k, mk string, v interface{}, o bool) (string, error) { + if _, ok := s.values[k]; !ok { + s.values[k] = make(map[string]interface{}) + } + + typed, ok := s.values[k].(map[string]interface{}) + if !ok { + return "", fmt.Errorf("%q is not a map", k) + } + + if _, ok := typed[mk]; o || !ok { + typed[mk] = v + } + return "", nil +} + +// MapValues returns the list of values in the map sorted by key. 
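The scratch helpers are handed to templates as a single object whose methods are invoked from the template body. A self-contained illustration of that wiring using a stand-in type (not the Scratch implementation above):

package main

import (
	"os"
	"text/template"
)

// pad is a tiny stand-in for a scratch pad: Set stores a value and renders
// nothing (it returns an empty string), Get reads it back.
type pad struct{ values map[string]interface{} }

func (p *pad) Set(k string, v interface{}) string {
	if p.values == nil {
		p.values = make(map[string]interface{})
	}
	p.values[k] = v
	return ""
}

func (p *pad) Get(k string) interface{} { return p.values[k] }

func main() {
	p := &pad{}
	t := template.Must(template.New("demo").Funcs(template.FuncMap{
		// Expose the pad the same way the diff exposes scratch: a niladic
		// function returning the object, so templates can call its methods.
		"scratch": func() *pad { return p },
	}).Parse(`{{ scratch.Set "name" "consul" }}hello {{ scratch.Get "name" }}` + "\n"))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}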
+func (s *Scratch) MapValues(k string) ([]interface{}, error) { + s.init() + + s.Lock() + defer s.Unlock() + if s.values == nil { + return nil, nil + } + + typed, ok := s.values[k].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("%q is not a map", k) + } + + keys := make([]string, 0, len(typed)) + for k := range typed { + keys = append(keys, k) + } + sort.Strings(keys) + + sorted := make([]interface{}, len(keys)) + for i, k := range keys { + sorted[i] = typed[k] + } + return sorted, nil +} + +// init initializes the scratch. +func (s *Scratch) init() { + if s.values == nil { + s.values = make(map[string]interface{}) + } +} diff --git a/vendor/github.com/hashicorp/consul-template/template/template.go b/vendor/github.com/hashicorp/consul-template/template/template.go index f4bbe853b1ee..38e6366a7d1c 100644 --- a/vendor/github.com/hashicorp/consul-template/template/template.go +++ b/vendor/github.com/hashicorp/consul-template/template/template.go @@ -4,144 +4,218 @@ import ( "bytes" "crypto/md5" "encoding/hex" - "errors" - "fmt" "io/ioutil" - "path/filepath" "text/template" + "github.com/pkg/errors" + dep "github.com/hashicorp/consul-template/dependency" ) -type Template struct { - // Path is the path to this template on disk. - Path string +var ( + // ErrTemplateContentsAndSource is the error returned when a template + // specifies both a "source" and "content" argument, which is not valid. + ErrTemplateContentsAndSource = errors.New("template: cannot specify both 'source' and 'content'") - // LeftDelim and RightDelim are the left and right delimiters to use. - LeftDelim, RightDelim string + // ErrTemplateMissingContentsAndSource is the error returned when a template + // does not specify either a "source" or "content" argument, which is not + // valid. + ErrTemplateMissingContentsAndSource = errors.New("template: must specify exactly one of 'source' or 'content'") +) - // Contents is the string contents for the template. It is either given +// Template is the internal representation of an individual template to process. +// The template retains the relationship between it's contents and is +// responsible for it's own execution. +type Template struct { + // contents is the string contents for the template. It is either given // during template creation or read from disk when initialized. + contents string + + // source is the original location of the template. This may be undefined if + // the template was dynamically defined. + source string + + // leftDelim and rightDelim are the template delimiters. + leftDelim string + rightDelim string + + // hexMD5 stores the hex version of the MD5 + hexMD5 string +} + +// NewTemplateInput is used as input when creating the template. +type NewTemplateInput struct { + // Source is the location on disk to the file. + Source string + + // Contents are the raw template contents. Contents string - // HexMD5 stores the hex version of the MD5 - HexMD5 string + // LeftDelim and RightDelim are the template delimiters. + LeftDelim string + RightDelim string } // NewTemplate creates and parses a new Consul Template template at the given // path. If the template does not exist, an error is returned. During // initialization, the template is read and is parsed for dependencies. Any // errors that occur are returned. 
-func NewTemplate(path, contents, leftDelim, rightDelim string) (*Template, error) { - // Validate that we are either given the path or the explicit contents - pathEmpty, contentsEmpty := path == "", contents == "" - if !pathEmpty && !contentsEmpty { - return nil, errors.New("Either specify template path or content, not both") - } else if pathEmpty && contentsEmpty { - return nil, errors.New("Must specify template path or content") +func NewTemplate(i *NewTemplateInput) (*Template, error) { + if i == nil { + i = &NewTemplateInput{} } - template := &Template{ - Path: path, - Contents: contents, - LeftDelim: leftDelim, - RightDelim: rightDelim, + // Validate that we are either given the path or the explicit contents + if i.Source != "" && i.Contents != "" { + return nil, ErrTemplateContentsAndSource + } else if i.Source == "" && i.Contents == "" { + return nil, ErrTemplateMissingContentsAndSource } - if err := template.init(); err != nil { - return nil, err + + var t Template + t.source = i.Source + t.contents = i.Contents + t.leftDelim = i.LeftDelim + t.rightDelim = i.RightDelim + + if i.Source != "" { + contents, err := ioutil.ReadFile(i.Source) + if err != nil { + return nil, errors.Wrap(err, "failed to read template") + } + t.contents = string(contents) } - return template, nil + // Compute the MD5, encode as hex + hash := md5.Sum([]byte(t.contents)) + t.hexMD5 = hex.EncodeToString(hash[:]) + + return &t, nil } -// ID returns an identifier for the template +// ID returns the identifier for this template. func (t *Template) ID() string { - return t.HexMD5 + return t.hexMD5 } -// Execute evaluates this template in the context of the given brain. -// -// The first return value is the list of used dependencies. -// The second return value is the list of missing dependencies. -// The third return value is the rendered text. -// The fourth return value any error that occurs. -func (t *Template) Execute(brain *Brain) ([]dep.Dependency, []dep.Dependency, []byte, error) { - usedMap := make(map[string]dep.Dependency) - missingMap := make(map[string]dep.Dependency) - name := filepath.Base(t.Path) - funcs := funcMap(brain, usedMap, missingMap) - - tmpl, err := template.New(name). - Delims(t.LeftDelim, t.RightDelim). - Funcs(funcs). - Parse(t.Contents) - if err != nil { - return nil, nil, nil, fmt.Errorf("template: %s", err) - } +// Contents returns the raw contents of the template. +func (t *Template) Contents() string { + return t.contents +} - // TODO: accept an io.Writer instead - buff := new(bytes.Buffer) - if err := tmpl.Execute(buff, nil); err != nil { - return nil, nil, nil, fmt.Errorf("template: %s", err) +// Source returns the filepath source of this template. +func (t *Template) Source() string { + if t.source == "" { + return "(dynamic)" } + return t.source +} - // Update this list of this template's dependencies - var used []dep.Dependency - for _, dep := range usedMap { - used = append(used, dep) - } +// ExecuteInput is used as input to the template's execute function. +type ExecuteInput struct { + // Brain is the brain where data for the template is stored. + Brain *Brain - // Compile the list of missing dependencies - var missing []dep.Dependency - for _, dep := range missingMap { - missing = append(missing, dep) - } + // Env is a custom environment provided to the template for envvar resolution. + // Values specified here will take precedence over any values in the + // environment when using the `env` function. 
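A template's ID is just the hex-encoded MD5 of its contents, computed once at construction, so identical contents always produce the same ID whether the template came from a file or was defined inline. The computation by itself:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// templateID derives a stable identifier from template contents.
func templateID(contents string) string {
	hash := md5.Sum([]byte(contents))
	return hex.EncodeToString(hash[:])
}

func main() {
	fmt.Println(templateID(`{{ key "service/config/port" }}`))
}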
+ Env []string +} - return used, missing, buff.Bytes(), nil +// ExecuteResult is the result of the template execution. +type ExecuteResult struct { + // Used is the set of dependencies that were used. + Used *dep.Set + + // Missing is the set of dependencies that were missing. + Missing *dep.Set + + // Output is the rendered result. + Output []byte } -// init reads the template file and initializes required variables. -func (t *Template) init() error { - // Render the template - if t.Path != "" { - contents, err := ioutil.ReadFile(t.Path) - if err != nil { - return err - } - t.Contents = string(contents) +// Execute evaluates this template in the provided context. +func (t *Template) Execute(i *ExecuteInput) (*ExecuteResult, error) { + if i == nil { + i = &ExecuteInput{} } - // Compute the MD5, encode as hex - hash := md5.Sum([]byte(t.Contents)) - t.HexMD5 = hex.EncodeToString(hash[:]) + var used, missing dep.Set - return nil + tmpl := template.New("") + tmpl.Delims(t.leftDelim, t.rightDelim) + tmpl.Funcs(funcMap(&funcMapInput{ + t: tmpl, + brain: i.Brain, + env: i.Env, + used: &used, + missing: &missing, + })) + + tmpl, err := tmpl.Parse(t.contents) + if err != nil { + return nil, errors.Wrap(err, "parse") + } + + // Execute the template into the writer + var b bytes.Buffer + if err := tmpl.Execute(&b, nil); err != nil { + return nil, errors.Wrap(err, "execute") + } + + return &ExecuteResult{ + Used: &used, + Missing: &missing, + Output: b.Bytes(), + }, nil +} + +// funcMapInput is input to the funcMap, which builds the template functions. +type funcMapInput struct { + t *template.Template + brain *Brain + env []string + used *dep.Set + missing *dep.Set } // funcMap is the map of template functions to their respective functions. -func funcMap(brain *Brain, used, missing map[string]dep.Dependency) template.FuncMap { +func funcMap(i *funcMapInput) template.FuncMap { + var scratch Scratch + return template.FuncMap{ // API functions - "datacenters": datacentersFunc(brain, used, missing), - "file": fileFunc(brain, used, missing), - "key": keyFunc(brain, used, missing), - "key_exists": keyExistsFunc(brain, used, missing), - "key_or_default": keyWithDefaultFunc(brain, used, missing), - "ls": lsFunc(brain, used, missing), - "node": nodeFunc(brain, used, missing), - "nodes": nodesFunc(brain, used, missing), - "secret": secretFunc(brain, used, missing), - "secrets": secretsFunc(brain, used, missing), - "service": serviceFunc(brain, used, missing), - "services": servicesFunc(brain, used, missing), - "tree": treeFunc(brain, used, missing), - "vault": vaultFunc(brain, used, missing), + "datacenters": datacentersFunc(i.brain, i.used, i.missing), + "env": envFunc(i.brain, i.used, i.missing, i.env), + "file": fileFunc(i.brain, i.used, i.missing), + "key": keyFunc(i.brain, i.used, i.missing), + "keyExists": keyExistsFunc(i.brain, i.used, i.missing), + "keyOrDefault": keyWithDefaultFunc(i.brain, i.used, i.missing), + "ls": lsFunc(i.brain, i.used, i.missing), + "node": nodeFunc(i.brain, i.used, i.missing), + "nodes": nodesFunc(i.brain, i.used, i.missing), + "secret": secretFunc(i.brain, i.used, i.missing), + "secrets": secretsFunc(i.brain, i.used, i.missing), + "service": serviceFunc(i.brain, i.used, i.missing), + "services": servicesFunc(i.brain, i.used, i.missing), + "tree": treeFunc(i.brain, i.used, i.missing), + + // Scratch + "scratch": func() *Scratch { return &scratch }, // Helper functions + "base64Decode": base64Decode, + "base64Encode": base64Encode, + "base64URLDecode": base64URLDecode, + 
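Stepping out of the function-map listing for a moment, the sketch below shows how the ExecuteInput/ExecuteResult pair above might be consumed; it assumes the `template` package import plus `io/ioutil` and a Brain supplied by the surrounding runner, so it is a hedged illustration rather than the runner's actual code path.

```go
// renderOnce executes a parsed template against a Brain and writes the
// rendered bytes to dest. Illustrative only.
func renderOnce(tmpl *template.Template, brain *template.Brain, dest string) error {
	result, err := tmpl.Execute(&template.ExecuteInput{
		Brain: brain,
		// Env feeds the new `env` template function; the entry is a placeholder.
		Env: []string{"NOMAD_TASK_NAME=web"},
	})
	if err != nil {
		return err
	}
	// result.Used and result.Missing report which dependencies were consumed
	// or are still outstanding; Output holds the rendered text.
	return ioutil.WriteFile(dest, result.Output, 0644)
}
```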
"base64URLEncode": base64URLEncode, "byKey": byKey, "byTag": byTag, "contains": contains, - "env": env, + "containsAll": containsSomeFunc(true, true), + "containsAny": containsSomeFunc(false, false), + "containsNone": containsSomeFunc(true, false), + "containsNotAll": containsSomeFunc(false, true), + "executeTemplate": executeTemplateFunc(i.t), "explode": explode, "in": in, "loop": loop, @@ -171,5 +245,8 @@ func funcMap(brain *Brain, used, missing map[string]dep.Dependency) template.Fun "subtract": subtract, "multiply": multiply, "divide": divide, + + // Deprecated functions + "key_or_default": keyWithDefaultFunc(i.brain, i.used, i.missing), } } diff --git a/vendor/github.com/hashicorp/consul-template/watch/mapstructure.go b/vendor/github.com/hashicorp/consul-template/watch/mapstructure.go deleted file mode 100644 index ef4e6d2d04cd..000000000000 --- a/vendor/github.com/hashicorp/consul-template/watch/mapstructure.go +++ /dev/null @@ -1,27 +0,0 @@ -package watch - -import ( - "reflect" - - "github.com/mitchellh/mapstructure" -) - -// StringToWaitDurationHookFunc returns a function that converts strings to wait -// value. This is designed to be used with mapstructure for parsing out a wait -// value. -func StringToWaitDurationHookFunc() mapstructure.DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(new(Wait)) { - return data, nil - } - - // Convert it by parsing - return ParseWait(data.(string)) - } -} diff --git a/vendor/github.com/hashicorp/consul-template/watch/view.go b/vendor/github.com/hashicorp/consul-template/watch/view.go index 4a744f76b27b..ca47f50f8ebc 100644 --- a/vendor/github.com/hashicorp/consul-template/watch/view.go +++ b/vendor/github.com/hashicorp/consul-template/watch/view.go @@ -18,42 +18,72 @@ const ( // View is a representation of a Dependency and the most recent data it has // received from Consul. type View struct { - // Dependency is the dependency that is associated with this View - Dependency dep.Dependency + // dependency is the dependency that is associated with this View + dependency dep.Dependency - // config is the configuration for the watcher that created this view and - // contains important information about how this view should behave when - // polling including retry functions and handling stale queries. - config *WatcherConfig + // clients is the list of clients to communicate upstream. This is passed + // directly to the dependency. + clients *dep.ClientSet - // Data is the most-recently-received data from Consul for this View + // data is the most-recently-received data from Consul for this View. It is + // accompanied by a series of locks and booleans to ensure consistency. dataLock sync.RWMutex data interface{} receivedData bool lastIndex uint64 + // maxStale is the maximum amount of time to allow a query to be stale. + maxStale time.Duration + + // once determines if this view should receive data exactly once. + once bool + + // retryFunc is the function to invoke on failure to determine if a retry + // should be attempted. + retryFunc RetryFunc + // stopCh is used to stop polling on this View stopCh chan struct{} } -// NewView creates a new view object from the given Consul API client and -// Dependency. If an error occurs, it will be returned. 
-func NewView(config *WatcherConfig, d dep.Dependency) (*View, error) { - if config == nil { - return nil, fmt.Errorf("view: missing config") - } +// NewViewInput is used as input to the NewView function. +type NewViewInput struct { + // Dependency is the dependency to associate with the new view. + Dependency dep.Dependency - if d == nil { - return nil, fmt.Errorf("view: missing dependency") - } + // Clients is the list of clients to communicate upstream. This is passed + // directly to the dependency. + Clients *dep.ClientSet + + // MaxStale is the maximum amount a time a query response is allowed to be + // stale before forcing a read from the leader. + MaxStale time.Duration + + // Once indicates this view should poll for data exactly one time. + Once bool + // RetryFunc is a function which dictates how this view should retry on + // upstream errors. + RetryFunc RetryFunc +} + +// NewView constructs a new view with the given inputs. +func NewView(i *NewViewInput) (*View, error) { return &View{ - Dependency: d, - config: config, - stopCh: make(chan struct{}), + dependency: i.Dependency, + clients: i.Clients, + maxStale: i.MaxStale, + once: i.Once, + retryFunc: i.RetryFunc, + stopCh: make(chan struct{}, 1), }, nil } +// Dependency returns the dependency attached to this View. +func (v *View) Dependency() dep.Dependency { + return v.dependency +} + // Data returns the most-recently-received data from Consul for this View. func (v *View) Data() interface{} { v.dataLock.RLock() @@ -75,8 +105,7 @@ func (v *View) DataAndLastIndex() (interface{}, uint64) { // function to be fired in a goroutine, but then halted even if the fetch // function is in the middle of a blocking query. func (v *View) poll(viewCh chan<- *View, errCh chan<- error) { - defaultRetry := v.config.RetryFunc(1 * time.Second) - currentRetry := defaultRetry + var retries int for { doneCh, fetchErrCh := make(chan struct{}, 1), make(chan error, 1) @@ -86,37 +115,47 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) { case <-doneCh: // Reset the retry to avoid exponentially incrementing retries when we // have some successful requests - currentRetry = defaultRetry + retries = 0 - log.Printf("[INFO] (view) %s received data", v.display()) + log.Printf("[TRACE] (view) %s received data", v.dependency) select { case <-v.stopCh: + return case viewCh <- v: } // If we are operating in once mode, do not loop - we received data at // least once which is the API promise here. 
- if v.config.Once { + if v.once { return } case err := <-fetchErrCh: - log.Printf("[ERR] (view) %s %s", v.display(), err) + if v.retryFunc != nil { + retry, sleep := v.retryFunc(retries) + if retry { + log.Printf("[WARN] (view) %s (retry attempt %d after %q)", + err, retries+1, sleep) + select { + case <-time.After(sleep): + retries++ + continue + case <-v.stopCh: + return + } + } + } + + log.Printf("[ERR] (view) %s (exceeded maximum retries)", err) // Push the error back up to the watcher select { case <-v.stopCh: + return case errCh <- err: + return } - - // Sleep and retry - if v.config.RetryFunc != nil { - currentRetry = v.config.RetryFunc(currentRetry) - } - log.Printf("[INFO] (view) %s errored, retrying in %s", v.display(), currentRetry) - time.Sleep(currentRetry) - continue case <-v.stopCh: - log.Printf("[DEBUG] (view) %s stopping poll (received on view stopCh)", v.display()) + log.Printf("[TRACE] (view) %s stopping poll (received on view stopCh)", v.dependency) return } } @@ -128,10 +167,10 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) { // result of doneCh and errCh. It is assumed that only one instance of fetch // is running per View and therefore no locking or mutexes are used. func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) { - log.Printf("[DEBUG] (view) %s starting fetch", v.display()) + log.Printf("[TRACE] (view) %s starting fetch", v.dependency) var allowStale bool - if v.config.MaxStale != 0 { + if v.maxStale != 0 { allowStale = true } @@ -144,48 +183,44 @@ func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) { default: } - opts := &dep.QueryOptions{ + data, rm, err := v.dependency.Fetch(v.clients, &dep.QueryOptions{ AllowStale: allowStale, WaitTime: defaultWaitTime, WaitIndex: v.lastIndex, - } - data, rm, err := v.Dependency.Fetch(v.config.Clients, opts) + }) if err != nil { - // ErrStopped is returned by a dependency when it prematurely stopped - // because the upstream process asked for a reload or termination. The - // most likely cause is that the view was stopped due to a configuration - // reload or process interrupt, so we do not want to propagate this error - // to the runner, but we want to stop the fetch routine for this view. 
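The retry branch above uses the new RetryFunc shape: the attempt count goes in, and whether to retry plus how long to sleep come out. A possible backoff, with purely illustrative limits and assuming the `watch` package and a `time` import:

```go
// backoff retries up to five times with exponential backoff, then reports
// false so the poll loop pushes the error up to the watcher.
var backoff watch.RetryFunc = func(retries int) (bool, time.Duration) {
	if retries >= 5 {
		return false, 0
	}
	return true, (250 * time.Millisecond) << uint(retries)
}
```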
- if err != dep.ErrStopped { + if err == dep.ErrStopped { + log.Printf("[TRACE] (view) %s reported stop", v.dependency) + } else { errCh <- err } return } if rm == nil { - errCh <- fmt.Errorf("consul returned nil response metadata; this " + - "should never happen and is probably a bug in consul-template") + errCh <- fmt.Errorf("received nil response metadata - this is a bug " + + "and should be reported") return } - if allowStale && rm.LastContact > v.config.MaxStale { + if allowStale && rm.LastContact > v.maxStale { allowStale = false - log.Printf("[DEBUG] (view) %s stale data (last contact exceeded max_stale)", v.display()) + log.Printf("[TRACE] (view) %s stale data (last contact exceeded max_stale)", v.dependency) continue } - if v.config.MaxStale != 0 { + if v.maxStale != 0 { allowStale = true } if rm.LastIndex == v.lastIndex { - log.Printf("[DEBUG] (view) %s no new data (index was the same)", v.display()) + log.Printf("[TRACE] (view) %s no new data (index was the same)", v.dependency) continue } v.dataLock.Lock() if rm.LastIndex < v.lastIndex { - log.Printf("[DEBUG] (view) %s had a lower index, resetting", v.display()) + log.Printf("[TRACE] (view) %s had a lower index, resetting", v.dependency) v.lastIndex = 0 v.dataLock.Unlock() continue @@ -193,13 +228,13 @@ func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) { v.lastIndex = rm.LastIndex if v.receivedData && reflect.DeepEqual(data, v.data) { - log.Printf("[DEBUG] (view) %s no new data (contents were the same)", v.display()) + log.Printf("[TRACE] (view) %s no new data (contents were the same)", v.dependency) v.dataLock.Unlock() continue } - if data == nil { - log.Printf("[DEBUG](view) %s data was not present", v.display()) + if data == nil && rm.Block { + log.Printf("[TRACE] (view) %s asked for blocking query", v.dependency) v.dataLock.Unlock() continue } @@ -213,13 +248,8 @@ func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) { } } -// display returns a string that represents this view. -func (v *View) display() string { - return v.Dependency.Display() -} - // stop halts polling of this view. func (v *View) stop() { - v.Dependency.Stop() + v.dependency.Stop() close(v.stopCh) } diff --git a/vendor/github.com/hashicorp/consul-template/watch/wait.go b/vendor/github.com/hashicorp/consul-template/watch/wait.go deleted file mode 100644 index 72933a884594..000000000000 --- a/vendor/github.com/hashicorp/consul-template/watch/wait.go +++ /dev/null @@ -1,87 +0,0 @@ -package watch - -import ( - "errors" - "fmt" - "strings" - "time" -) - -// Wait is the Min/Max duration used by the Watcher -type Wait struct { - // Min and Max are the minimum and maximum time, respectively, to wait for - // data changes before rendering a new template to disk. - Min time.Duration `json:"min" mapstructure:"min"` - Max time.Duration `json:"max" mapstructure:"max"` -} - -// ParseWait parses a string of the format `minimum(:maximum)` into a Wait -// struct. 
-func ParseWait(s string) (*Wait, error) { - if len(strings.TrimSpace(s)) < 1 { - return nil, errors.New("cannot specify empty wait interval") - } - - parts := strings.Split(s, ":") - - var min, max time.Duration - var err error - - if len(parts) == 1 { - min, err = time.ParseDuration(strings.TrimSpace(parts[0])) - if err != nil { - return nil, err - } - - max = 4 * min - } else if len(parts) == 2 { - min, err = time.ParseDuration(strings.TrimSpace(parts[0])) - if err != nil { - return nil, err - } - - max, err = time.ParseDuration(strings.TrimSpace(parts[1])) - if err != nil { - return nil, err - } - } else { - return nil, errors.New("invalid wait interval format") - } - - if min < 0 || max < 0 { - return nil, errors.New("cannot specify a negative wait interval") - } - - if max < min { - return nil, errors.New("wait interval max must be larger than min") - } - - return &Wait{min, max}, nil -} - -// IsActive returns true if this wait is active (non-zero). -func (w *Wait) IsActive() bool { - return w.Min != 0 && w.Max != 0 -} - -// WaitVar implements the Flag.Value interface and allows the user to specify -// a watch interval using Go's flag parsing library. -type WaitVar Wait - -// Set sets the value in the format min[:max] for a wait timer. -func (w *WaitVar) Set(value string) error { - wait, err := ParseWait(value) - if err != nil { - return err - } - - w.Min = wait.Min - w.Max = wait.Max - - return nil -} - -// String returns the string format for this wait variable -func (w *WaitVar) String() string { - return fmt.Sprintf("%s:%s", w.Min, w.Max) -} diff --git a/vendor/github.com/hashicorp/consul-template/watch/watcher.go b/vendor/github.com/hashicorp/consul-template/watch/watcher.go index 39d98f1365b6..b95ca55efb55 100644 --- a/vendor/github.com/hashicorp/consul-template/watch/watcher.go +++ b/vendor/github.com/hashicorp/consul-template/watch/watcher.go @@ -1,77 +1,105 @@ package watch import ( - "fmt" "log" "sync" "time" dep "github.com/hashicorp/consul-template/dependency" + "github.com/pkg/errors" ) -// RetryFunc is a function that defines the retry for a given watcher. The -// function parameter is the current retry (which might be nil), and the -// return value is the new retry. In this way, you can build complex retry -// functions that are based off the previous values. -type RetryFunc func(time.Duration) time.Duration - -// DefaultRetryFunc is the default return function, which just echos whatever -// duration it was given. -var DefaultRetryFunc RetryFunc = func(t time.Duration) time.Duration { - return t -} - // dataBufferSize is the default number of views to process in a batch. const dataBufferSize = 2048 +type RetryFunc func(int) (bool, time.Duration) + // Watcher is a top-level manager for views that poll Consul for data. type Watcher struct { sync.Mutex - // DataCh is the chan where Views will be published. - DataCh chan *View + // clients is the collection of API clients to talk to upstreams. + clients *dep.ClientSet - // ErrCh is the chan where any errors will be published. - ErrCh chan error + // dataCh is the chan where Views will be published. + dataCh chan *View - // config is the internal configuration of this watcher. - config *WatcherConfig + // errCh is the chan where any errors will be published. + errCh chan error // depViewMap is a map of Templates to Views. Templates are keyed by - // HashCode(). + // their string. depViewMap map[string]*View + + // maxStale specifies the maximum staleness of a query response. 
+ maxStale time.Duration + + // once signals if this watcher should tell views to retrieve data exactly + // one time intead of polling infinitely. + once bool + + // retryFuncs specifies the different ways to retry based on the upstream. + retryFuncConsul RetryFunc + retryFuncDefault RetryFunc + retryFuncVault RetryFunc } -// WatcherConfig is the configuration for a particular Watcher. -type WatcherConfig struct { - // Client is the mechanism for communicating with the Consul API. +type NewWatcherInput struct { + // Clients is the client set to communicate with upstreams. Clients *dep.ClientSet - // Once is used to determine if the views should poll for data exactly once. - Once bool - - // MaxStale is the maximum staleness of a query. If specified, Consul will - // distribute work among all servers instead of just the leader. Specifying - // this option assumes the use of AllowStale. + // MaxStale is the maximum staleness of a query. MaxStale time.Duration - // RetryFunc is a RetryFunc that represents the way retrys and backoffs - // should occur. - RetryFunc RetryFunc + // Once specifies this watcher should tell views to poll exactly once. + Once bool - // RenewVault determines if the watcher should renew the Vault token as a - // background job. + // RenewVault indicates if this watcher should renew Vault tokens. RenewVault bool + + // RetryFuncs specify the different ways to retry based on the upstream. + RetryFuncConsul RetryFunc + RetryFuncDefault RetryFunc + RetryFuncVault RetryFunc } // NewWatcher creates a new watcher using the given API client. -func NewWatcher(config *WatcherConfig) (*Watcher, error) { - watcher := &Watcher{config: config} - if err := watcher.init(); err != nil { - return nil, err +func NewWatcher(i *NewWatcherInput) (*Watcher, error) { + w := &Watcher{ + clients: i.Clients, + depViewMap: make(map[string]*View), + dataCh: make(chan *View, dataBufferSize), + errCh: make(chan error), + maxStale: i.MaxStale, + once: i.Once, + retryFuncConsul: i.RetryFuncConsul, + retryFuncDefault: i.RetryFuncDefault, + retryFuncVault: i.RetryFuncVault, + } + + // Start a watcher for the Vault renew if that config was specified + if i.RenewVault { + vt, err := dep.NewVaultTokenQuery() + if err != nil { + return nil, errors.Wrap(err, "watcher") + } + if _, err := w.Add(vt); err != nil { + return nil, errors.Wrap(err, "watcher") + } } - return watcher, nil + return w, nil +} + +// DataCh returns a read-only channel of Views which is populated when a view +// receives data from its upstream. +func (w *Watcher) DataCh() <-chan *View { + return w.dataCh +} + +// ErrCh returns a read-only channel of errors returned by the upstream. +func (w *Watcher) ErrCh() <-chan error { + return w.errCh } // Add adds the given dependency to the list of monitored depedencies @@ -86,22 +114,39 @@ func (w *Watcher) Add(d dep.Dependency) (bool, error) { w.Lock() defer w.Unlock() - log.Printf("[INFO] (watcher) adding %s", d.Display()) + log.Printf("[DEBUG] (watcher) adding %s", d) - if _, ok := w.depViewMap[d.HashCode()]; ok { - log.Printf("[DEBUG] (watcher) %s already exists, skipping", d.Display()) + if _, ok := w.depViewMap[d.String()]; ok { + log.Printf("[TRACE] (watcher) %s already exists, skipping", d) return false, nil } - v, err := NewView(w.config, d) + // Choose the correct retry function based off of the dependency's type. 
+ var retryFunc RetryFunc + switch d.Type() { + case dep.TypeConsul: + retryFunc = w.retryFuncConsul + case dep.TypeVault: + retryFunc = w.retryFuncVault + default: + retryFunc = w.retryFuncDefault + } + + v, err := NewView(&NewViewInput{ + Dependency: d, + Clients: w.clients, + MaxStale: w.maxStale, + Once: w.once, + RetryFunc: retryFunc, + }) if err != nil { - return false, err + return false, errors.Wrap(err, "watcher") } - log.Printf("[DEBUG] (watcher) %s starting", d.Display()) + log.Printf("[TRACE] (watcher) %s starting", d) - w.depViewMap[d.HashCode()] = v - go v.poll(w.DataCh, w.ErrCh) + w.depViewMap[d.String()] = v + go v.poll(w.dataCh, w.errCh) return true, nil } @@ -111,7 +156,7 @@ func (w *Watcher) Watching(d dep.Dependency) bool { w.Lock() defer w.Unlock() - _, ok := w.depViewMap[d.HashCode()] + _, ok := w.depViewMap[d.String()] return ok } @@ -122,9 +167,9 @@ func (w *Watcher) ForceWatching(d dep.Dependency, enabled bool) { defer w.Unlock() if enabled { - w.depViewMap[d.HashCode()] = nil + w.depViewMap[d.String()] = nil } else { - delete(w.depViewMap, d.HashCode()) + delete(w.depViewMap, d.String()) } } @@ -136,16 +181,16 @@ func (w *Watcher) Remove(d dep.Dependency) bool { w.Lock() defer w.Unlock() - log.Printf("[INFO] (watcher) removing %s", d.Display()) + log.Printf("[DEBUG] (watcher) removing %s", d) - if view, ok := w.depViewMap[d.HashCode()]; ok { - log.Printf("[DEBUG] (watcher) actually removing %s", d.Display()) + if view, ok := w.depViewMap[d.String()]; ok { + log.Printf("[TRACE] (watcher) actually removing %s", d) view.stop() - delete(w.depViewMap, d.HashCode()) + delete(w.depViewMap, d.String()) return true } - log.Printf("[DEBUG] (watcher) %s did not exist, skipping", d.Display()) + log.Printf("[TRACE] (watcher) %s did not exist, skipping", d) return false } @@ -162,13 +207,13 @@ func (w *Watcher) Stop() { w.Lock() defer w.Unlock() - log.Printf("[INFO] (watcher) stopping all views") + log.Printf("[DEBUG] (watcher) stopping all views") for _, view := range w.depViewMap { if view == nil { continue } - log.Printf("[DEBUG] (watcher) stopping %s", view.Dependency.Display()) + log.Printf("[TRACE] (watcher) stopping %s", view.Dependency()) view.stop() } @@ -176,36 +221,5 @@ func (w *Watcher) Stop() { w.depViewMap = make(map[string]*View) // Close any idle TCP connections - w.config.Clients.Stop() -} - -// init sets up the initial values for the watcher. 
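Pulling the new watcher API together before the removed init function below, here is a hedged sketch of constructing a watcher and draining the accessor channels that replace the old exported DataCh/ErrCh fields; the client set, retry function, and logging are assumptions for illustration.

```go
// newWatchLoop builds a watcher from NewWatcherInput and consumes views and
// errors in a background goroutine. Illustrative wiring only.
func newWatchLoop(clients *dep.ClientSet, retry watch.RetryFunc) (*watch.Watcher, error) {
	w, err := watch.NewWatcher(&watch.NewWatcherInput{
		Clients:          clients,
		MaxStale:         30 * time.Second, // placeholder staleness bound
		Once:             false,
		RenewVault:       false,
		RetryFuncConsul:  retry,
		RetryFuncDefault: retry,
		RetryFuncVault:   retry,
	})
	if err != nil {
		return nil, err
	}

	go func() {
		for {
			select {
			case view := <-w.DataCh():
				log.Printf("[DEBUG] new data for %s: %v", view.Dependency(), view.Data())
			case err := <-w.ErrCh():
				log.Printf("[ERR] watch error: %v", err)
			}
		}
	}()
	return w, nil
}
```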
-func (w *Watcher) init() error { - if w.config == nil { - return fmt.Errorf("watcher: missing config") - } - - if w.config.RetryFunc == nil { - w.config.RetryFunc = DefaultRetryFunc - } - - // Setup the channels - w.DataCh = make(chan *View, dataBufferSize) - w.ErrCh = make(chan error) - - // Setup our map of dependencies to views - w.depViewMap = make(map[string]*View) - - // Start a watcher for the Vault renew if that config was specified - if w.config.RenewVault { - vt, err := dep.ParseVaultToken() - if err != nil { - return fmt.Errorf("watcher: %s", err) - } - if _, err := w.Add(vt); err != nil { - return fmt.Errorf("watcher: %s", err) - } - } - - return nil + w.clients.Stop() } diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 87a6c10016ce..1893d1cf359d 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -1,6 +1,7 @@ package api import ( + "bufio" "fmt" ) @@ -73,6 +74,8 @@ type AgentServiceCheck struct { HTTP string `json:",omitempty"` TCP string `json:",omitempty"` Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` // In Consul 0.7 and later, checks that are associated with a service // may also contain this optional DeregisterCriticalServiceAfter field, @@ -114,6 +117,17 @@ func (a *Agent) Self() (map[string]map[string]interface{}, error) { return out, nil } +// Reload triggers a configuration reload for the agent we are connected to. +func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + // NodeName is used to get the node name of the agent func (a *Agent) NodeName() (string, error) { if a.nodeName != "" { @@ -345,6 +359,17 @@ func (a *Agent) Join(addr string, wan bool) error { return nil } +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + // ForceLeave is used to have the agent eject a failed node func (a *Agent) ForceLeave(node string) error { r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) @@ -409,3 +434,38 @@ func (a *Agent) DisableNodeMaintenance() error { resp.Body.Close() return nil } + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream +func (a *Agent) Monitor(loglevel string, stopCh chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + logCh <- scanner.Text() + } + } + }() + + return logCh, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index dd811fde4bf4..9587043a42ca 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -20,6 
+20,28 @@ import ( "github.com/hashicorp/go-cleanhttp" ) +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" +) + // QueryOptions are used to parameterize a query type QueryOptions struct { // Providing a datacenter overwrites the DC provided @@ -181,15 +203,15 @@ func defaultConfig(transportFn func() *http.Transport) *Config { }, } - if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" { + if addr := os.Getenv(HTTPAddrEnvName); addr != "" { config.Address = addr } - if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" { + if token := os.Getenv(HTTPTokenEnvName); token != "" { config.Token = token } - if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" { + if auth := os.Getenv(HTTPAuthEnvName); auth != "" { var username, password string if strings.Contains(auth, ":") { split := strings.SplitN(auth, ":", 2) @@ -205,10 +227,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config { } } - if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" { + if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { enabled, err := strconv.ParseBool(ssl) if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err) + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) } if enabled { @@ -216,10 +238,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config { } } - if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" { + if verify := os.Getenv(HTTPSSLVerifyEnvName); verify != "" { doVerify, err := strconv.ParseBool(verify) if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err) + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) } if !doVerify { diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 337772ec0bf7..56f0dbf69267 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -16,6 +16,8 @@ type CatalogService struct { ServiceTags []string ServicePort int ServiceEnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 } type CatalogNode struct { diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 74da949c8d19..8abe2393ad50 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "strings" ) const ( @@ -11,6 +12,15 @@ const ( HealthPassing = "passing" HealthWarning = "warning" HealthCritical = "critical" + HealthMaint = "maintenance" +) + +const ( + // NodeMaint is the special key set by a node in maintenance mode. + NodeMaint = "_node_maintenance" + + // ServiceMaintPrefix is the prefix for a service in maintenance mode. 
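Circling back to the agent endpoints added earlier in this diff, the new Agent.Monitor call streams agent logs over a channel. Below is a hedged sketch assuming an *api.Client and the usual time/fmt imports; the log level and window are placeholders.

```go
// tailAgentLogs streams agent logs at the given level for a fixed window,
// then closes stopCh, which makes Monitor close the channel. Illustrative.
func tailAgentLogs(client *api.Client, level string, window time.Duration) error {
	stopCh := make(chan struct{})
	logCh, err := client.Agent().Monitor(level, stopCh, nil)
	if err != nil {
		return err
	}
	time.AfterFunc(window, func() { close(stopCh) })

	for line := range logCh {
		fmt.Println(line)
	}
	return nil
}
```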
+ ServiceMaintPrefix = "_service_maintenance:" ) // HealthCheck is used to represent a single check @@ -25,11 +35,56 @@ type HealthCheck struct { ServiceName string } +// HealthChecks is a collection of HealthCheck structs. +type HealthChecks []*HealthCheck + +// AggregatedStatus returns the "best" status for the list of health checks. +// Because a given entry may have many service and node-level health checks +// attached, this function determines the best representative of the status as +// as single string using the following heuristic: +// +// maintenance > critical > warning > passing +// +func (c HealthChecks) AggregatedStatus() string { + var passing, warning, critical, maintenance bool + for _, check := range c { + id := string(check.CheckID) + if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { + maintenance = true + continue + } + + switch check.Status { + case HealthPassing: + passing = true + case HealthWarning: + warning = true + case HealthCritical: + critical = true + default: + return "" + } + } + + switch { + case maintenance: + return HealthMaint + case critical: + return HealthCritical + case warning: + return HealthWarning + case passing: + return HealthPassing + default: + return HealthPassing + } +} + // ServiceEntry is used for the health service endpoint type ServiceEntry struct { Node *Node Service *AgentService - Checks []*HealthCheck + Checks HealthChecks } // Health can be used to query the Health endpoints @@ -43,7 +98,7 @@ func (c *Client) Health() *Health { } // Node is used to query for checks belonging to a given node -func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { +func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/health/node/"+node) r.setQueryOptions(q) rtt, resp, err := requireOK(h.c.doRequest(r)) @@ -56,7 +111,7 @@ func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, parseQueryMeta(resp, qm) qm.RequestTime = rtt - var out []*HealthCheck + var out HealthChecks if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -64,7 +119,7 @@ func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, } // Checks is used to return the checks associated with a service -func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { +func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/health/checks/"+service) r.setQueryOptions(q) rtt, resp, err := requireOK(h.c.doRequest(r)) @@ -77,7 +132,7 @@ func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *Query parseQueryMeta(resp, qm) qm.RequestTime = rtt - var out []*HealthCheck + var out HealthChecks if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -115,7 +170,7 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) // State is used to retrieve all the checks in a given state. // The wildcard "any" state can also be used for all checks. 
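The HealthChecks slice type and its AggregatedStatus helper above let callers collapse an entry's checks into one status string. A hedged sketch, assuming an *api.Client and a placeholder service name:

```go
// printServiceHealth lists instances of a service along with the single
// "best" status (maintenance > critical > warning > passing). Illustrative.
func printServiceHealth(client *api.Client, service string) error {
	entries, _, err := client.Health().Service(service, "", false, nil)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fmt.Printf("%s (%s): %s\n",
			entry.Node.Node, entry.Service.ID, entry.Checks.AggregatedStatus())
	}
	return nil
}
```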
-func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { +func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { switch state { case HealthAny: case HealthWarning: @@ -136,7 +191,7 @@ func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMet parseQueryMeta(resp, qm) qm.RequestTime = rtt - var out []*HealthCheck + var out HealthChecks if err := decodeBody(resp, &out); err != nil { return nil, nil, err } diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go index 3dac2583c125..44e06bbb470d 100644 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -11,13 +11,35 @@ import ( // KVPair is used to represent a single K/V entry type KVPair struct { - Key string + // Key is the name of the key. It is also part of the URL path when accessed + // via the API. + Key string + + // CreateIndex holds the index corresponding the creation of this KVPair. This + // is a read-only field. CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. ModifyIndex uint64 - LockIndex uint64 - Flags uint64 - Value []byte - Session string + + // LockIndex holds the index corresponding to a lock on this key, if any. This + // is a read-only field. + LockIndex uint64 + + // Flags are any user-defined flags on the key. It is up to the implementer + // to check these values, since Consul does not treat them specially. + Flags uint64 + + // Value is the value for the key. This can be any value, but it will be + // base64 encoded upon transport. + Value []byte + + // Session is a string representing the ID of the session. Any other + // interactions with this key over the same session must specify the same + // session ID. + Session string } // KVPairs is a list of KVPair objects @@ -28,21 +50,21 @@ type KVOp string const ( KVSet KVOp = "set" - KVDelete = "delete" - KVDeleteCAS = "delete-cas" - KVDeleteTree = "delete-tree" - KVCAS = "cas" - KVLock = "lock" - KVUnlock = "unlock" - KVGet = "get" - KVGetTree = "get-tree" - KVCheckSession = "check-session" - KVCheckIndex = "check-index" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" ) // KVTxnOp defines a single operation inside a transaction. type KVTxnOp struct { - Verb string + Verb KVOp Key string Value []byte Flags uint64 @@ -70,7 +92,8 @@ func (c *Client) KV() *KV { return &KV{c} } -// Get is used to lookup a single key +// Get is used to lookup a single key. The returned pointer +// to the KVPair will be nil if the key does not exist. 
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { resp, qm, err := k.getInternal(key, nil, q) if err != nil { @@ -133,7 +156,7 @@ func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMe } func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+key) + r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) r.setQueryOptions(q) for param, val := range params { r.params.Set(param, val) @@ -254,7 +277,7 @@ func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { } func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+key) + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) r.setWriteOptions(q) for param, val := range params { r.params.Set(param, val) diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go index 08e8e793108d..9f9845a4325b 100644 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -72,8 +72,9 @@ type LockOptions struct { Key string // Must be set and have write permissions Value []byte // Optional, value to associate with the lock Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) MonitorRetries int // Optional, defaults to 0 which means no retries MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime @@ -329,9 +330,12 @@ func (l *Lock) Destroy() error { // createSession is used to create a new managed session func (l *Lock) createSession() (string, error) { session := l.c.Session() - se := &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } } id, _, err := session.Create(se, nil) if err != nil { diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go index 48d74f3ca6ab..a8d04a38eb88 100644 --- a/vendor/github.com/hashicorp/consul/api/operator.go +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -43,6 +43,26 @@ type RaftConfiguration struct { Index uint64 } +// keyringRequest is used for performing Keyring operations +type keyringRequest struct { + Key string +} + +// KeyringResponse is returned when listing the gossip encryption keys +type KeyringResponse struct { + // Whether this response is for a WAN ring + WAN bool + + // The datacenter name this request corresponds to + Datacenter string + + // A map of the encryption keys to the number of nodes they're installed on + Keys map[string]int + + // The total number of nodes in this ring + NumNodes int +} + // RaftGetConfiguration is used to query the current Raft peer set. 
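The clarified KV.Get contract above, a nil *KVPair when the key is absent, is worth showing explicitly; the key name below is a placeholder and the helper is illustrative only.

```go
// lookupPort distinguishes "key not set" (nil pair) from a transport error.
func lookupPort(client *api.Client) (string, error) {
	pair, _, err := client.KV().Get("service/web/port", nil)
	if err != nil {
		return "", err
	}
	if pair == nil {
		return "", nil // key does not exist
	}
	return string(pair.Value), nil
}
```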
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { r := op.c.newRequest("GET", "/v1/operator/raft/configuration") @@ -79,3 +99,65 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err resp.Body.Close() return nil } + +// KeyringInstall is used to install a new gossip encryption key into the cluster +func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { + r := op.c.newRequest("POST", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringList is used to list the gossip keys installed in the cluster +func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { + r := op.c.newRequest("GET", "/v1/operator/keyring") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*KeyringResponse + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// KeyringRemove is used to remove a gossip encryption key from the cluster +func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringUse is used to change the active gossip encryption key +func (op *Operator) KeyringUse(key string, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go index 63e741e050d5..876e2e3b55e9 100644 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -167,19 +167,18 @@ func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDe } // Delete is used to delete a specific prepared query. -func (c *PreparedQuery) Delete(queryID string, q *QueryOptions) (*QueryMeta, error) { +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { r := c.c.newRequest("DELETE", "/v1/query/"+queryID) - r.setQueryOptions(q) + r.setWriteOptions(q) rtt, resp, err := requireOK(c.c.doRequest(r)) if err != nil { return nil, err } defer resp.Body.Close() - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - return qm, nil + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil } // Execute is used to execute a specific prepared query. You can execute using diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go new file mode 100644 index 000000000000..e902377dd5ca --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -0,0 +1,47 @@ +package api + +import ( + "io" +) + +// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of +// Consul's internal state and restore snapshots for disaster recovery. +type Snapshot struct { + c *Client +} + +// Snapshot returns a handle that exposes the snapshot endpoints. 
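The keyring endpoints above round-trip gossip encryption keys through the operator API. A hedged sketch of rotating a key, assuming client.Operator() from the same package and a placeholder base64-encoded key:

```go
// rotateGossipKey installs a new key, makes it the active key, and reports
// what each ring currently holds. Illustrative only.
func rotateGossipKey(client *api.Client, newKey string) error {
	op := client.Operator()
	if err := op.KeyringInstall(newKey, nil); err != nil {
		return err
	}
	if err := op.KeyringUse(newKey, nil); err != nil {
		return err
	}
	rings, err := op.KeyringList(nil)
	if err != nil {
		return err
	}
	for _, ring := range rings {
		fmt.Printf("dc=%s wan=%v keys=%d nodes=%d\n",
			ring.Datacenter, ring.WAN, len(ring.Keys), ring.NumNodes)
	}
	return nil
}
```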
+func (c *Client) Snapshot() *Snapshot { + return &Snapshot{c} +} + +// Save requests a new snapshot and provides an io.ReadCloser with the snapshot +// data to save. If this doesn't return an error, then it's the responsibility +// of the caller to close it. Only a subset of the QueryOptions are supported: +// Datacenter, AllowStale, and Token. +func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/snapshot") + r.setQueryOptions(q) + + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + return resp.Body, qm, nil +} + +// Restore streams in an existing snapshot and attempts to restore it. +func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { + r := s.c.newRequest("PUT", "/v1/snapshot") + r.body = in + r.setWriteOptions(q) + _, _, err := requireOK(s.c.doRequest(r)) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 000000000000..835ba3e755ce --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 000000000000..273db3c98aea --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. 
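Before the diff moves into the vendored pkg/errors package, a short hedged sketch of the snapshot endpoints added just above; the file path is a placeholder, the usual io/os imports are assumed, and the caller owns closing the reader as the Save docs note.

```go
// saveSnapshot streams a snapshot of Consul's state to a local file for
// disaster recovery. Illustrative only.
func saveSnapshot(client *api.Client, path string) error {
	snap, _, err := client.Snapshot().Save(nil)
	if err != nil {
		return err
	}
	defer snap.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, snap)
	return err
}
```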
+ +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 000000000000..a932eade0240 --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 000000000000..842ee80456db --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. 
For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. 
+type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 000000000000..6b1f2891a5ac --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. 
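To ground the Wrap/Cause plumbing above, a small hedged sketch of how callers typically unwrap and print these errors; the wrapped sentinel and message are illustrative.

```go
// describe shows that Wrap adds a message plus a recorded stack, %+v prints
// the frames, and Cause walks back to the original sentinel error.
func describe() {
	err := errors.Wrap(os.ErrNotExist, "open config")

	fmt.Printf("%v\n", err)  // "open config: file does not exist"
	fmt.Printf("%+v\n", err) // message followed by each stack frame

	if errors.Cause(err) == os.ErrNotExist {
		fmt.Println("underlying cause preserved")
	}
}
```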
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 000000000000..6b1f2891a5ac
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,178 @@
+package errors
+
+import (
+	"fmt"
+	"io"
+	"path"
+	"runtime"
+	"strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	file, _ := fn.FileLine(f.pc())
+	return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return 0
+	}
+	_, line := fn.FileLine(f.pc())
+	return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+//     %s    source file
+//     %d    source line
+//     %n    function name
+//     %v    equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//     %+s   path of source file relative to the compile time GOPATH
+//     %+v   equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 's':
+		switch {
+		case s.Flag('+'):
+			pc := f.pc()
+			fn := runtime.FuncForPC(pc)
+			if fn == nil {
+				io.WriteString(s, "unknown")
+			} else {
+				file, _ := fn.FileLine(pc)
+				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+			}
+		default:
+			io.WriteString(s, path.Base(f.file()))
+		}
+	case 'd':
+		fmt.Fprintf(s, "%d", f.line())
+	case 'n':
+		name := runtime.FuncForPC(f.pc()).Name()
+		io.WriteString(s, funcname(name))
+	case 'v':
+		f.Format(s, 's')
+		io.WriteString(s, ":")
+		f.Format(s, 'd')
+	}
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+func (st StackTrace) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case s.Flag('+'):
+			for _, f := range st {
+				fmt.Fprintf(s, "\n%+v", f)
+			}
+		case s.Flag('#'):
+			fmt.Fprintf(s, "%#v", []Frame(st))
+		default:
+			fmt.Fprintf(s, "%v", []Frame(st))
+		}
+	case 's':
+		fmt.Fprintf(s, "%s", []Frame(st))
+	}
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case st.Flag('+'):
+			for _, pc := range *s {
+				f := Frame(pc)
+				fmt.Fprintf(st, "\n%+v", f)
+			}
+		}
+	}
+}
+
+func (s *stack) StackTrace() StackTrace {
+	f := make([]Frame, len(*s))
+	for i := 0; i < len(f); i++ {
+		f[i] = Frame((*s)[i])
+	}
+	return f
+}
+
+func callers() *stack {
+	const depth = 32
+	var pcs [depth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	var st stack = pcs[0:n]
+	return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+	i := strings.LastIndex(name, "/")
+	name = name[i+1:]
+	i = strings.Index(name, ".")
+	return name[i+1:]
+}
+
+func trimGOPATH(name, file string) string {
+	// Here we want to get the source file path relative to the compile time
+	// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
+	// GOPATH at runtime, but we can infer the number of path segments in the
+	// GOPATH. We note that fn.Name() returns the function name qualified by
+	// the import path, which does not include the GOPATH. Thus we can trim
+	// segments from the beginning of the file path until the number of path
	// separators remaining is one more than the number of path separators in
+	// the function name. For example, given:
+	//
+	//     GOPATH     /home/user
+	//     file       /home/user/src/pkg/sub/file.go
+	//     fn.Name()  pkg/sub.Type.Method
+	//
+	// We want to produce:
+	//
+	//     pkg/sub/file.go
+	//
+	// From this we can easily see that fn.Name() has one less path separator
+	// than our desired output. We count separators from the end of the file
+	// path until we find two more than in the function name and then move
+	// one character forward to preserve the initial path segment without a
+	// leading separator.
+	const sep = "/"
+	goal := strings.Count(name, sep) + 2
+	i := len(file)
+	for n := 0; n < goal; n++ {
+		i = strings.LastIndex(file[:i], sep)
+		if i == -1 {
+			// not enough separators found, set i so that the slice expression
+			// below leaves file unmodified
+			i = -len(sep)
+			break
+		}
+	}
+	// get back to 0 or trim the leading separator
+	file = file[i+len(sep):]
+	return file
+}
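For context, a minimal sketch (not part of this patch) of how the Frame and StackTrace formatting above surfaces when printing a wrapped error; the inner helper is hypothetical.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// inner is a hypothetical function whose failure we want to trace.
func inner() error { return errors.New("boom") }

func main() {
	err := errors.Wrap(inner(), "outer failed")

	// %s and %v print the message chain only: "outer failed: boom".
	fmt.Printf("%v\n", err)

	// %+v also walks the recorded stacks, printing each Frame as
	// "function\n\tfile:line" via Frame.Format.
	fmt.Printf("%+v\n", err)
}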
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 7667e51ec4e3..8175d6b8ce5f 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -501,46 +501,46 @@
 			"revision": "a557574d6c024ed6e36acc8b610f5f211c91568a"
 		},
 		{
-			"checksumSHA1": "+JUQvWp1JUVeRT5weWL9hi6Fu4Y=",
+			"checksumSHA1": "gx2CAg/v3k7kfBA/rT5NCkI0jDI=",
 			"path": "github.com/hashicorp/consul-template/child",
-			"revision": "17cd016cdfa6601e82256b8d624b1331a0c188a7",
-			"revisionTime": "2016-10-28T21:56:23Z"
+			"revision": "8bf2ce9e0cdcd60d799a75262b90468d24ee392e",
+			"revisionTime": "2017-01-20T21:50:49Z"
 		},
 		{
-			"checksumSHA1": "UerCY17HM5DSJ/rE760qxm99Al4=",
+			"checksumSHA1": "1EAiHEm1b/m3KWy08U7yrAvDlms=",
 			"path": "github.com/hashicorp/consul-template/config",
-			"revision": "17cd016cdfa6601e82256b8d624b1331a0c188a7",
-			"revisionTime": "2016-10-28T21:56:23Z"
+			"revision": "8bf2ce9e0cdcd60d799a75262b90468d24ee392e",
+			"revisionTime": "2017-01-20T21:50:49Z"
 		},
 		{
-			"checksumSHA1": "0nA6tnACi/MkE+Mb5L1gqbc3tpw=",
+			"checksumSHA1": "/SBscJ0TqJlOZjKqkbUYMa5c5rk=",
 			"path": "github.com/hashicorp/consul-template/dependency",
-			"revision": "17cd016cdfa6601e82256b8d624b1331a0c188a7",
-			"revisionTime": "2016-10-28T21:56:23Z"
+			"revision": "8bf2ce9e0cdcd60d799a75262b90468d24ee392e",
+			"revisionTime": "2017-01-20T21:50:49Z"
 		},
 		{
-			"checksumSHA1": "KcDxr/mNzYzTeFSCQyhpU1Nm/Ug=",
+			"checksumSHA1": "efWxWe8blQflfYZFU1WSIks2Tbw=",
 			"path": "github.com/hashicorp/consul-template/manager",
-			"revision": "17cd016cdfa6601e82256b8d624b1331a0c188a7",
-			"revisionTime": "2016-10-28T21:56:23Z"
+			"revision": "8bf2ce9e0cdcd60d799a75262b90468d24ee392e",
+			"revisionTime": "2017-01-20T21:50:49Z"
 		},
 		{
-			"checksumSHA1": "ByMIKPf7bXpyhhy80IjKLKYrjpo=",
+			"checksumSHA1": "oskgb0WteBKOItG8NNDduM7E/D0=",
 			"path": "github.com/hashicorp/consul-template/signals",
-			"revision": "17cd016cdfa6601e82256b8d624b1331a0c188a7",
-			"revisionTime": "2016-10-28T21:56:23Z"
+			"revision": "8bf2ce9e0cdcd60d799a75262b90468d24ee392e",
+			"revisionTime": "2017-01-20T21:50:49Z"
 		},
 		{
-			"checksumSHA1": "bkSJRnR2VyZA1KoyOF/eSkxVVFg=",
+			"checksumSHA1": "qZEh52+ryjQjwRWxMZ9eEPa6OBI=",
 			"path": "github.com/hashicorp/consul-template/template",
-			"revision": "17cd016cdfa6601e82256b8d624b1331a0c188a7",
-			"revisionTime": "2016-10-28T21:56:23Z"
+			"revision": "8bf2ce9e0cdcd60d799a75262b90468d24ee392e",
+			"revisionTime": "2017-01-20T21:50:49Z"
 		},
 		{
-			"checksumSHA1": "HfWf4Vf1fBJh5HgHLdjpF5vs0Lk=",
+			"checksumSHA1": "cl9R28+I+YT6a0Z+KQFP//wuC+0=",
 			"path": "github.com/hashicorp/consul-template/watch",
-			"revision": "17cd016cdfa6601e82256b8d624b1331a0c188a7",
-			"revisionTime": "2016-10-28T21:56:23Z"
+			"revision": "8bf2ce9e0cdcd60d799a75262b90468d24ee392e",
+			"revisionTime": "2017-01-20T21:50:49Z"
 		},
 		{
 			"checksumSHA1": "kWbL0V4o8vJL75mzeQzhF6p5jiQ=",
@@ -549,11 +549,11 @@
 			"revisionTime": "2016-09-14T16:11:34Z"
 		},
 		{
-			"checksumSHA1": "BMEJLBjl91k5k3vMMzzT7G2SO1U=",
+			"checksumSHA1": "FXiaccMs+1NvIHh8W44lQJeGqms=",
 			"comment": "v0.6.3-363-gae32a3c",
 			"path": "github.com/hashicorp/consul/api",
-			"revision": "a189091a3530051285c12c726ca28ea55e015336",
-			"revisionTime": "2016-09-14T16:11:34Z"
+			"revision": "a9afa0c27f484dd19fe59a80444e64e5352c4408",
+			"revisionTime": "2016-12-19T19:15:39Z"
 		},
 		{
 			"checksumSHA1": "NrK9uDGSZ2WKMNLYicxDYmpRS3I=",
@@ -895,6 +895,12 @@
 			"path": "github.com/opencontainers/runc/libcontainer/utils",
 			"revision": "89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8"
 		},
+		{
+			"checksumSHA1": "ynJSWoF6v+3zMnh9R0QmmG6iGV8=",
+			"path": "github.com/pkg/errors",
+			"revision": "248dadf4e9068a0b3e79f02ed0a610d935de5302",
+			"revisionTime": "2016-10-29T09:36:37Z"
+		},
 		{
 			"checksumSHA1": "1SC2ACq72a+yfN6CYp5s5woKsR4=",
 			"comment": "v2.0.1-8-g983d3a5",