diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..447d48e
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,126 @@
+# CONTRIBUTING
+
+Thank you for considering contributing to the Outrigger CLI!
+
+## Quality Contributions
+
+* Make sure your branch will compile.
+* Make sure your branch passes our static analysis checks.
+* Make sure your branch conforms with go fmt standards.
+* Manually test your changes.
+
+## User Interactions
+
+One of the key goals of this project is to promote a positive developer
+experience. Every interaction should be thought of with the following points:
+
+* Are you providing the user with enough context about what's being asked or being done?
+* Does the user expect to wait? Might the user think the tool stalled?
+* Is there black box business happening that could be made more transparent?
+
+We have a slightly complex logging API to support addressing these concerns.
+(See ./util/logging.go)
+
+Here are a few conventions:
+
+* **Starting a task that could take more than 5 seconds:**
+  * `cmd.out.Spin("Preparing the sauce")`
+* **Use the correct method to log operational results: (Pick one)**
+  * `cmd.out.Info("Sauce is Ready.")`
+  * `cmd.out.Warning("Sauce is burnt on the bottom.")`
+  * `cmd.out.Error("Discard this sauce and try again.")`
+* **Going to send some contextual notes to the user**:
+  1. `cmd.out.NoSpin()` if currently using the spinner.
+  2. `cmd.out.Info("Sauce exists.")`
+  3. `cmd.out.Verbose("The ingredients of the sauce include tomato, salt, black pepper, garlic...")`
+* **Command has executed and is successful. Please no notification:**
+  ```
+  cmd.out.Info("Enjoy your dinner.")
+  return cmd.Success("")
+  ```
+* **Command has executed and is successful. 
Get a notification too!**
+  ```
+  return cmd.Success("Enjoy your dinner.")
+  ```
+* **Command failed:**
+  ```
+  message := "Cooking sauce is hard, we failed"
+  cmd.out.Error("%s: %s", message, err.Error())
+  return cmd.Failure(message, "SAUCE-FAILED", 13)
+  ```
+
+## Development Environment Setup
+
+### Developing with Docker
+
+You can use the Docker integration within this repository to facilitate development in lieu of setting up a
+local golang environment. Using docker-compose, run the following commands:
+
+```bash
+docker-compose run --rm install
+docker-compose run --rm compile
+```
+
+This will produce a working OSX binary at `build/darwin/rig`.
+
+If you change a dependency in `Gopkg.toml` you can update an individual package dependency with:
+
+```bash
+docker-compose run --rm update [package]
+```
+
+If you want to update all packages use:
+
+```bash
+docker-compose run --rm update
+```
+
+If you want to run the static analysis checks:
+
+```bash
+docker-compose run --rm lint
+```
+
+If you want to run go fmt against the codebase:
+```bash
+docker-compose run --rm base go fmt ./... 
+``` + +### Developing Locally + +Install go from homebrew using the flag to include common cross-compiler targets (namely Darwin, Linux, and Windows) + +```bash +brew install go --with-cc-common +brew install dep +brew tap goreleaser/tap +brew install goreleaser/tap/goreleaser +``` + +Setup `$GOPATH` and `$PATH` in your favorite shell (`~/.bashrc` or `~/.zshrc`) + +```bash +export GOPATH=$HOME/Projects +export PATH=$PATH:$GOPATH/bin +``` + +Checkout the code into your `$GOPATH` in `$GOPATH/src/github.com/phase2/rig` + +Get all the dependencies + +```bash +# Install the project dependencies into $GOPATH +cd $GOPATH/src/github.com/phase2/rig +dep ensure +``` + +#### Building Rig + +If you want to build `rig` locally for your target platform, simply run the following command: + +```bash +GOARCH=amd64 GOOS=darwin go build -o build/darwin/rig cmd/main.go +``` + +This command targets an OS/Architecture (Darwin/Mac and 64bit) and puts the resultant file in the `build/darwin/` +with the name `rig`. 
Change `GOARCH` and `GOOS` if you need to target a different platform diff --git a/Gopkg.lock b/Gopkg.lock index 372748e..77816f8 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -55,6 +55,12 @@ packages = ["."] revision = "179d4d0c4d8d407a32af483c2354df1d2c91e6c3" +[[projects]] + name = "github.com/slok/gospinner" + packages = ["."] + revision = "9ad9fd160041ce6bfb531a55930573fe4c24042d" + version = "v0.1.0" + [[projects]] name = "github.com/urfave/cli" packages = ["."] @@ -85,3 +91,4 @@ inputs-digest = "86055c933d04a9f46045c437c0e163af6622e85cf0ea6625c54c4222b52e62ab" solver-name = "gps-cdcl" solver-version = 1 + diff --git a/Gopkg.toml b/Gopkg.toml index 28e3f4d..b4bfd32 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -53,3 +53,7 @@ name = "github.com/martinlindhe/notify" branch = "master" +[[constraint]] + name = "github.com/slok/gospinner" + version = "0.1.0" + diff --git a/README.md b/README.md index 915194b..2a979da 100644 --- a/README.md +++ b/README.md @@ -1,80 +1,28 @@ # Rig - Outrigger CLI [![Build Status](https://travis-ci.org/phase2/rig.svg?branch=develop)](https://travis-ci.org/phase2/rig) -> A CLI for managing the Outrigger container-driven development stack. +> A CLI for managing the Outrigger's container-driven development stack. See the [documentation for more details](http://docs.outrigger.sh). +See the [CONTRIBUTING.md](./CONTRIBUTING.md) for developer documentation. -Use this readme when you want to develop the Outrigger CLI. 
+## Built on Dependencies -Setup ------- - -Install go from homebrew using the flag to include common cross-compiler targets (namely Darwin, Linux, and Windows) - -```bash -brew install go --with-cc-common -brew install dep -brew tap goreleaser/tap -brew install goreleaser/tap/goreleaser -``` - -Setup `$GOPATH` and `$PATH` in your favorite shell (`~/.bashrc` or `~/.zshrc`) - -```bash -export GOPATH=$HOME/Projects -export PATH=$PATH:$GOPATH/bin -``` - -Checkout the code into your `$GOPATH` in `$GOPATH/src/github.com/phase2/rig` - -Get all the dependencies - -```bash -# Install the project dependencies into $GOPATH -cd $GOPATH/src/github.com/phase2/rig -dep ensure -``` - -Developing Locally -------------------- - -If you want to build `rig` locally for your target platform, simply run the following command: - -```bash -GOARCH=amd64 GOOS=darwin go build -o build/darwin/rig cmd/main.go -``` - -This command targets an OS/Architecture (Darwin/Mac and 64bit) and puts the resultant file in the `build/darwin/` -with the name `rig`. Change `GOARCH` and `GOOS` if you need to target a different platform - -Developing with Docker ------------------------ - -You can use the Docker integration within this repository to facilitate development in lieu of setting up a -local golang environment. Using docker-compose, run the following commands: - -```bash -docker-compose run --rm install -docker-compose run --rm compile -``` - -This will produce a working OSX binary at `build/darwin/rig`. - -If you change a dependency in `Gopkg.toml` you can update an individual package dependency with: - -```bash -docker-compose run --rm update [package] -``` - -If you want to update all packages use: - -```bash -docker-compose run --rm update -``` +We make use of a few key libraries to do all the fancy stuff that the `rig` CLI will do. + * https://github.com/urfave/cli + * The entire CLI framework from helps text to flags. + This was an easy cli to build b/c of this library. 
+ * https://github.com/fatih/color + * All the fancy terminal color output + * https://github.com/bitly/go-simplejson + * The JSON parse and access library used primarily with the output + of `docker-machine inspect` + * https://gopkg.in/yaml.v2 + * The YAML library for parsing/reading YAML files + * https://github.com/martinlindhe/notify + * Cross-platform desktop notifications -Release -------- +## Release Intructions We use [GoReleaser](https://goreleaser.com) to handle nearly all of our release concerns. GoReleaser will handle @@ -90,18 +38,3 @@ To create a new release of rig: * Run `docker-compose run --rm goreleaser` * ... * Profit! - - -Dependencies -------------- - -We make use of a few key libraries to do all the fancy stuff that the `rig` CLI will do. - - * https://github.com/urfave/cli - * The entire CLI framework from helps text to flags. This was an easy cli to build b/c of this library - * https://github.com/fatih/color - * All the fancy terminal color output - * https://github.com/bitly/go-simplejson - * The JSON parse and access library used primarily with the output of `docker-machine inspect` - * https://gopkg.in/yaml.v2 - * The YAML library for parsing/reading YAML files diff --git a/commands/command.go b/commands/command.go index 96c2b3f..d538f4a 100644 --- a/commands/command.go +++ b/commands/command.go @@ -39,16 +39,25 @@ func (cmd *BaseCommand) Before(c *cli.Context) error { // Success encapsulates the functionality for reporting command success func (cmd *BaseCommand) Success(message string) error { + // Handle success messaging. if message != "" { - cmd.out.Info.Println(message) + cmd.out.Info(message) util.NotifySuccess(cmd.context, message) } + + // If there is an active spinner wrap it up. 
+ cmd.out.NoSpin() + return nil } -// Error encapsulates the functionality for reporting command failure -func (cmd *BaseCommand) Error(message string, errorName string, exitCode int) error { +// Failure encapsulates the functionality for reporting command failure +func (cmd *BaseCommand) Failure(message string, errorName string, exitCode int) error { + // Make sure any running spinner halts. + cmd.out.NoSpin() + // Handle error messaging. util.NotifyError(cmd.context, message) + return cli.NewExitError(fmt.Sprintf("ERROR: %s [%s] (%d)", message, errorName, exitCode), exitCode) } @@ -64,6 +73,6 @@ func (cmd *BaseCommand) NewContext(name string, flags []cli.Flag, parent *cli.Co // SetContextFlag set a flag on the provided context func (cmd *BaseCommand) SetContextFlag(ctx *cli.Context, name string, value string) { if err := ctx.Set(name, value); err != nil { - cmd.out.Error.Fatal(err) + cmd.out.Channel.Error.Fatal(err) } } diff --git a/commands/config.go b/commands/config.go index 3fc6f25..fcac72d 100644 --- a/commands/config.go +++ b/commands/config.go @@ -53,7 +53,7 @@ func (cmd *Config) Run(c *cli.Context) error { os.Stdout.Write(output) } } else { - return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } return cmd.Success("") diff --git a/commands/dashboard.go b/commands/dashboard.go index 680e007..3c3b9d5 100644 --- a/commands/dashboard.go +++ b/commands/dashboard.go @@ -32,11 +32,16 @@ func (cmd *Dashboard) Commands() []cli.Command { // Run executes the `rig dashboard` command func (cmd *Dashboard) Run(ctx *cli.Context) error { if cmd.machine.IsRunning() || util.SupportsNativeDocker() { - cmd.out.Info.Println("Launching Dashboard") - return cmd.LaunchDashboard(cmd.machine) + cmd.out.Info("Launching Dashboard") + err := cmd.LaunchDashboard(cmd.machine) + if err != nil { + // Success may be presumed to 
only execute once per command execution. + // This allows calling LaunchDashboard() from start.go without success. + return cmd.Success("") + } } - return cmd.Error(fmt.Sprintf("Machine '%s' is not running.", cmd.machine.Name), "MACHINE-STOPPED", 12) + return cmd.Failure(fmt.Sprintf("Machine '%s' is not running.", cmd.machine.Name), "MACHINE-STOPPED", 12) } // LaunchDashboard launches the dashboard, stopping it first for a clean automatic update @@ -49,12 +54,16 @@ func (cmd *Dashboard) LaunchDashboard(machine Machine) error { // except to indicate the age of the image before update in the next section. _, seconds, err := util.ImageOlderThan(dashboardImageName, 86400*30) if err == nil { - cmd.out.Verbose.Printf("Local copy of the dashboardImageName '%s' was originally published %0.2f days ago.", dashboardImageName, seconds/86400) + cmd.out.Verbose("Local copy of the dashboardImageName '%s' was originally published %0.2f days ago.", dashboardImageName, seconds/86400) } - cmd.out.Verbose.Printf("Attempting to update %s", dashboardImageName) + // Updating the dashboard is rarely of interest to users so uses verbose logging. + // Per our user interaction practices, we would normally use a spinner here. + cmd.out.Verbose("Attempting to update %s", dashboardImageName) if err := util.StreamCommand("docker", "pull", dashboardImageName); err != nil { - cmd.out.Verbose.Println("Failed to update dashboard image. Will use local cache if available.") + cmd.out.Verbose("Failed to update dashboard image. Will use local cache if available.") + } else { + cmd.out.Verbose("Successfully updated dashboard.") } dockerAPIVersion, _ := util.GetDockerServerAPIVersion() @@ -71,16 +80,15 @@ func (cmd *Dashboard) LaunchDashboard(machine Machine) error { } util.ForceStreamCommand("docker", args...) 
- if util.IsMac() { util.Command("open", "http://dashboard.outrigger.vm").Run() } else if util.IsWindows() { util.Command("start", "http://dashboard.outrigger.vm").Run() } else { - cmd.out.Info.Println("Outrigger Dashboard is now available at http://dashboard.outrigger.vm") + cmd.out.Info("Outrigger Dashboard is now available at http://dashboard.outrigger.vm") } - return cmd.Success("") + return nil } // StopDashboard stops and removes the dashboard container diff --git a/commands/data_backup.go b/commands/data_backup.go index 376feec..d26b501 100644 --- a/commands/data_backup.go +++ b/commands/data_backup.go @@ -44,31 +44,32 @@ func (cmd *DataBackup) Run(c *cli.Context) error { } if !cmd.machine.Exists() { - return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } dataDir := c.String("data-dir") backupDir := c.String("backup-dir") backupFile := fmt.Sprintf("%s%c%s.tgz", backupDir, os.PathSeparator, cmd.machine.Name) if _, err := os.Stat(backupDir); err != nil { - cmd.out.Info.Printf("Creating backup directory: %s...", backupDir) + cmd.out.Info("Creating backup directory: %s...", backupDir) if mkdirErr := util.Command("mkdir", "-p", backupDir).Run(); mkdirErr != nil { - cmd.out.Error.Println(mkdirErr) - return cmd.Error(fmt.Sprintf("Could not create backup directory %s", backupDir), "BACKUP-DIR-CREATE-FAILED", 12) + cmd.out.Error(mkdirErr.Error()) + return cmd.Failure(fmt.Sprintf("Could not create backup directory %s", backupDir), "BACKUP-DIR-CREATE-FAILED", 12) } } else if _, err := os.Stat(backupFile); err == nil { // If the backup dir already exists, make sure the backup file does not exist. 
- return cmd.Error(fmt.Sprintf("Backup archive %s already exists.", backupFile), "BACKUP-ARCHIVE-EXISTS", 12) + return cmd.Failure(fmt.Sprintf("Backup archive %s already exists.", backupFile), "BACKUP-ARCHIVE-EXISTS", 12) } - cmd.out.Info.Printf("Backing up %s on '%s' to %s...", dataDir, cmd.machine.Name, backupFile) - // Stream the archive to stdout and capture it in a local file so we don't waste // space storing an archive on the VM filesystem. There may not be enough space. + cmd.out.Spin(fmt.Sprintf("Backing up %s on '%s' to %s...", dataDir, cmd.machine.Name, backupFile)) archiveCmd := fmt.Sprintf("sudo tar czf - -C %s .", dataDir) if err := util.StreamCommand("docker-machine", "ssh", cmd.machine.Name, archiveCmd, ">", backupFile); err != nil { - return cmd.Error(err.Error(), "COMMAND-ERROR", 13) + cmd.out.Error("Backup failed: %s", err.Error()) + return cmd.Failure("Backup failed", "COMMAND-ERROR", 13) } - return cmd.Success("Data Backup completed with no errors") + cmd.out.Info("Backup complete: %s", backupFile) + return cmd.Success("Data Backup completed") } diff --git a/commands/data_restore.go b/commands/data_restore.go index 16705e4..5c9af60 100644 --- a/commands/data_restore.go +++ b/commands/data_restore.go @@ -44,7 +44,7 @@ func (cmd *DataRestore) Run(c *cli.Context) error { } if !cmd.machine.Exists() { - return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } dataDir := c.String("data-dir") @@ -54,17 +54,17 @@ func (cmd *DataRestore) Run(c *cli.Context) error { } if _, err := os.Stat(backupFile); err != nil { - return cmd.Error(fmt.Sprintf("Backup archive %s doesn't exists.", backupFile), "BACKUP-ARCHIVE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("Backup archive %s doesn't exists.", backupFile), "BACKUP-ARCHIVE-NOT-FOUND", 12) } - cmd.out.Info.Printf("Restoring %s to %s on '%s'...", 
backupFile, dataDir, cmd.machine.Name) - + cmd.out.Spin(fmt.Sprintf("Restoring %s to %s on '%s'...", backupFile, dataDir, cmd.machine.Name)) // Send the archive via stdin and extract inline. Saves on disk & performance extractCmd := fmt.Sprintf("cat %s | docker-machine ssh %s \"sudo tar xzf - -C %s\"", backupFile, cmd.machine.Name, dataDir) - cmd.out.Info.Printf(extractCmd) if err := util.StreamCommand("bash", "-c", extractCmd); err != nil { - return cmd.Error(err.Error(), "COMMAND-ERROR", 13) + cmd.out.Error("Data restore failed: %s", err.Error()) + return cmd.Failure("Data restore failed", "COMMAND-ERROR", 13) } + cmd.out.Info("Data restore complete") return cmd.Success("Data Restore was successful") } diff --git a/commands/dns-records.go b/commands/dns-records.go index 9796466..0380617 100644 --- a/commands/dns-records.go +++ b/commands/dns-records.go @@ -34,7 +34,7 @@ func (cmd *DNSRecords) Run(c *cli.Context) error { records, err := cmd.LoadRecords() if err != nil { - return cmd.Error(err.Error(), "COMMAND-ERROR", 13) + return cmd.Failure(err.Error(), "COMMAND-ERROR", 13) } for _, record := range records { diff --git a/commands/dns.go b/commands/dns.go index 493d6f9..474149f 100644 --- a/commands/dns.go +++ b/commands/dns.go @@ -37,14 +37,15 @@ func (cmd *DNS) Commands() []cli.Command { // Run executes the `rig dns` command func (cmd *DNS) Run(c *cli.Context) error { - cmd.out.Info.Println("Configuring DNS") + cmd.out.Info("Configuring DNS") if !util.SupportsNativeDocker() && !cmd.machine.IsRunning() { - return cmd.Error(fmt.Sprintf("Machine '%s' is not running.", cmd.machine.Name), "MACHINE-STOPPED", 12) + return cmd.Failure(fmt.Sprintf("Machine '%s' is not running.", cmd.machine.Name), "MACHINE-STOPPED", 12) } if err := cmd.StartDNS(cmd.machine, c.String("nameservers")); err != nil { - return cmd.Error(err.Error(), "DNS-SETUP-FAILED", 13) + cmd.out.Error("DNS is ready") + return cmd.Failure(err.Error(), "DNS-SETUP-FAILED", 13) } if 
!util.SupportsNativeDocker() { @@ -56,13 +57,15 @@ func (cmd *DNS) Run(c *cli.Context) error { // ConfigureRoutes will configure routing to allow access to containers on IP addresses // within the Docker Machine bridge network func (cmd *DNS) ConfigureRoutes(machine Machine) { - cmd.out.Info.Println("Setting up local networking (may require your admin password)") + cmd.out.Spin("Setting up local networking (may require your admin password)") if util.IsMac() { cmd.configureMacRoutes(machine) } else if util.IsWindows() { cmd.configureWindowsRoutes(machine) } + + cmd.out.Info("Local networking is ready") } // ConfigureMac configures DNS resolution and network routing @@ -76,12 +79,12 @@ func (cmd *DNS) configureMacRoutes(machine Machine) { util.StreamCommand("sudo", "route", "-n", "add", "172.17.0.0/16", machineIP) if _, err := os.Stat("/usr/sbin/discoveryutil"); err == nil { // Put this here for people running OS X 10.10.0 to 10.10.3 (oy vey.) - cmd.out.Verbose.Println("Restarting discoveryutil to flush DNS caches") + cmd.out.Verbose("Restarting discoveryutil to flush DNS caches") util.StreamCommand("sudo", "launchctl", "unload", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") util.StreamCommand("sudo", "launchctl", "load", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") } else { // Reset DNS cache. We have seen this suddenly make /etc/resolver/vm work. 
- cmd.out.Verbose.Println("Restarting mDNSResponder to flush DNS caches") + cmd.out.Verbose("Restarting mDNSResponder to flush DNS caches") util.StreamCommand("sudo", "killall", "-HUP", "mDNSResponder") } } @@ -91,7 +94,7 @@ func (cmd *DNS) removeHostFilter(ipAddr string) { // #1: route -n get to find the interface name routeData, err := util.Command("route", "-n", "get", ipAddr).CombinedOutput() if err != nil { - cmd.out.Warning.Println("Unable to determine bridge interface to remove hostfilter") + cmd.out.Warning("Unable to determine bridge interface to remove hostfilter") return } ifaceRegexp := regexp.MustCompile(`interface:\s(\w+)`) @@ -100,7 +103,7 @@ func (cmd *DNS) removeHostFilter(ipAddr string) { // #2: ifconfig to get the details ifaceData, err := util.Command("ifconfig", iface).CombinedOutput() if err != nil { - cmd.out.Warning.Println("Unable to determine member to remove hostfilter") + cmd.out.Warning("Unable to determine member to remove hostfilter") return } memberRegexp := regexp.MustCompile(`member:\s(\w+)\s`) @@ -118,6 +121,7 @@ func (cmd *DNS) configureWindowsRoutes(machine Machine) { // StartDNS will start the dnsdock service func (cmd *DNS) StartDNS(machine Machine, nameservers string) error { + cmd.out.Spin("Setting up DNS resolver...") dnsServers := strings.Split(nameservers, ",") bridgeIP, err := util.GetBridgeIP() @@ -148,8 +152,8 @@ func (cmd *DNS) StartDNS(machine Machine, nameservers string) error { for _, server := range dnsServers { args = append(args, "--nameserver="+server) } - util.ForceStreamCommand("docker", args...) + util.ForceStreamCommand("docker", args...) 
// Configure the resolvers based on platform var resolverReturn error if util.IsMac() { @@ -159,12 +163,14 @@ func (cmd *DNS) StartDNS(machine Machine, nameservers string) error { } else if util.IsWindows() { resolverReturn = cmd.configureWindowsResolver(machine) } + cmd.out.Info("DNS resolution is ready") + return resolverReturn } // configureMacResolver configures DNS resolution and network routing func (cmd *DNS) configureMacResolver(machine Machine) error { - cmd.out.Verbose.Print("Configuring DNS resolution for macOS") + cmd.out.Verbose("Configuring DNS resolution for macOS") bridgeIP := machine.GetBridgeIP() if err := util.Command("sudo", "mkdir", "-p", "/etc/resolver").Run(); err != nil { @@ -175,12 +181,12 @@ func (cmd *DNS) configureMacResolver(machine Machine) error { } if _, err := os.Stat("/usr/sbin/discoveryutil"); err == nil { // Put this here for people running OS X 10.10.0 to 10.10.3 (oy vey.) - cmd.out.Verbose.Println("Restarting discoveryutil to flush DNS caches") + cmd.out.Verbose("Restarting discoveryutil to flush DNS caches") util.StreamCommand("sudo", "launchctl", "unload", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") util.StreamCommand("sudo", "launchctl", "load", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") } else { // Reset DNS cache. We have seen this suddenly make /etc/resolver/vm work. 
- cmd.out.Verbose.Println("Restarting mDNSResponder to flush DNS caches") + cmd.out.Verbose("Restarting mDNSResponder to flush DNS caches") util.StreamCommand("sudo", "killall", "-HUP", "mDNSResponder") } return nil @@ -188,7 +194,7 @@ func (cmd *DNS) configureMacResolver(machine Machine) error { // configureLinuxResolver configures DNS resolution func (cmd *DNS) configureLinuxResolver() error { - cmd.out.Verbose.Print("Configuring DNS resolution for linux") + cmd.out.Verbose("Configuring DNS resolution for linux") bridgeIP, err := util.GetBridgeIP() if err != nil { return err @@ -217,7 +223,7 @@ func (cmd *DNS) configureLinuxResolver() error { // configureWindowsResolver configures DNS resolution and network routing func (cmd *DNS) configureWindowsResolver(machine Machine) error { // TODO: Figure out Windows resolver configuration - cmd.out.Verbose.Print("TODO: Configuring DNS resolution for windows") + cmd.out.Verbose("TODO: Configuring DNS resolution for windows") return nil } diff --git a/commands/doctor.go b/commands/doctor.go index 713827b..7d3396f 100644 --- a/commands/doctor.go +++ b/commands/doctor.go @@ -3,6 +3,7 @@ package commands import ( "fmt" "os" + "os/exec" "strconv" "strings" @@ -32,63 +33,72 @@ func (cmd *Doctor) Commands() []cli.Command { // nolint: gocyclo func (cmd *Doctor) Run(c *cli.Context) error { // 0. Ensure all of rig's dependencies are available in the PATH. 
- if err := util.Command("docker", "-h").Start(); err == nil { - cmd.out.Info.Println("Docker is installed.") + cmd.out.Spin("Checking Docker installation...") + if err := exec.Command("docker", "-h").Start(); err == nil { + cmd.out.Info("Docker is installed.") } else { - cmd.out.Error.Fatal("Docker (docker) is not installed.") + cmd.out.Error("Docker (docker) is not installed.") } if !util.SupportsNativeDocker() { - if err := util.Command("docker-machine", "-h").Start(); err == nil { - cmd.out.Info.Println("Docker Machine is installed.") + cmd.out.Spin("Checking Docker Machine installation...") + if err := exec.Command("docker-machine", "-h").Start(); err == nil { + cmd.out.Info("Docker Machine is installed.") } else { - cmd.out.Error.Fatal("Docker Machine (docker-machine) is not installed.") + cmd.out.Error("Docker Machine (docker-machine) is not installed.") } } - if err := util.Command("docker-compose", "-h").Start(); err == nil { - cmd.out.Info.Println("Docker Compose is installed.") + cmd.out.Spin("Checking Docker Compose installation...") + if err := exec.Command("docker-compose", "-h").Start(); err == nil { + cmd.out.Info("Docker Compose is installed.") } else { - cmd.out.Warning.Printf("Docker Compose (docker-compose) is not installed.") + cmd.out.Error("Docker Compose (docker-compose) is not installed.") } // 1. Ensure the configured docker-machine matches the set environment. if !util.SupportsNativeDocker() { + cmd.out.Spin("Checking Docker Machine configuration...") if cmd.machine.Exists() { if _, isset := os.LookupEnv("DOCKER_MACHINE_NAME"); !isset { - cmd.out.Error.Fatalf("Docker configuration is not set. Please run 'eval \"$(rig config)\"'.") + cmd.out.Error("Docker configuration is not set. Please run 'eval \"$(rig config)\"'.") + return cmd.Failure("Could not complete.", "DOCTOR-FATAL", 1) } else if cmd.machine.Name != os.Getenv("DOCKER_MACHINE_NAME") { - cmd.out.Error.Fatalf("Your environment configuration specifies a different machine. 
Please re-run as 'rig --name=\"%s\" doctor'.", cmd.machine.Name) + cmd.out.Error("Your environment configuration specifies a different machine. Please re-run as 'rig --name=\"%s\" doctor'.", cmd.machine.Name) + return cmd.Failure("Could not complete.", "DOCTOR-FATAL", 1) } else { - cmd.out.Info.Printf("Docker Machine (%s) name matches your environment configuration.", cmd.machine.Name) + cmd.out.Info("Docker Machine (%s) name matches your environment configuration.", cmd.machine.Name) } - if output, err := util.Command("docker-machine", "url", cmd.machine.Name).Output(); err == nil { + if output, err := exec.Command("docker-machine", "url", cmd.machine.Name).Output(); err == nil { hostURL := strings.TrimSpace(string(output)) if hostURL != os.Getenv("DOCKER_HOST") { - cmd.out.Error.Fatalf("Docker Host configuration should be '%s' but got '%s'. Please re-run 'eval \"$(rig config)\"'.", os.Getenv("DOCKER_HOST"), hostURL) - } else { - cmd.out.Info.Printf("Docker Machine (%s) URL (%s) matches your environment configuration.", cmd.machine.Name, hostURL) + cmd.out.Error("Docker Host configuration should be '%s' but got '%s'. Please re-run 'eval \"$(rig config)\"'.", os.Getenv("DOCKER_HOST"), hostURL) + return cmd.Failure("Could not complete.", "DOCTOR-FATAL", 1) } + cmd.out.Info("Docker Machine (%s) URL (%s) matches your environment configuration.", cmd.machine.Name, hostURL) } } else { - cmd.out.Error.Fatalf("No machine named '%s' exists. Did you run 'rig start --name=\"%s\"'?", cmd.machine.Name, cmd.machine.Name) + cmd.out.Error("No machine named '%s' exists. Did you run 'rig start --name=\"%s\"'?", cmd.machine.Name, cmd.machine.Name) + return cmd.Failure("Could not complete.", "DOCTOR-FATAL", 1) } } // 1.5 Ensure docker / machine is running if !util.SupportsNativeDocker() { + cmd.out.Spin("Checking Docker Machine is operational...") if !cmd.machine.IsRunning() { - cmd.out.Error.Fatalf("Machine '%s' is not running. 
You may need to run 'rig start'", cmd.machine.Name) - } else { - cmd.out.Info.Printf("Docker Machine (%s) is running", cmd.machine.Name) + cmd.out.Error("Docker Machine '%s' is not running. You may need to run 'rig start'.", cmd.machine.Name) + return cmd.Failure(fmt.Sprintf("Machine '%s' is not running. ", cmd.machine.Name), "DOCTOR-FATAL", 1) } + cmd.out.Info("Docker Machine (%s) is running", cmd.machine.Name) } else { if err := util.Command("docker", "version").Run(); err != nil { - cmd.out.Error.Fatalf("Docker is not running. You may need to run 'systemctl start docker'") - } else { - cmd.out.Info.Println("Docker is running") + cmd.out.Error("Docker is not running. You may need to run 'systemctl start docker'") + return cmd.Failure("Docker is not running.", "DOCTOR-FATAL", 1) } + cmd.out.Info("Docker is running") } // 2. Check Docker API Version compatibility + cmd.out.Spin("Checking Docker version...") clientAPIVersion := util.GetDockerClientAPIVersion() serverAPIVersion, err := util.GetDockerServerAPIVersion() serverMinAPIVersion, _ := util.GetDockerServerMinAPIVersion() @@ -105,84 +115,87 @@ func (cmd *Doctor) Run(c *cli.Context) error { apiConstraint, _ := version.NewConstraint(constraintString) if err != nil { - cmd.out.Error.Println("Could not determine Docker versions: ", err) + cmd.out.Error("Could not determine Docker Machine Docker versions: ", err) } else if clientAPIVersion.Equal(serverAPIVersion) { - cmd.out.Info.Printf("Docker Client (%s) and Server (%s) have equal API Versions", clientAPIVersion, serverAPIVersion) + cmd.out.Info("Docker Client (%s) and Server (%s) have equal API Versions", clientAPIVersion, serverAPIVersion) } else if apiConstraint.Check(clientAPIVersion) { - cmd.out.Info.Printf("Docker Client (%s) has Server compatible API version (%s). Server current (%s), Server min compat (%s)", clientAPIVersion, constraintString, serverAPIVersion, serverMinAPIVersion) + cmd.out.Info("Docker Client (%s) has Server compatible API version (%s). 
Server current (%s), Server min compat (%s)", clientAPIVersion, constraintString, serverAPIVersion, serverMinAPIVersion) } else { - cmd.out.Error.Printf("Docker Client (%s) is incompatible with Server. Server current (%s), Server min compat (%s). Use `rig upgrade` to fix this.", clientAPIVersion, serverAPIVersion, serverMinAPIVersion) + cmd.out.Error("Docker Client (%s) is incompatible with Server. Server current (%s), Server min compat (%s). Use `rig upgrade` to fix this.", clientAPIVersion, serverAPIVersion, serverMinAPIVersion) } // 3. Pull down the data from DNSDock. This will confirm we can resolve names as well // as route to the appropriate IP addresses via the added route commands + cmd.out.Spin("Checking DNS configuration...") dnsRecords := DNSRecords{cmd.BaseCommand} if records, err := dnsRecords.LoadRecords(); err == nil { resolved := false for _, record := range records { if record["Name"] == "dnsdock" { resolved = true - cmd.out.Info.Printf("DNS and routing services are working. DNSDock resolves to %s", record["IPs"]) + cmd.out.Info("DNS and routing services are working. DNSDock resolves to %s", record["IPs"]) break } } if !resolved { - cmd.out.Error.Println("Unable to verify DNS services are working.") + cmd.out.Error("Unable to verify DNS services are working.") } } else { - cmd.out.Error.Println("Unable to verify DNS services and routing are working.") - cmd.out.Error.Println(err) + cmd.out.Error("Unable to verify DNS services and routing are working: %s", err.Error()) } // 4. 
Ensure that docker-machine-nfs script is available for our NFS mounts (Mac ONLY) if util.IsMac() { - if err := util.Command("which", "docker-machine-nfs").Run(); err != nil { - cmd.out.Error.Println("Docker Machine NFS is not installed.") + cmd.out.Spin("Checking NFS configuration...") + if err := exec.Command("which", "docker-machine-nfs").Run(); err != nil { + cmd.out.Error("Docker Machine NFS is not installed.") } else { - cmd.out.Info.Println("Docker Machine NFS is installed.") + cmd.out.Info("Docker Machine NFS is installed.") } } // 5. Check for storage on VM volume if !util.SupportsNativeDocker() { - output, err := util.Command("docker-machine", "ssh", cmd.machine.Name, "df -h 2> /dev/null | grep /dev/sda1 | head -1 | awk '{print $5}' | sed 's/%//'").Output() + cmd.out.Spin("Checking Data (/data) volume capacity...") + output, err := exec.Command("docker-machine", "ssh", cmd.machine.Name, "df -h 2> /dev/null | grep /dev/sda1 | head -1 | awk '{print $5}' | sed 's/%//'").Output() if err == nil { dataUsage := strings.TrimSpace(string(output)) if i, e := strconv.Atoi(dataUsage); e == nil { if i >= 85 && i < 95 { - cmd.out.Warning.Printf("Data volume (/data) is %d%% used. Please free up space soon.", i) + cmd.out.Warning("Data volume (/data) is %d%% used. Please free up space soon.", i) } else if i >= 95 { - cmd.out.Error.Printf("Data volume (/data) is %d%% used. Please free up space. Try 'docker system prune' or removing old projects / databases from /data.", i) + cmd.out.Error("Data volume (/data) is %d%% used. Please free up space. Try 'docker system prune' or removing old projects / databases from /data.", i) } else { - cmd.out.Info.Printf("Data volume (/data) is %d%% used.", i) + cmd.out.Info("Data volume (/data) is %d%% used.", i) } } else { - cmd.out.Warning.Printf("Unable to determine usage level of /data volume. Failed to parse '%s'", dataUsage) + cmd.out.Warning("Unable to determine usage level of /data volume. 
Failed to parse '%s'", dataUsage) } } else { - cmd.out.Warning.Printf("Unable to determine usage level of /data volume. Failed to execute 'df': %v", err) + cmd.out.Warning("Unable to determine usage level of /data volume. Failed to execute 'df': %v", err) } } // 6. Check for storage on /Users if !util.SupportsNativeDocker() { - output, err := util.Command("docker-machine", "ssh", cmd.machine.Name, "df -h 2> /dev/null | grep /Users | head -1 | awk '{print $5}' | sed 's/%//'").Output() + cmd.out.Spin("Checking Root (/Users) drive capacity...") + output, err := exec.Command("docker-machine", "ssh", cmd.machine.Name, "df -h 2> /dev/null | grep /Users | head -1 | awk '{print $5}' | sed 's/%//'").Output() if err == nil { userUsage := strings.TrimSpace(string(output)) if i, e := strconv.Atoi(userUsage); e == nil { if i >= 85 && i < 95 { - cmd.out.Warning.Printf("Root drive (/Users) is %d%% used. Please free up space soon.", i) + cmd.out.Warning("Root drive (/Users) is %d%% used. Please free up space soon.", i) } else if i >= 95 { - cmd.out.Error.Printf("Root drive (/Users) is %d%% used. Please free up space.", i) + cmd.out.Error("Root drive (/Users) is %d%% used. Please free up space.", i) } else { - cmd.out.Info.Printf("Root drive (/Users) is %d%% used.", i) + cmd.out.Info("Root drive (/Users) is %d%% used.", i) } } else { - cmd.out.Warning.Printf("Unable to determine usage level of root drive (/Users). Failed to parse '%s'", userUsage) + cmd.out.Warning("Unable to determine usage level of root drive (/Users). Failed to parse '%s'", userUsage) } } else { - cmd.out.Warning.Printf("Unable to determine usage level of root drive (/Users). Failed to execute 'df': %v", err) + cmd.out.Warning("Unable to determine usage level of root drive (/Users). 
Failed to execute 'df': %v", err) } } diff --git a/commands/kill.go b/commands/kill.go index 95f50f2..f2a3ea8 100644 --- a/commands/kill.go +++ b/commands/kill.go @@ -31,7 +31,7 @@ func (cmd *Kill) Run(c *cli.Context) error { } if !cmd.machine.Exists() { - return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } // First stop it (and cleanup) @@ -40,7 +40,7 @@ func (cmd *Kill) Run(c *cli.Context) error { return err } - cmd.out.Info.Printf("Killing machine '%s'", cmd.machine.Name) + cmd.out.Channel.Info.Printf("Killing machine '%s'", cmd.machine.Name) util.StreamCommand("docker-machine", "kill", cmd.machine.Name) // Ensure the underlying virtualization has stopped @@ -49,11 +49,11 @@ func (cmd *Kill) Run(c *cli.Context) error { case util.VirtualBox: util.StreamCommand("controlvm", cmd.machine.Name, "poweroff") case util.VMWare: - cmd.out.Warning.Println("Add vmrun suspend command.") + cmd.out.Warning("Add vmrun suspend command.") case util.Xhyve: - cmd.out.Warning.Println("Add equivalent xhyve kill command.") + cmd.out.Warning("Add equivalent xhyve kill command.") default: - cmd.out.Warning.Printf("Driver not recognized: %s\n", driver) + cmd.out.Channel.Warning.Printf("Driver not recognized: %s\n", driver) } return cmd.Success(fmt.Sprintf("Machine '%s' killed", cmd.machine.Name)) diff --git a/commands/machine.go b/commands/machine.go index 9d131aa..22f55fc 100644 --- a/commands/machine.go +++ b/commands/machine.go @@ -22,7 +22,7 @@ type Machine struct { // Create will generate a new Docker Machine configured according to user specification func (m *Machine) Create(driver string, cpuCount string, memSize string, diskSize string) error { - m.out.Info.Printf("Creating a %s machine named '%s' with CPU(%s) MEM(%s) DISK(%s)...", driver, m.Name, cpuCount, memSize, diskSize) + m.out.Channel.Info.Printf("Creating a 
%s machine named '%s' with CPU(%s) MEM(%s) DISK(%s)...", driver, m.Name, cpuCount, memSize, diskSize) boot2dockerURL := "https://github.com/boot2docker/boot2docker/releases/download/v" + util.GetRawCurrentDockerVersion() + "/boot2docker.iso" @@ -72,7 +72,7 @@ func (m *Machine) Create(driver string, cpuCount string, memSize string, diskSiz return fmt.Errorf("error creating machine '%s': %s", m.Name, err) } - m.out.Info.Printf("Created docker-machine named '%s'...", m.Name) + m.out.Channel.Info.Printf("Created docker-machine named '%s'...", m.Name) return nil } @@ -94,7 +94,7 @@ func (m Machine) CheckXhyveRequirements() error { // Start boots the Docker Machine func (m Machine) Start() error { if !m.IsRunning() { - m.out.Verbose.Printf("The machine '%s' is not running, starting...", m.Name) + m.out.Channel.Verbose.Printf("The machine '%s' is not running, starting...", m.Name) if err := util.StreamCommand("docker-machine", "start", m.Name); err != nil { return fmt.Errorf("error starting machine '%s': %s", m.Name, err) @@ -127,10 +127,10 @@ func (m Machine) WaitForDev() error { for i := 1; i <= maxTries; i++ { m.SetEnv() if err := util.Command("docker", "ps").Run(); err == nil { - m.out.Verbose.Printf("Machine '%s' has started", m.Name) + m.out.Channel.Verbose.Printf("Machine '%s' has started", m.Name) return nil } - m.out.Warning.Printf("Docker daemon not running! Trying again in %d seconds. Try %d of %d. \n", sleepSecs, i, maxTries) + m.out.Channel.Warning.Printf("Docker daemon not running! Trying again in %d seconds. Try %d of %d. 
\n", sleepSecs, i, maxTries) time.Sleep(time.Duration(sleepSecs) * time.Second) } @@ -183,7 +183,7 @@ func (m *Machine) GetData() *simplejson.Json { if inspect, inspectErr := util.Command("docker-machine", "inspect", m.Name).Output(); inspectErr == nil { if js, jsonErr := simplejson.NewJson(inspect); jsonErr != nil { - m.out.Error.Fatalf("Failed to parse '%s' JSON: %s", m.Name, jsonErr) + m.out.Channel.Error.Fatalf("Failed to parse '%s' JSON: %s", m.Name, jsonErr) } else { m.inspectData = js return m.inspectData @@ -273,7 +273,7 @@ func (m Machine) GetSysctl(setting string) (string, error) { // SetSysctl sets the sysctl setting on the Docker Machine func (m Machine) SetSysctl(key string, val string) error { cmd := fmt.Sprintf("sudo sysctl -w %s=%s", key, val) - m.out.Verbose.Printf("Modifying Docker Machine kernel settings: %s", cmd) + m.out.Verbose("Modifying Docker Machine kernel settings: %s", cmd) _, err := util.Command("docker-machine", "ssh", m.Name, cmd).CombinedOutput() return err } diff --git a/commands/project.go b/commands/project.go index ab97048..fe32a83 100644 --- a/commands/project.go +++ b/commands/project.go @@ -78,14 +78,14 @@ func (cmd *Project) GetScriptsAsSubcommands(otherSubcommands []cli.Command) []cl // Run executes the specified `rig project` script func (cmd *Project) Run(c *cli.Context) error { - cmd.out.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) + cmd.out.Verbose("Loaded project configuration from %s", cmd.Config.Path) if cmd.Config.Scripts == nil { - cmd.out.Error.Fatal("There are no scripts discovered in: %s", cmd.Config.File) + cmd.out.Channel.Error.Fatal("There are no scripts discovered in: %s", cmd.Config.File) } key := strings.TrimPrefix(c.Command.Name, "run:") if script, ok := cmd.Config.Scripts[key]; ok { - cmd.out.Verbose.Printf("Initializing project script '%s': %s", key, script.Description) + cmd.out.Verbose("Initializing project script '%s': %s", key, script.Description) cmd.addCommandPath() dir 
:= filepath.Dir(cmd.Config.Path) @@ -94,14 +94,14 @@ func (cmd *Project) Run(c *cli.Context) error { shellCmd := cmd.GetCommand(scriptCommands) shellCmd.Dir = dir - cmd.out.Verbose.Printf("Script execution - Working Directory: %s", dir) + cmd.out.Verbose("Script execution - Working Directory: %s", dir) - cmd.out.Verbose.Printf("Executing '%s' as '%s'", key, scriptCommands) + cmd.out.Verbose("Executing '%s' as '%s'", key, scriptCommands) if exitCode := util.PassthruCommand(shellCmd); exitCode != 0 { - return cmd.Error(fmt.Sprintf("Error running project script '%s'", key), "COMMAND-ERROR", exitCode) + return cmd.Failure(fmt.Sprintf("Failure running project script '%s'", key), "COMMAND-ERROR", exitCode) } } else { - return cmd.Error(fmt.Sprintf("Unrecognized script '%s'", key), "SCRIPT-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("Unrecognized script '%s'", key), "SCRIPT-NOT-FOUND", 12) } return cmd.Success("") @@ -131,7 +131,7 @@ func (cmd *Project) GetCommandSeparator() string { func (cmd *Project) addCommandPath() { binDir := cmd.Config.Bin if binDir != "" { - cmd.out.Verbose.Printf("Script execution - Adding to $PATH: %s", binDir) + cmd.out.Verbose("Script execution - Adding to $PATH: %s", binDir) path := os.Getenv("PATH") os.Setenv("PATH", fmt.Sprintf("%s%c%s", binDir, os.PathListSeparator, path)) } diff --git a/commands/project_config.go b/commands/project_config.go index fb1d9ab..813875d 100644 --- a/commands/project_config.go +++ b/commands/project_config.go @@ -89,16 +89,16 @@ func NewProjectConfigFromFile(filename string) (*ProjectConfig, error) { yamlFile, err := ioutil.ReadFile(config.File) if err != nil { - logger.Verbose.Printf("No project configuration file could be read at: %s", config.File) + logger.Verbose("No project configuration file could be read at: %s", config.File) return config, err } if err := yaml.Unmarshal(yamlFile, config); err != nil { - logger.Error.Fatalf("Error parsing YAML config: %v", err) + logger.Channel.Error.Fatalf("Failure 
parsing YAML config: %v", err) } if err := config.ValidateConfigVersion(); err != nil { - logger.Error.Fatalf("Error in %s: %s", filename, err) + logger.Channel.Error.Fatalf("Failure in %s: %s", filename, err) } if len(config.Bin) == 0 { @@ -147,21 +147,21 @@ func (c *ProjectConfig) ValidateProjectScripts(subcommands []cli.Command) { // Check for an empty script if script == nil { - logger.Error.Fatalf("Project script '%s' has no configuration", id) + logger.Channel.Error.Fatalf("Project script '%s' has no configuration", id) } // Check for scripts with conflicting aliases with existing subcommands or subcommand aliases for _, subcommand := range subcommands { if id == subcommand.Name { - logger.Error.Fatalf("Project script name '%s' conflicts with command name '%s'. Please choose a different script name", id, subcommand.Name) + logger.Channel.Error.Fatalf("Project script name '%s' conflicts with command name '%s'. Please choose a different script name", id, subcommand.Name) } else if script.Alias == subcommand.Name { - logger.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command name '%s'. Please choose a different script alias", script.Alias, id, subcommand.Name) + logger.Channel.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command name '%s'. Please choose a different script alias", script.Alias, id, subcommand.Name) } else if subcommand.Aliases != nil { for _, alias := range subcommand.Aliases { if id == alias { - logger.Error.Fatalf("Project script name '%s' conflicts with command alias '%s' on command '%s'. Please choose a different script name", id, alias, subcommand.Name) + logger.Channel.Error.Fatalf("Project script name '%s' conflicts with command alias '%s' on command '%s'. Please choose a different script name", id, alias, subcommand.Name) } else if script.Alias == alias { - logger.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command alias '%s' on command '%s'. 
Please choose a different script alias", script.Alias, id, alias, subcommand.Name) + logger.Channel.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command alias '%s' on command '%s'. Please choose a different script alias", script.Alias, id, alias, subcommand.Name) } } } @@ -169,10 +169,10 @@ func (c *ProjectConfig) ValidateProjectScripts(subcommands []cli.Command) { // Check for scripts with no run commands if script.Run == nil || len(script.Run) == 0 { - logger.Error.Fatalf("Project script '%s' does not have any run commands.", id) + logger.Channel.Error.Fatalf("Project script '%s' does not have any run commands.", id) } else if len(script.Run) > 10 { // Check for scripts with more than 10 run commands - logger.Warning.Printf("Project script '%s' has more than 10 run items (%d). You should create a shell script to contain those.", id, len(script.Run)) + logger.Warning("Project script '%s' has more than 10 run items (%d). You should create a shell script to contain those.", id, len(script.Run)) } } } diff --git a/commands/project_create.go b/commands/project_create.go index 1939488..57a5379 100644 --- a/commands/project_create.go +++ b/commands/project_create.go @@ -55,12 +55,12 @@ func (cmd *ProjectCreate) Create(ctx *cli.Context) error { } if cmd.machine.IsRunning() || util.SupportsNativeDocker() { - cmd.out.Verbose.Printf("Executing container %s%s", image, argsMessage) + cmd.out.Verbose("Executing container %s%s", image, argsMessage) if err := cmd.RunGenerator(ctx, cmd.machine, image); err != nil { return err } } else { - return cmd.Error(fmt.Sprintf("Machine '%s' is not running.", cmd.machine.Name), "MACHINE-STOPPED", 12) + return cmd.Failure(fmt.Sprintf("Machine '%s' is not running.", cmd.machine.Name), "MACHINE-STOPPED", 12) } return cmd.Success("") @@ -73,23 +73,25 @@ func (cmd *ProjectCreate) RunGenerator(ctx *cli.Context, machine Machine, image // The check for whether the image is older than 30 days is not currently used. 
_, seconds, err := util.ImageOlderThan(image, 86400*30) if err == nil { - cmd.out.Verbose.Printf("Local copy of the image '%s' was originally published %0.2f days ago.", image, seconds/86400) + cmd.out.Verbose("Local copy of the image '%s' was originally published %0.2f days ago.", image, seconds/86400) } // If there was an error it implies no previous instance of the image is available // or that docker operations failed and things will likely go wrong anyway. if err == nil && !ctx.Bool("no-update") { - cmd.out.Verbose.Printf("Attempting to update %s", image) + cmd.out.Spin(fmt.Sprintf("Attempting to update project generator docker image: %s", image)) if e := util.StreamCommand("docker", "pull", image); e != nil { - cmd.out.Verbose.Println("Failed to update generator image. Will use local cache if available.") + cmd.out.Error("Project generator docker image failed to update. Using local cache if available: %s", image) + } else { + cmd.out.Info("Project generator docker image is up-to-date: %s", image) } } else if err == nil && ctx.Bool("no-update") { - cmd.out.Verbose.Printf("Automatic generator image update suppressed by --no-update option.") + cmd.out.Verbose("Automatic generator image update suppressed by --no-update option.") } cwd, err := os.Getwd() if err != nil { - return cmd.Error(fmt.Sprintf("Couldn't determine current working directory: %s", err), "WORKING-DIR-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("Couldn't determine current working directory: %s", err), "WORKING-DIR-NOT-FOUND", 12) } // Keep passed in args as distinct elements or they will be treated as @@ -107,7 +109,7 @@ func (cmd *ProjectCreate) RunGenerator(ctx *cli.Context, machine Machine, image shellCmd := exec.Command("docker", args...) 
if exitCode := util.PassthruCommand(shellCmd); exitCode != 0 { - return cmd.Error(fmt.Sprintf("Error running generator %s %s", image, strings.Join(ctx.Args(), " ")), "COMMAND-ERROR", exitCode) + return cmd.Failure(fmt.Sprintf("Failure running generator %s %s", image, strings.Join(ctx.Args(), " ")), "COMMAND-ERROR", exitCode) } return nil diff --git a/commands/project_sync.go b/commands/project_sync.go index 8fa7f67..f0a2dba 100644 --- a/commands/project_sync.go +++ b/commands/project_sync.go @@ -94,13 +94,13 @@ func (cmd *ProjectSync) Commands() []cli.Command { func (cmd *ProjectSync) RunStart(ctx *cli.Context) error { cmd.Config = NewProjectConfig() if cmd.Config.NotEmpty() { - cmd.out.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) + cmd.out.Verbose("Loaded project configuration from %s", cmd.Config.Path) } // Determine the working directory for CWD-sensitive operations. var workingDir, err = cmd.DeriveLocalSyncPath(cmd.Config, ctx.String("dir")) if err != nil { - return cmd.Error(err.Error(), "SYNC-PATH-ERROR", 12) + return cmd.Failure(err.Error(), "SYNC-PATH-ERROR", 12) } // Determine the volume name to be used across all operating systems. 
@@ -109,10 +109,10 @@ func (cmd *ProjectSync) RunStart(ctx *cli.Context) error { switch platform := runtime.GOOS; platform { case "linux": - cmd.out.Verbose.Printf("Setting up local volume: %s", volumeName) + cmd.out.Verbose("Setting up local volume: %s", volumeName) return cmd.SetupBindVolume(volumeName, workingDir) default: - cmd.out.Verbose.Printf("Starting sync with volume: %s", volumeName) + cmd.out.Verbose("Starting sync with volume: %s", volumeName) return cmd.StartUnisonSync(ctx, volumeName, cmd.Config, workingDir) } } @@ -121,18 +121,18 @@ func (cmd *ProjectSync) RunStart(ctx *cli.Context) error { func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, config *ProjectConfig, workingDir string) error { // Ensure the processes can handle a large number of watches if err := cmd.machine.SetSysctl("fs.inotify.max_user_watches", maxWatches); err != nil { - cmd.Error(fmt.Sprintf("Error configuring file watches on Docker Machine: %v", err), "INOTIFY-WATCH-FAILURE", 12) + cmd.Failure(fmt.Sprintf("Failure configuring file watches on Docker Machine: %v", err), "INOTIFY-WATCH-FAILURE", 12) } - cmd.out.Info.Printf("Starting sync volume: %s", volumeName) + cmd.out.Channel.Info.Printf("Starting sync volume: %s", volumeName) if err := util.Command("docker", "volume", "create", volumeName).Run(); err != nil { - return cmd.Error(fmt.Sprintf("Failed to create sync volume: %s", volumeName), "VOLUME-CREATE-FAILED", 13) + return cmd.Failure(fmt.Sprintf("Failed to create sync volume: %s", volumeName), "VOLUME-CREATE-FAILED", 13) } - cmd.out.Info.Println("Starting Unison container") + cmd.out.Info("Starting Unison container") unisonMinorVersion := cmd.GetUnisonMinorVersion() - cmd.out.Verbose.Printf("Local Unison version for compatibilty: %s", unisonMinorVersion) + cmd.out.Channel.Verbose.Printf("Local Unison version for compatibilty: %s", unisonMinorVersion) util.Command("docker", "container", "stop", volumeName).Run() containerArgs := []string{ "container", 
"run", "--detach", "--rm", @@ -144,15 +144,15 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con fmt.Sprintf("outrigger/unison:%s", unisonMinorVersion), } if err := util.Command("docker", containerArgs...).Run(); err != nil { - cmd.Error(fmt.Sprintf("Error starting sync container %s: %v", volumeName, err), "SYNC-CONTAINER-START-FAILED", 13) + cmd.Failure(fmt.Sprintf("Failure starting sync container %s: %v", volumeName, err), "SYNC-CONTAINER-START-FAILED", 13) } ip, err := cmd.WaitForUnisonContainer(volumeName, ctx.Int("initial-sync-timeout")) if err != nil { - return cmd.Error(err.Error(), "SYNC-INIT-FAILED", 13) + return cmd.Failure(err.Error(), "SYNC-INIT-FAILED", 13) } - cmd.out.Info.Println("Initializing sync") + cmd.out.Info("Initializing sync") // Determine the location of the local Unison log file. var logFile = fmt.Sprintf("%s.log", volumeName) @@ -160,7 +160,7 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con // up and running. If the logfile does not exist, do not complain. If the // filesystem cannot delete the file when it exists, it will lead to errors. if err := util.RemoveFile(logFile, workingDir); err != nil { - cmd.out.Verbose.Printf("Could not remove Unison log file: %s: %s", logFile, err.Error()) + cmd.out.Channel.Verbose.Printf("Could not remove Unison log file: %s: %s", logFile, err.Error()) } // Initiate local Unison process. @@ -179,16 +179,16 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con unisonArgs = append(unisonArgs, "-ignore", ignore) } } - cmd.out.Verbose.Printf("Unison Args: %s", strings.Join(unisonArgs[:], " ")) + cmd.out.Channel.Verbose.Printf("Unison Args: %s", strings.Join(unisonArgs[:], " ")) command := exec.Command("unison", unisonArgs...) 
command.Dir = workingDir - cmd.out.Verbose.Printf("Sync execution - Working Directory: %s", workingDir) + cmd.out.Channel.Verbose.Printf("Sync execution - Working Directory: %s", workingDir) if err = util.Convert(command).Start(); err != nil { - return cmd.Error(fmt.Sprintf("Failure starting local Unison process: %v", err), "UNISON-START-FAILED", 13) + return cmd.Failure(fmt.Sprintf("Failure starting local Unison process: %v", err), "UNISON-START-FAILED", 13) } if err := cmd.WaitForSyncInit(logFile, workingDir, ctx.Int("initial-sync-timeout"), ctx.Int("initial-sync-wait")); err != nil { - return cmd.Error(err.Error(), "UNISON-SYNC-FAILED", 13) + return cmd.Failure(err.Error(), "UNISON-SYNC-FAILED", 13) } return cmd.Success("Unison sync started successfully") @@ -196,7 +196,7 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con // SetupBindVolume will create minimal Docker Volumes for systems that have native container/volume support func (cmd *ProjectSync) SetupBindVolume(volumeName string, workingDir string) error { - cmd.out.Info.Printf("Starting local bind volume: %s", volumeName) + cmd.out.Channel.Info.Printf("Starting local bind volume: %s", volumeName) util.Command("docker", "volume", "rm", volumeName).Run() volumeArgs := []string{ @@ -208,7 +208,7 @@ func (cmd *ProjectSync) SetupBindVolume(volumeName string, workingDir string) er } if err := util.Command("docker", volumeArgs...).Run(); err != nil { - return cmd.Error(err.Error(), "BIND-VOLUME-FAILURE", 13) + return cmd.Failure(err.Error(), "BIND-VOLUME-FAILURE", 13) } return cmd.Success("Bind volume created") @@ -221,20 +221,20 @@ func (cmd *ProjectSync) RunStop(ctx *cli.Context) error { } cmd.Config = NewProjectConfig() if cmd.Config.NotEmpty() { - cmd.out.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) + cmd.out.Channel.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) } // Determine the working directory for CWD-sensitive 
operations. var workingDir, err = cmd.DeriveLocalSyncPath(cmd.Config, ctx.String("dir")) if err != nil { - return cmd.Error(err.Error(), "SYNC-PATH-ERROR", 12) + return cmd.Failure(err.Error(), "SYNC-PATH-ERROR", 12) } volumeName := cmd.GetVolumeName(cmd.Config, workingDir) - cmd.out.Verbose.Printf("Stopping sync with volume: %s", volumeName) - cmd.out.Info.Println("Stopping Unison container") + cmd.out.Channel.Verbose.Printf("Stopping sync with volume: %s", volumeName) + cmd.out.Info("Stopping Unison container") if err := util.Command("docker", "container", "stop", volumeName).Run(); err != nil { - return cmd.Error(err.Error(), "SYNC-CONTAINER-FAILURE", 13) + return cmd.Failure(err.Error(), "SYNC-CONTAINER-FAILURE", 13) } return cmd.Success("Unison container stopped") @@ -268,7 +268,7 @@ func (cmd *ProjectSync) LoadComposeFile() (*ComposeFile, error) { if err == nil { var config ComposeFile if e := yaml.Unmarshal(yamlFile, &config); e != nil { - cmd.out.Error.Fatalf("YAML Parsing Error: %s", e) + cmd.out.Channel.Error.Fatalf("YAML Parsing Failure: %s", e) } return &config, nil } @@ -282,7 +282,7 @@ func (cmd *ProjectSync) LoadComposeFile() (*ComposeFile, error) { // when compiled without -cgo this executable will not use the native mac dns resolution // which is how we have configured dnsdock to provide names for containers. 
func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeconds int) (string, error) { - cmd.out.Info.Println("Waiting for container to start") + cmd.out.Info("Waiting for container to start") var timeoutLoopSleep = time.Duration(100) * time.Millisecond // * 10 here because we loop once every 100 ms and we want to get to seconds @@ -294,7 +294,7 @@ func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeco } ip := strings.Trim(string(output), "\n") - cmd.out.Verbose.Printf("Checking for Unison network connection on %s %d", ip, unisonPort) + cmd.out.Channel.Verbose.Printf("Checking for Unison network connection on %s %d", ip, unisonPort) for i := 1; i <= timeoutLoops; i++ { conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", ip, unisonPort)) if err == nil { @@ -302,7 +302,7 @@ func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeco return ip, nil } - cmd.out.Info.Printf("Error: %v", err) + cmd.out.Channel.Info.Printf("Failure: %v", err) time.Sleep(timeoutLoopSleep) } @@ -312,20 +312,20 @@ func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeco // WaitForSyncInit will wait for the local unison process to finish initializing // when the log file exists and has stopped growing in size func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeoutSeconds int, syncWaitSeconds int) error { - cmd.out.Info.Print("Waiting for initial sync detection") + cmd.out.Info("Waiting for initial sync detection") // The use of os.Stat below is not subject to our working directory configuration, // so to ensure we can stat the log file we convert it to an absolute path. 
if logFilePath, err := util.AbsJoin(workingDir, logFile); err != nil { - cmd.out.Info.Print(err.Error()) + cmd.out.Info(err.Error()) } else { // Create a temp file to cause a sync action var tempFile = ".rig-check-sync-start" if err := util.TouchFile(tempFile, workingDir); err != nil { - cmd.out.Error.Fatal("Could not create file used to detect initial sync: %s", err.Error()) + cmd.out.Channel.Error.Fatal("Could not create file used to detect initial sync: %s", err.Error()) } - cmd.out.Verbose.Printf("Creating temporary file so we can watch for Unison initialization: %s", tempFile) + cmd.out.Verbose("Creating temporary file so we can watch for Unison initialization: %s", tempFile) var timeoutLoopSleep = time.Duration(100) * time.Millisecond // * 10 here because we loop once every 100 ms and we want to get to seconds @@ -340,7 +340,7 @@ func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo if err == nil { os.Stdout.WriteString(" initial sync detected\n") - cmd.out.Info.Print("Waiting for initial sync to finish") + cmd.out.Info("Waiting for initial sync to finish") // Initialize at -2 to force at least one loop var lastSize = int64(-2) for lastSize != statInfo.Size() { @@ -348,7 +348,7 @@ func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo time.Sleep(statSleep) lastSize = statInfo.Size() if statInfo, err = os.Stat(logFilePath); err != nil { - cmd.out.Info.Print(err.Error()) + cmd.out.Info(err.Error()) lastSize = -1 } } @@ -356,7 +356,7 @@ func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo // Remove the temp file, waiting until after sync so spurious // failure message doesn't show in log if err := util.RemoveFile(tempFile, workingDir); err != nil { - cmd.out.Warning.Printf("Could not remove the temporary file: %s: %s", tempFile, err.Error()) + cmd.out.Warning("Could not remove the temporary file: %s: %s", tempFile, err.Error()) } return nil } @@ -368,7 +368,7 @@ func (cmd 
*ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo if err := util.RemoveFile(tempFile, workingDir); err != nil { // While the removal of the tempFile is not significant, if something // prevents removal there may be a bigger problem. - cmd.out.Warning.Printf("Could not remove the temporary file: %s", err.Error()) + cmd.out.Warning("Could not remove the temporary file: %s", err.Error()) } } diff --git a/commands/prune.go b/commands/prune.go index 106e276..fc10d2a 100644 --- a/commands/prune.go +++ b/commands/prune.go @@ -26,9 +26,9 @@ func (cmd *Prune) Commands() []cli.Command { // Run executes the `rig prune` command func (cmd *Prune) Run(c *cli.Context) error { - cmd.out.Info.Println("Cleaning up Docker images and containers...") + cmd.out.Info("Cleaning up Docker images and containers...") if exitCode := util.PassthruCommand(exec.Command("docker", "system", "prune", "--all", "--volumes")); exitCode != 0 { - return cmd.Error("Error pruning Docker resources.", "COMMAND-ERROR", 13) + return cmd.Failure("Failure pruning Docker resources.", "COMMAND-ERROR", 13) } return cmd.Success("") diff --git a/commands/remove.go b/commands/remove.go index 860c7c1..3ea04ed 100644 --- a/commands/remove.go +++ b/commands/remove.go @@ -37,32 +37,32 @@ func (cmd *Remove) Run(c *cli.Context) error { } if !cmd.machine.Exists() { - return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } - cmd.out.Info.Printf("Removing '%s'", cmd.machine.Name) - + cmd.out.Info("Removing '%s'", cmd.machine.Name) force := c.Bool("force") if !force { - cmd.out.Warning.Println("!!!!! This operation is destructive. You may lose important data. !!!!!!!") - cmd.out.Warning.Println("Run 'rig data-backup' if you want to save your /data volume.") - cmd.out.Warning.Println() + cmd.out.Warning("!!!!! This operation is destructive. 
You may lose important data. !!!!!!!") + cmd.out.Warning("Run 'rig data-backup' if you want to save your /data volume.") if !util.AskYesNo("Are you sure you want to remove '" + cmd.machine.Name + "'") { return cmd.Success("Remove was aborted") } } - // Run kill first + // Run kill first. kill := Kill{cmd.BaseCommand} if err := kill.Run(c); err != nil { return err } - cmd.out.Info.Println("Removing the docker-machine") + cmd.out.Spin("Removing the docker Virtual Machine") if err := cmd.machine.Remove(); err != nil { - return cmd.Error(err.Error(), "MACHINE-REMOVE-FAILED", 12) + cmd.out.Error("Failed to remove the docker Virtual Machine") + return cmd.Failure(err.Error(), "MACHINE-REMOVE-FAILED", 12) } + cmd.out.Info("Removed the docker Virtual Machine") return cmd.Success(fmt.Sprintf("Machine '%s' removed", cmd.machine.Name)) } diff --git a/commands/restart.go b/commands/restart.go index a3c6d51..24bf774 100644 --- a/commands/restart.go +++ b/commands/restart.go @@ -29,9 +29,9 @@ func (cmd *Restart) Commands() []cli.Command { func (cmd *Restart) Run(c *cli.Context) error { if util.SupportsNativeDocker() || cmd.machine.Exists() { if util.SupportsNativeDocker() { - cmd.out.Info.Println("Restarting Outrigger services") + cmd.out.Info("Restarting Outrigger services") } else { - cmd.out.Info.Printf("Restarting Outrigger machine '%s' and services", cmd.machine.Name) + cmd.out.Channel.Info.Printf("Restarting Outrigger machine '%s' and services", cmd.machine.Name) } stop := Stop{cmd.BaseCommand} @@ -46,7 +46,7 @@ func (cmd *Restart) Run(c *cli.Context) error { return err } } else { - return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } return cmd.Success("Restart successful") diff --git a/commands/start.go b/commands/start.go index 4083699..7c4bcd9 100644 --- a/commands/start.go +++ b/commands/start.go 
@@ -1,6 +1,7 @@ package commands import ( + "fmt" "strconv" "github.com/phase2/rig/util" @@ -55,27 +56,28 @@ func (cmd *Start) Commands() []cli.Command { // Run executes the `rig start` command func (cmd *Start) Run(c *cli.Context) error { if util.SupportsNativeDocker() { - cmd.out.Info.Println("Linux users should use Docker natively for best performance.") - cmd.out.Info.Println("Please ensure your local Docker setup is compatible with Outrigger.") - cmd.out.Info.Println("See http://docs.outrigger.sh/getting-started/linux-installation/") + cmd.out.Info("Linux users should use Docker natively for best performance.") + cmd.out.Info("Please ensure your local Docker setup is compatible with Outrigger.") + cmd.out.Info("See http://docs.outrigger.sh/getting-started/linux-installation/") return cmd.StartMinimal(c.String("nameservers")) } - cmd.out.Info.Printf("Starting Docker inside a machine with name '%s'", cmd.machine.Name) - cmd.out.Verbose.Println("If something goes wrong, run 'rig doctor'") - cmd.out.Verbose.Println("Pre-flight check...") + cmd.out.Spin(fmt.Sprintf("Starting Docker & Docker Machine (%s)", cmd.machine.Name)) + cmd.out.Verbose("If something goes wrong, run 'rig doctor'") + + cmd.out.Verbose("Pre-flight check...") if err := util.Command("grep", "-qE", "'^\"?/Users/'", "/etc/exports").Run(); err == nil { - return cmd.Error("Vagrant NFS mount found. Please remove any non-Outrigger mounts that begin with /Users from your /etc/exports file", "NFS-MOUNT-CONFLICT", 12) + cmd.out.Error("Docker could not be started") + return cmd.Failure("Vagrant NFS mount found. 
Please remove any non-Outrigger mounts that begin with /Users from your /etc/exports file", "NFS-MOUNT-CONFLICT", 12) } - cmd.out.Verbose.Println("Resetting Docker environment variables...") + cmd.out.Verbose("Resetting Docker environment variables...") cmd.machine.UnsetEnv() // Does the docker-machine exist if !cmd.machine.Exists() { - cmd.out.Warning.Printf("No machine named '%s' exists", cmd.machine.Name) - + cmd.out.Spin(fmt.Sprintf("Creating Docker & Docker Machine (%s)", cmd.machine.Name)) driver := c.String("driver") diskSize := strconv.Itoa(c.Int("disk-size") * 1000) memSize := strconv.Itoa(c.Int("memory-size")) @@ -84,31 +86,35 @@ func (cmd *Start) Run(c *cli.Context) error { } if err := cmd.machine.Start(); err != nil { - return cmd.Error(err.Error(), "MACHINE-START-FAILED", 12) + cmd.out.Error("Docker could not be started") + return cmd.Failure(err.Error(), "MACHINE-START-FAILED", 12) } + cmd.out.Info("Docker Machine (%s) Created", cmd.machine.Name) - cmd.out.Verbose.Println("Configuring the local Docker environment") + cmd.out.Verbose("Configuring the local Docker environment") cmd.machine.SetEnv() + cmd.out.Info("Docker Machine is ready") - cmd.out.Info.Println("Setting up DNS...") dns := DNS{cmd.BaseCommand} dns.StartDNS(cmd.machine, c.String("nameservers")) // NFS mounts are Mac-only. 
if util.IsMac() { - cmd.out.Verbose.Println("Enabling NFS file sharing") + cmd.out.Spin("Enabling NFS file sharing...") if nfsErr := util.StreamCommand("docker-machine-nfs", cmd.machine.Name); nfsErr != nil { - cmd.out.Error.Printf("Error enabling NFS: %s", nfsErr) + cmd.out.Warning("Failure enabling NFS: %s", nfsErr.Error()) + } else { + cmd.out.Info("NFS is ready") } - cmd.out.Verbose.Println("NFS is ready to use") } + cmd.out.Spin("Preparing /data filesystem...") // NFS enabling may have caused a machine restart, wait for it to be available before proceeding if err := cmd.machine.WaitForDev(); err != nil { - return cmd.Error(err.Error(), "MACHINE-START-FAILED", 12) + return cmd.Failure(err.Error(), "MACHINE-START-FAILED", 12) } - cmd.out.Verbose.Println("Setting up persistent /data volume...") + cmd.out.Verbose("Setting up persistent /data volume...") dataMountSetup := `if [ ! -d /mnt/sda1/data ]; then echo '===> Creating /mnt/sda1/data directory'; sudo mkdir /mnt/sda1/data; @@ -124,20 +130,25 @@ func (cmd *Start) Run(c *cli.Context) error { sudo ln -s /mnt/sda1/data /data; fi;` if err := util.StreamCommand("docker-machine", "ssh", cmd.machine.Name, dataMountSetup); err != nil { - return cmd.Error(err.Error(), "DATA-MOUNT-FAILED", 13) + return cmd.Failure(err.Error(), "DATA-MOUNT-FAILED", 13) } + cmd.out.Info("/data filesystem is ready") + // Route configuration needs to be finalized after NFS-triggered reboots. + // This rebooting may change key details such as IP Address of the Dev machine. 
dns.ConfigureRoutes(cmd.machine) - cmd.out.Verbose.Println("Use docker-machine to interact with your virtual machine.") - cmd.out.Verbose.Printf("For example, to SSH into it: docker-machine ssh %s", cmd.machine.Name) - cmd.out.Info.Println("To run Docker commands, your terminal session should be initialized with: 'eval \"$(rig config)\"'") + cmd.out.Verbose("Use docker-machine to interact with your virtual machine.") + cmd.out.Verbose("For example, to SSH into it: docker-machine ssh %s", cmd.machine.Name) - cmd.out.Info.Println("Launching Dashboard...") + cmd.out.Spin("Launching Dashboard...") dash := Dashboard{cmd.BaseCommand} dash.LaunchDashboard(cmd.machine) + cmd.out.Info("Dashboard is ready") + cmd.out.Info("Run 'eval \"$(rig config)\"' to execute docker or docker-compose commands in your terminal.") return cmd.Success("Outrigger is ready to use") + } // StartMinimal will start "minimal" Outrigger operations, which refers to environments where diff --git a/commands/status.go b/commands/status.go index 40338f9..7a4b002 100644 --- a/commands/status.go +++ b/commands/status.go @@ -32,7 +32,7 @@ func (cmd *Status) Run(c *cli.Context) error { } if !cmd.machine.Exists() { - return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) + return cmd.Failure(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } if cmd.out.IsVerbose { diff --git a/commands/stop.go b/commands/stop.go index 83eabde..39807d7 100644 --- a/commands/stop.go +++ b/commands/stop.go @@ -38,8 +38,8 @@ func (cmd *Stop) Run(c *cli.Context) error { // StopMinimal will stop "minimal" Outrigger operations, which refers to environments where // a virtual machine and networking are not required or managed by Outrigger. 
func (cmd *Stop) StopMinimal() error { - cmd.out.Verbose.Printf("Skipping Step: Linux does not have a docker-machine to stop.") - cmd.out.Verbose.Printf("Skipping Step: Outrigger does not manage Linux networking.") + cmd.out.Channel.Verbose.Printf("Skipping Step: Linux does not have a docker-machine to stop.") + cmd.out.Channel.Verbose.Printf("Skipping Step: Outrigger does not manage Linux networking.") dash := Dashboard{cmd.BaseCommand} dash.StopDashboard() @@ -52,12 +52,13 @@ func (cmd *Stop) StopMinimal() error { // StopOutrigger will halt all Outrigger and Docker-related operations. func (cmd *Stop) StopOutrigger() error { - cmd.out.Info.Printf("Stopping machine '%s'", cmd.machine.Name) + cmd.out.Spin(fmt.Sprintf("Stopping machine '%s'...", cmd.machine.Name)) if err := cmd.machine.Stop(); err != nil { - return cmd.Error(err.Error(), "MACHINE-STOP-FAILED", 12) + return cmd.Failure(err.Error(), "MACHINE-STOP-FAILED", 12) } + cmd.out.Info("Stopped machine '%s'", cmd.machine.Name) - cmd.out.Info.Println("Cleaning up local networking (may require your admin password)") + cmd.out.Spin("Cleaning up local networking (may require your admin password)") if util.IsWindows() { util.Command("runas", "/noprofile", "/user:Administrator", "route", "DELETE", "172.17.0.0").Run() util.Command("runas", "/noprofile", "/user:Administrator", "route", "DELETE", "172.17.42.1").Run() @@ -66,6 +67,7 @@ func (cmd *Stop) StopOutrigger() error { util.Command("sudo", "route", "-n", "delete", "-net", "172.17.42.1").Run() } color.Unset() + cmd.out.Info("Networking cleanup completed") return cmd.Success(fmt.Sprintf("Machine '%s' stopped", cmd.machine.Name)) } diff --git a/commands/upgrade.go b/commands/upgrade.go index 1c6127d..4ae3137 100644 --- a/commands/upgrade.go +++ b/commands/upgrade.go @@ -45,23 +45,24 @@ func (cmd *Upgrade) Run(c *cli.Context) error { return cmd.Success("Upgrade is not needed on Linux") } - cmd.out.Info.Printf("Upgrading '%s'...", cmd.machine.Name) + 
cmd.out.Spin(fmt.Sprintf("Upgrading '%s'...", cmd.machine.Name)) if cmd.machine.GetData().Get("Driver").Get("Boot2DockerURL").MustString() == "" { - return cmd.Error(fmt.Sprintf("Machine '%s' was not created with a boot2docker URL. Run `docker-machine upgrade %s` directly", cmd.machine.Name, cmd.machine.Name), "MACHINE-CREATED-MANUALLY", 12) + cmd.out.Error("Machine %s not compatible with rig upgrade", cmd.machine.Name) + return cmd.Failure(fmt.Sprintf("Machine '%s' was not created with a boot2docker URL. Run `docker-machine upgrade %s` directly", cmd.machine.Name, cmd.machine.Name), "MACHINE-CREATED-MANUALLY", 12) } currentDockerVersion := util.GetCurrentDockerVersion() machineDockerVersion, err := cmd.machine.GetDockerVersion() if err != nil { - return cmd.Error(fmt.Sprintf("Could not determine Machine Docker version. Is your machine running?. %s", err), "MACHINE-STOPPED", 12) + return cmd.Failure(fmt.Sprintf("Could not determine Machine Docker version. Is your machine running?. %s", err), "MACHINE-STOPPED", 12) } if currentDockerVersion.Equal(machineDockerVersion) { return cmd.Success(fmt.Sprintf("Machine '%s' has the same Docker version (%s) as your local Docker binary (%s). There is nothing to upgrade. If you wish to upgrade you'll need to install a newer version of the Docker binary before running the upgrade command.", cmd.machine.Name, machineDockerVersion, currentDockerVersion)) } - cmd.out.Info.Printf("Backing up to prepare for upgrade...") + cmd.out.Channel.Info.Printf("Backing up to prepare for upgrade...") backup := &DataBackup{cmd.BaseCommand} if err := backup.Run(c); err != nil { return err diff --git a/util/logger.go b/util/logger.go index a58da1b..0095225 100644 --- a/util/logger.go +++ b/util/logger.go @@ -5,18 +5,35 @@ import ( "log" "os" + "fmt" "github.com/fatih/color" + spun "github.com/slok/gospinner" ) +// logger is the global logger data structure. Retrieve via Logger(). var logger *RigLogger +// logChannels defines various log channels. 
This nests within the RigLogger to expose the loggers directly for +// advanced use cases. +type logChannels struct { + Info *log.Logger + Warning *log.Logger + Error *log.Logger + Verbose *log.Logger +} + // RigLogger is the global logger object type RigLogger struct { - Info *log.Logger - Warning *log.Logger - Error *log.Logger - Verbose *log.Logger + Channel logChannels + Progress *RigSpinner IsVerbose bool + Spinning bool +} + +// RigSpinner object wrapper to facilitate our spinner service +// as a different output channel than the standard loggers. +type RigSpinner struct { + Spins *spun.Spinner } // LoggerInit initializes the global logger @@ -25,12 +42,18 @@ func LoggerInit(verbose bool) { if verbose { verboseWriter = os.Stdout } + + s, _ := spun.NewSpinner(spun.Dots) logger = &RigLogger{ - Info: log.New(os.Stdout, color.BlueString("[INFO] "), 0), - Warning: log.New(os.Stdout, color.YellowString("[WARN] "), 0), - Error: log.New(os.Stderr, color.RedString("[ERROR] "), 0), - Verbose: log.New(verboseWriter, "[VERBOSE] ", 0), + Channel: logChannels{ + Info: log.New(os.Stdout, color.BlueString("[INFO] "), 0), + Warning: log.New(os.Stdout, color.YellowString("[WARN] "), 0), + Error: log.New(os.Stderr, color.RedString("[ERROR] "), 0), + Verbose: log.New(verboseWriter, "[VERBOSE] ", 0), + }, IsVerbose: verbose, + Progress: &RigSpinner{s}, + Spinning: false, } } @@ -42,3 +65,63 @@ func Logger() *RigLogger { return logger } + +// Spin restarts the spinner for a new task. +func (log *RigLogger) Spin(message string) { + if !log.IsVerbose { + log.Progress.Spins.Start(message) + log.Spinning = true + } +} + +// NoSpin stops the Progress spinner. +func (log *RigLogger) NoSpin() { + log.Progress.Spins.Stop() + log.Spinning = false +} + +// Info indicates success behavior of the spinner-associated task. 
+func (log *RigLogger) Info(format string, a ...interface{}) { + if log.IsVerbose || !log.Spinning { + log.Channel.Info.Println(fmt.Sprintf(format, a...)) + } else { + log.Progress.Spins.SetMessage(fmt.Sprintf(format, a...)) + log.Progress.Spins.Succeed() + } +} + +// Warning indicates a warning in the resolution of the spinner-associated task. +func (log *RigLogger) Warning(format string, a ...interface{}) { + if log.IsVerbose || !log.Spinning { + log.Channel.Warning.Println(fmt.Sprintf(format, a...)) + } else { + log.Progress.Spins.SetMessage(fmt.Sprintf(format, a...)) + log.Progress.Spins.Warn() + } +} + +// Warn is a convenience wrapper for Warning. +func (log *RigLogger) Warn(format string, a ...interface{}) { + log.Warning(format, a...) +} + +// Error indicates an error in the spinner-associated task. +func (log *RigLogger) Error(format string, a ...interface{}) { + if log.IsVerbose || !log.Spinning { + log.Channel.Error.Println(fmt.Sprintf(format, a...)) + } else { + log.Progress.Spins.SetMessage(fmt.Sprintf(format, a...)) + log.Progress.Spins.Fail() + } +} + +// Verbose allows Verbose logging of more advanced activities/information. +// In practice, if the spinner can be in use verbose is a no-op. +func (log *RigLogger) Verbose(format string, a ...interface{}) { + log.Channel.Verbose.Println(fmt.Sprintf(format, a...)) +} + +// Note allows output of an info log, bypassing the spinner if in use. +func (log *RigLogger) Note(format string, a ...interface{}) { + log.Channel.Info.Println(fmt.Sprintf(format, a...)) +} diff --git a/util/shell_exec.go b/util/shell_exec.go index ed021ce..e704de2 100644 --- a/util/shell_exec.go +++ b/util/shell_exec.go @@ -115,7 +115,7 @@ func (x Executor) Start() error { // Log verbosely logs the command. func (x Executor) Log(tag string) { color.Set(color.FgMagenta) - Logger().Verbose.Printf("%s: %s", tag, x.ToString()) + Logger().Verbose("%s: %s", tag, x.ToString()) color.Unset() }