From fcfb442acc98322ba5287294ce1fe42e28ab5cc5 Mon Sep 17 00:00:00 2001 From: Adam Ross Date: Thu, 16 Nov 2017 15:41:32 -0800 Subject: [PATCH] Refactor codebase to use existing log API via RigLogger.Channel.* --- CONTRIBUTING.md | 12 +++---- commands/command.go | 4 +-- commands/dashboard.go | 12 +++---- commands/data_backup.go | 8 ++--- commands/data_restore.go | 4 +-- commands/dns.go | 26 +++++++------- commands/doctor.go | 70 +++++++++++++++++++------------------- commands/kill.go | 8 ++--- commands/machine.go | 14 ++++---- commands/project.go | 12 +++---- commands/project_config.go | 20 +++++------ commands/project_create.go | 10 +++--- commands/project_sync.go | 52 ++++++++++++++-------------- commands/prune.go | 2 +- commands/remove.go | 10 +++--- commands/restart.go | 4 +-- commands/start.go | 38 ++++++++++----------- commands/stop.go | 9 +++-- commands/upgrade.go | 4 +-- util/logger.go | 51 ++++++++++++++------------- util/shell_exec.go | 2 +- 21 files changed, 187 insertions(+), 185 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 639d792..d1d9d66 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,17 +26,17 @@ Here are a few conventions: * **Starting a task that could take more than 5 seconds:** * `cmd.out.Spin("Preparing the sauce")` * **Use the correct method to log operational results: (Pick one)** - * `cmd.out.Success("Sauce is Ready.")` - * `cmd.out.Warn("Sauce is burnt on the bottom.")` - * `cmd.out.Oops("Discard this sauce and try again.")` + * `cmd.out.Info("Sauce is Ready.")` + * `cmd.out.Warning("Sauce is burnt on the bottom.")` + * `cmd.out.Error("Discard this sauce and try again.")` * **Going to send some contextual notes to the user**: 1. `cmd.out.NoSpin()` if currently using the spinner. 2. `cmd.out.Status("Sauce exists.")` 3. `cmd.out.Note("This is a list item.")` - 4. `cmd.out.Details("The ingredients of the sauce include tomato, salt, black pepper, garlic...")` + 4. 
`cmd.out.Verbose("The ingredients of the sauce include tomato, salt, black pepper, garlic...")` * **Command has executed and is successful. We do not want a notification:** ``` - cmd.out.Success("Enjoy your dinner.") + cmd.out.Info("Enjoy your dinner.") return cmd.Success("") ``` * **Command has executed and is successful. Get a notification too!** @@ -46,7 +46,7 @@ Here are a few conventions: * **Command failed:** ``` message := "Cooking sauce is hard, we failed" - cmd.out.Oops(fmt.Sprintf("%s: %s", message, err.Error())) + cmd.out.Error(fmt.Sprintf("%s: %s", message, err.Error())) return cmd.Error(message) ``` diff --git a/commands/command.go b/commands/command.go index 3d67991..eb72104 100644 --- a/commands/command.go +++ b/commands/command.go @@ -41,7 +41,7 @@ func (cmd *BaseCommand) Before(c *cli.Context) error { func (cmd *BaseCommand) Success(message string) error { // Handle success messaging. if message != "" { - cmd.out.Success(message) + cmd.out.Info(message) util.NotifySuccess(cmd.context, message) } @@ -73,6 +73,6 @@ func (cmd *BaseCommand) NewContext(name string, flags []cli.Flag, parent *cli.Co // SetContextFlag set a flag on the provided context func (cmd *BaseCommand) SetContextFlag(ctx *cli.Context, name string, value string) { if err := ctx.Set(name, value); err != nil { - cmd.out.Error.Fatal(err) + cmd.out.Channel.Error.Fatal(err) } } diff --git a/commands/dashboard.go b/commands/dashboard.go index a02ba01..a11b598 100644 --- a/commands/dashboard.go +++ b/commands/dashboard.go @@ -32,7 +32,7 @@ func (cmd *Dashboard) Commands() []cli.Command { // Run executes the `rig dashboard` command func (cmd *Dashboard) Run(ctx *cli.Context) error { if cmd.machine.IsRunning() || util.SupportsNativeDocker() { - cmd.out.Success("Launching Dashboard") + cmd.out.Info("Launching Dashboard") err := cmd.LaunchDashboard(cmd.machine) if err != nil { // Success may be presumed to only execute once per command execution. 
@@ -54,16 +54,16 @@ func (cmd *Dashboard) LaunchDashboard(machine Machine) error { // except to indicate the age of the image before update in the next section. _, seconds, err := util.ImageOlderThan(dashboardImageName, 86400*30) if err == nil { - cmd.out.Details(fmt.Sprintf("Local copy of the dashboardImageName '%s' was originally published %0.2f days ago.", dashboardImageName, seconds/86400)) + cmd.out.Verbose(fmt.Sprintf("Local copy of the dashboardImageName '%s' was originally published %0.2f days ago.", dashboardImageName, seconds/86400)) } // Updating the dashboard is rarely of interest to users so uses verbose logging. // Per our user interaction practices, we would normally use a spinner here. - cmd.out.Details(fmt.Sprintf("Attempting to update %s", dashboardImageName)) + cmd.out.Verbose(fmt.Sprintf("Attempting to update %s", dashboardImageName)) if err := util.StreamCommand("docker", "pull", dashboardImageName); err != nil { - cmd.out.Details("Failed to update dashboard image. Will use local cache if available.") + cmd.out.Verbose("Failed to update dashboard image. 
Will use local cache if available.") } else { - cmd.out.Details("Successfully updated dashboard.") + cmd.out.Verbose("Successfully updated dashboard.") } dockerAPIVersion, _ := util.GetDockerServerAPIVersion() @@ -85,7 +85,7 @@ func (cmd *Dashboard) LaunchDashboard(machine Machine) error { } else if util.IsWindows() { util.Command("start", "http://dashboard.outrigger.vm").Run() } else { - cmd.out.Success("Outrigger Dashboard is now available at http://dashboard.outrigger.vm") + cmd.out.Info("Outrigger Dashboard is now available at http://dashboard.outrigger.vm") } return nil diff --git a/commands/data_backup.go b/commands/data_backup.go index cc7aa18..315305d 100644 --- a/commands/data_backup.go +++ b/commands/data_backup.go @@ -51,9 +51,9 @@ func (cmd *DataBackup) Run(c *cli.Context) error { backupDir := c.String("backup-dir") backupFile := fmt.Sprintf("%s%c%s.tgz", backupDir, os.PathSeparator, cmd.machine.Name) if _, err := os.Stat(backupDir); err != nil { - cmd.out.Info.Printf("Creating backup directory: %s...", backupDir) + cmd.out.Info(fmt.Sprintf("Creating backup directory: %s...", backupDir)) if mkdirErr := util.Command("mkdir", "-p", backupDir).Run(); mkdirErr != nil { - cmd.out.Error.Println(mkdirErr) + cmd.out.Error(mkdirErr.Error()) return cmd.Error(fmt.Sprintf("Could not create backup directory %s", backupDir), "BACKUP-DIR-CREATE-FAILED", 12) } } else if _, err := os.Stat(backupFile); err == nil { @@ -66,10 +66,10 @@ func (cmd *DataBackup) Run(c *cli.Context) error { cmd.out.Spin(fmt.Sprintf("Backing up %s on '%s' to %s...", dataDir, cmd.machine.Name, backupFile)) archiveCmd := fmt.Sprintf("sudo tar czf - -C %s .", dataDir) if err := util.StreamCommand("docker-machine", "ssh", cmd.machine.Name, archiveCmd, ">", backupFile); err != nil { - cmd.out.Oops(fmt.Sprintf("Backup failed: %s", err.Error())) + cmd.out.Error(fmt.Sprintf("Backup failed: %s", err.Error())) return cmd.Error("Backup failed", "COMMAND-ERROR", 13) } - cmd.out.Success(fmt.Sprintf("Backup 
complete: %s", backupFile)) + cmd.out.Info(fmt.Sprintf("Backup complete: %s", backupFile)) return cmd.Success("Data Backup completed") } diff --git a/commands/data_restore.go b/commands/data_restore.go index e89bff2..8bef47d 100644 --- a/commands/data_restore.go +++ b/commands/data_restore.go @@ -61,10 +61,10 @@ func (cmd *DataRestore) Run(c *cli.Context) error { // Send the archive via stdin and extract inline. Saves on disk & performance extractCmd := fmt.Sprintf("cat %s | docker-machine ssh %s \"sudo tar xzf - -C %s\"", backupFile, cmd.machine.Name, dataDir) if err := util.StreamCommand("bash", "-c", extractCmd); err != nil { - cmd.out.Oops(fmt.Sprintf("Data restore failed: %s", err.Error())) + cmd.out.Error(fmt.Sprintf("Data restore failed: %s", err.Error())) return cmd.Error("Data restore failed", "COMMAND-ERROR", 13) } - cmd.out.Success("Data restore complete") + cmd.out.Info("Data restore complete") return cmd.Success("Data Restore was successful") } diff --git a/commands/dns.go b/commands/dns.go index cc522a6..9080aa1 100644 --- a/commands/dns.go +++ b/commands/dns.go @@ -37,14 +37,14 @@ func (cmd *DNS) Commands() []cli.Command { // Run executes the `rig dns` command func (cmd *DNS) Run(c *cli.Context) error { - cmd.out.Status("Configuring DNS") + cmd.out.Info("Configuring DNS") if !util.SupportsNativeDocker() && !cmd.machine.IsRunning() { return cmd.Error(fmt.Sprintf("Machine '%s' is not running.", cmd.machine.Name), "MACHINE-STOPPED", 12) } if err := cmd.StartDNS(cmd.machine, c.String("nameservers")); err != nil { - cmd.out.Oops("DNS is ready") + cmd.out.Error("DNS is ready") return cmd.Error(err.Error(), "DNS-SETUP-FAILED", 13) } @@ -65,7 +65,7 @@ func (cmd *DNS) ConfigureRoutes(machine Machine) { cmd.configureWindowsRoutes(machine) } - cmd.out.Success("Local networking is ready") + cmd.out.Info("Local networking is ready") } // ConfigureMac configures DNS resolution and network routing @@ -79,12 +79,12 @@ func (cmd *DNS) configureMacRoutes(machine 
Machine) { util.StreamCommand("sudo", "route", "-n", "add", "172.17.0.0/16", machineIP) if _, err := os.Stat("/usr/sbin/discoveryutil"); err == nil { // Put this here for people running OS X 10.10.0 to 10.10.3 (oy vey.) - cmd.out.Verbose.Println("Restarting discoveryutil to flush DNS caches") + cmd.out.Verbose("Restarting discoveryutil to flush DNS caches") util.StreamCommand("sudo", "launchctl", "unload", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") util.StreamCommand("sudo", "launchctl", "load", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") } else { // Reset DNS cache. We have seen this suddenly make /etc/resolver/vm work. - cmd.out.Verbose.Println("Restarting mDNSResponder to flush DNS caches") + cmd.out.Verbose("Restarting mDNSResponder to flush DNS caches") util.StreamCommand("sudo", "killall", "-HUP", "mDNSResponder") } } @@ -94,7 +94,7 @@ func (cmd *DNS) removeHostFilter(ipAddr string) { // #1: route -n get to find the interface name routeData, err := util.Command("route", "-n", "get", ipAddr).CombinedOutput() if err != nil { - cmd.out.Warning.Println("Unable to determine bridge interface to remove hostfilter") + cmd.out.Warning("Unable to determine bridge interface to remove hostfilter") return } ifaceRegexp := regexp.MustCompile(`interface:\s(\w+)`) @@ -103,7 +103,7 @@ func (cmd *DNS) removeHostFilter(ipAddr string) { // #2: ifconfig to get the details ifaceData, err := util.Command("ifconfig", iface).CombinedOutput() if err != nil { - cmd.out.Warning.Println("Unable to determine member to remove hostfilter") + cmd.out.Warning("Unable to determine member to remove hostfilter") return } memberRegexp := regexp.MustCompile(`member:\s(\w+)\s`) @@ -163,14 +163,14 @@ func (cmd *DNS) StartDNS(machine Machine, nameservers string) error { } else if util.IsWindows() { resolverReturn = cmd.configureWindowsResolver(machine) } - cmd.out.Success("DNS resolution is ready") + cmd.out.Info("DNS resolution is ready") return 
resolverReturn } // configureMacResolver configures DNS resolution and network routing func (cmd *DNS) configureMacResolver(machine Machine) error { - cmd.out.Details("Configuring DNS resolution for macOS") + cmd.out.Verbose("Configuring DNS resolution for macOS") bridgeIP := machine.GetBridgeIP() if err := util.Command("sudo", "mkdir", "-p", "/etc/resolver").Run(); err != nil { @@ -181,12 +181,12 @@ func (cmd *DNS) configureMacResolver(machine Machine) error { } if _, err := os.Stat("/usr/sbin/discoveryutil"); err == nil { // Put this here for people running OS X 10.10.0 to 10.10.3 (oy vey.) - cmd.out.Details("Restarting discoveryutil to flush DNS caches") + cmd.out.Verbose("Restarting discoveryutil to flush DNS caches") util.StreamCommand("sudo", "launchctl", "unload", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") util.StreamCommand("sudo", "launchctl", "load", "-w", "/System/Library/LaunchDaemons/com.apple.discoveryd.plist") } else { // Reset DNS cache. We have seen this suddenly make /etc/resolver/vm work. 
- cmd.out.Details("Restarting mDNSResponder to flush DNS caches") + cmd.out.Verbose("Restarting mDNSResponder to flush DNS caches") util.StreamCommand("sudo", "killall", "-HUP", "mDNSResponder") } return nil @@ -194,7 +194,7 @@ func (cmd *DNS) configureMacResolver(machine Machine) error { // configureLinuxResolver configures DNS resolution func (cmd *DNS) configureLinuxResolver() error { - cmd.out.Details("Configuring DNS resolution for linux") + cmd.out.Verbose("Configuring DNS resolution for linux") bridgeIP, err := util.GetBridgeIP() if err != nil { return err @@ -223,7 +223,7 @@ func (cmd *DNS) configureLinuxResolver() error { // configureWindowsResolver configures DNS resolution and network routing func (cmd *DNS) configureWindowsResolver(machine Machine) error { // TODO: Figure out Windows resolver configuration - cmd.out.Details("TODO: Configuring DNS resolution for windows") + cmd.out.Verbose("TODO: Configuring DNS resolution for windows") return nil } diff --git a/commands/doctor.go b/commands/doctor.go index 6ae0e6b..a7adb8b 100644 --- a/commands/doctor.go +++ b/commands/doctor.go @@ -35,23 +35,23 @@ func (cmd *Doctor) Run(c *cli.Context) error { // 0. Ensure all of rig's dependencies are available in the PATH. 
cmd.out.Spin("Checking Docker installation...") if err := exec.Command("docker", "-h").Start(); err == nil { - cmd.out.Success("Docker is installed.") + cmd.out.Info("Docker is installed.") } else { - cmd.out.Oops("Docker (docker) is not installed.") + cmd.out.Error("Docker (docker) is not installed.") } if !util.SupportsNativeDocker() { cmd.out.Spin("Checking Docker Machine installation...") if err := exec.Command("docker-machine", "-h").Start(); err == nil { - cmd.out.Success("Docker Machine is installed.") + cmd.out.Info("Docker Machine is installed.") } else { - cmd.out.Oops("Docker Machine (docker-machine) is not installed.") + cmd.out.Error("Docker Machine (docker-machine) is not installed.") } } cmd.out.Spin("Checking Docker Compose installation...") if err := exec.Command("docker-compose", "-h").Start(); err == nil { - cmd.out.Success("Docker Compose is installed.") + cmd.out.Info("Docker Compose is installed.") } else { - cmd.out.Oops("Docker Compose (docker-compose) is not installed.") + cmd.out.Error("Docker Compose (docker-compose) is not installed.") } // 1. Ensure the configured docker-machine matches the set environment. @@ -59,24 +59,24 @@ func (cmd *Doctor) Run(c *cli.Context) error { cmd.out.Spin("Checking Docker Machine configuration...") if cmd.machine.Exists() { if _, isset := os.LookupEnv("DOCKER_MACHINE_NAME"); !isset { - cmd.out.Oops("Docker configuration is not set. Please run 'eval \"$(rig config)\"'.") + cmd.out.Error("Docker configuration is not set. Please run 'eval \"$(rig config)\"'.") return cmd.Error("Could not complete.", "DOCTOR-FATAL", 1) } else if cmd.machine.Name != os.Getenv("DOCKER_MACHINE_NAME") { - cmd.out.Oops(fmt.Sprintf("Your environment configuration specifies a different machine. Please re-run as 'rig --name=\"%s\" doctor'.", cmd.machine.Name)) + cmd.out.Error(fmt.Sprintf("Your environment configuration specifies a different machine. 
Please re-run as 'rig --name=\"%s\" doctor'.", cmd.machine.Name)) return cmd.Error("Could not complete.", "DOCTOR-FATAL", 1) } else { - cmd.out.Success(fmt.Sprintf("Docker Machine (%s) name matches your environment configuration.", cmd.machine.Name)) + cmd.out.Info(fmt.Sprintf("Docker Machine (%s) name matches your environment configuration.", cmd.machine.Name)) } if output, err := exec.Command("docker-machine", "url", cmd.machine.Name).Output(); err == nil { hostURL := strings.TrimSpace(string(output)) if hostURL != os.Getenv("DOCKER_HOST") { - cmd.out.Oops(fmt.Sprintf("Docker Host configuration should be '%s' but got '%s'. Please re-run 'eval \"$(rig config)\"'.", os.Getenv("DOCKER_HOST"), hostURL)) + cmd.out.Error(fmt.Sprintf("Docker Host configuration should be '%s' but got '%s'. Please re-run 'eval \"$(rig config)\"'.", os.Getenv("DOCKER_HOST"), hostURL)) return cmd.Error("Could not complete.", "DOCTOR-FATAL", 1) } - cmd.out.Success(fmt.Sprintf("Docker Machine (%s) URL (%s) matches your environment configuration.", cmd.machine.Name, hostURL)) + cmd.out.Info(fmt.Sprintf("Docker Machine (%s) URL (%s) matches your environment configuration.", cmd.machine.Name, hostURL)) } } else { - cmd.out.Oops(fmt.Sprintf("No machine named '%s' exists. Did you run 'rig start --name=\"%s\"'?", cmd.machine.Name, cmd.machine.Name)) + cmd.out.Error(fmt.Sprintf("No machine named '%s' exists. Did you run 'rig start --name=\"%s\"'?", cmd.machine.Name, cmd.machine.Name)) return cmd.Error("Could not complete.", "DOCTOR-FATAL", 1) } } @@ -85,16 +85,16 @@ func (cmd *Doctor) Run(c *cli.Context) error { if !util.SupportsNativeDocker() { cmd.out.Spin("Checking Docker Machine is operational...") if !cmd.machine.IsRunning() { - cmd.out.Oops(fmt.Sprintf("Docker Machine '%s' is not running. You may need to run 'rig start'.", cmd.machine.Name)) + cmd.out.Error(fmt.Sprintf("Docker Machine '%s' is not running. 
You may need to run 'rig start'.", cmd.machine.Name)) return cmd.Error(fmt.Sprintf("Machine '%s' is not running. ", cmd.machine.Name), "DOCTOR-FATAL", 1) } - cmd.out.Success(fmt.Sprintf("Docker Machine (%s) is running", cmd.machine.Name)) + cmd.out.Info(fmt.Sprintf("Docker Machine (%s) is running", cmd.machine.Name)) } else { if err := util.Command("docker", "version").Run(); err != nil { - cmd.out.Oops("Docker is not running. You may need to run 'systemctl start docker'") + cmd.out.Error("Docker is not running. You may need to run 'systemctl start docker'") return cmd.Error("Docker is not running.", "DOCTOR-FATAL", 1) } - cmd.out.Success("Docker is running") + cmd.out.Info("Docker is running") } // 2. Check Docker API Version compatibility @@ -115,13 +115,13 @@ func (cmd *Doctor) Run(c *cli.Context) error { apiConstraint, _ := version.NewConstraint(constraintString) if err != nil { - cmd.out.Oops(fmt.Sprintln("Could not determine Docker Machine Docker versions: ", err)) + cmd.out.Error(fmt.Sprintln("Could not determine Docker Machine Docker versions: ", err)) } else if clientAPIVersion.Equal(serverAPIVersion) { - cmd.out.Success(fmt.Sprintf("Docker Client (%s) and Server (%s) have equal API Versions", clientAPIVersion, serverAPIVersion)) + cmd.out.Info(fmt.Sprintf("Docker Client (%s) and Server (%s) have equal API Versions", clientAPIVersion, serverAPIVersion)) } else if apiConstraint.Check(clientAPIVersion) { - cmd.out.Success(fmt.Sprintf("Docker Client (%s) has Server compatible API version (%s). Server current (%s), Server min compat (%s)", clientAPIVersion, constraintString, serverAPIVersion, serverMinAPIVersion)) + cmd.out.Info(fmt.Sprintf("Docker Client (%s) has Server compatible API version (%s). Server current (%s), Server min compat (%s)", clientAPIVersion, constraintString, serverAPIVersion, serverMinAPIVersion)) } else { - cmd.out.Oops(fmt.Sprintf("Docker Client (%s) is incompatible with Server. Server current (%s), Server min compat (%s). 
Use `rig upgrade` to fix this.", clientAPIVersion, serverAPIVersion, serverMinAPIVersion)) + cmd.out.Error(fmt.Sprintf("Docker Client (%s) is incompatible with Server. Server current (%s), Server min compat (%s). Use `rig upgrade` to fix this.", clientAPIVersion, serverAPIVersion, serverMinAPIVersion)) } // 3. Pull down the data from DNSDock. This will confirm we can resolve names as well @@ -133,25 +133,25 @@ func (cmd *Doctor) Run(c *cli.Context) error { for _, record := range records { if record["Name"] == "dnsdock" { resolved = true - cmd.out.Success(fmt.Sprintf("DNS and routing services are working. DNSDock resolves to %s", record["IPs"])) + cmd.out.Info(fmt.Sprintf("DNS and routing services are working. DNSDock resolves to %s", record["IPs"])) break } } if !resolved { - cmd.out.Oops("Unable to verify DNS services are working.") + cmd.out.Error("Unable to verify DNS services are working.") } } else { - cmd.out.Oops(fmt.Sprintf("Unable to verify DNS services and routing are working: %s", err.Error())) + cmd.out.Error(fmt.Sprintf("Unable to verify DNS services and routing are working: %s", err.Error())) } // 4. Ensure that docker-machine-nfs script is available for our NFS mounts (Mac ONLY) if util.IsMac() { cmd.out.Spin("Checking NFS configuration...") if err := exec.Command("which", "docker-machine-nfs").Run(); err != nil { - cmd.out.Oops("Docker Machine NFS is not installed.") + cmd.out.Error("Docker Machine NFS is not installed.") } else { - cmd.out.Success("Docker Machine NFS is installed.") + cmd.out.Info("Docker Machine NFS is installed.") } } @@ -163,17 +163,17 @@ func (cmd *Doctor) Run(c *cli.Context) error { dataUsage := strings.TrimSpace(string(output)) if i, e := strconv.Atoi(dataUsage); e == nil { if i >= 85 && i < 95 { - cmd.out.Warn(fmt.Sprintf("Data volume (/data) is %d%% used. Please free up space soon.", i)) + cmd.out.Warning(fmt.Sprintf("Data volume (/data) is %d%% used. 
Please free up space soon.", i)) } else if i >= 95 { - cmd.out.Oops(fmt.Sprintf("Data volume (/data) is %d%% used. Please free up space. Try 'docker system prune' or removing old projects / databases from /data.", i)) + cmd.out.Error(fmt.Sprintf("Data volume (/data) is %d%% used. Please free up space. Try 'docker system prune' or removing old projects / databases from /data.", i)) } else { - cmd.out.Success(fmt.Sprintf("Data volume (/data) is %d%% used.", i)) + cmd.out.Info(fmt.Sprintf("Data volume (/data) is %d%% used.", i)) } } else { - cmd.out.Warn(fmt.Sprintf("Unable to determine usage level of /data volume. Failed to parse '%s'", dataUsage)) + cmd.out.Warning(fmt.Sprintf("Unable to determine usage level of /data volume. Failed to parse '%s'", dataUsage)) } } else { - cmd.out.Warn(fmt.Sprintf("Unable to determine usage level of /data volume. Failed to execute 'df': %v", err)) + cmd.out.Warning(fmt.Sprintf("Unable to determine usage level of /data volume. Failed to execute 'df': %v", err)) } } @@ -185,17 +185,17 @@ func (cmd *Doctor) Run(c *cli.Context) error { userUsage := strings.TrimSpace(string(output)) if i, e := strconv.Atoi(userUsage); e == nil { if i >= 85 && i < 95 { - cmd.out.Warn(fmt.Sprintf("Root drive (/Users) is %d%% used. Please free up space soon.", i)) + cmd.out.Warning(fmt.Sprintf("Root drive (/Users) is %d%% used. Please free up space soon.", i)) } else if i >= 95 { - cmd.out.Oops(fmt.Sprintf("Root drive (/Users) is %d%% used. Please free up space.", i)) + cmd.out.Error(fmt.Sprintf("Root drive (/Users) is %d%% used. Please free up space.", i)) } else { - cmd.out.Success(fmt.Sprintf("Root drive (/Users) is %d%% used.", i)) + cmd.out.Info(fmt.Sprintf("Root drive (/Users) is %d%% used.", i)) } } else { - cmd.out.Warn(fmt.Sprintf("Unable to determine usage level of root drive (/Users). Failed to parse '%s'", userUsage)) + cmd.out.Warning(fmt.Sprintf("Unable to determine usage level of root drive (/Users). 
Failed to parse '%s'", userUsage)) } } else { - cmd.out.Warn(fmt.Sprintf("Unable to determine usage level of root drive (/Users). Failed to execute 'df': %v", err)) + cmd.out.Warning(fmt.Sprintf("Unable to determine usage level of root drive (/Users). Failed to execute 'df': %v", err)) } } diff --git a/commands/kill.go b/commands/kill.go index 95f50f2..dfe5f56 100644 --- a/commands/kill.go +++ b/commands/kill.go @@ -40,7 +40,7 @@ func (cmd *Kill) Run(c *cli.Context) error { return err } - cmd.out.Info.Printf("Killing machine '%s'", cmd.machine.Name) + cmd.out.Channel.Info.Printf("Killing machine '%s'", cmd.machine.Name) util.StreamCommand("docker-machine", "kill", cmd.machine.Name) // Ensure the underlying virtualization has stopped @@ -49,11 +49,11 @@ func (cmd *Kill) Run(c *cli.Context) error { case util.VirtualBox: util.StreamCommand("controlvm", cmd.machine.Name, "poweroff") case util.VMWare: - cmd.out.Warning.Println("Add vmrun suspend command.") + cmd.out.Warning("Add vmrun suspend command.") case util.Xhyve: - cmd.out.Warning.Println("Add equivalent xhyve kill command.") + cmd.out.Warning("Add equivalent xhyve kill command.") default: - cmd.out.Warning.Printf("Driver not recognized: %s\n", driver) + cmd.out.Channel.Warning.Printf("Driver not recognized: %s\n", driver) } return cmd.Success(fmt.Sprintf("Machine '%s' killed", cmd.machine.Name)) diff --git a/commands/machine.go b/commands/machine.go index 9d131aa..19579ef 100644 --- a/commands/machine.go +++ b/commands/machine.go @@ -22,7 +22,7 @@ type Machine struct { // Create will generate a new Docker Machine configured according to user specification func (m *Machine) Create(driver string, cpuCount string, memSize string, diskSize string) error { - m.out.Info.Printf("Creating a %s machine named '%s' with CPU(%s) MEM(%s) DISK(%s)...", driver, m.Name, cpuCount, memSize, diskSize) + m.out.Channel.Info.Printf("Creating a %s machine named '%s' with CPU(%s) MEM(%s) DISK(%s)...", driver, m.Name, cpuCount, memSize, 
diskSize) boot2dockerURL := "https://github.com/boot2docker/boot2docker/releases/download/v" + util.GetRawCurrentDockerVersion() + "/boot2docker.iso" @@ -72,7 +72,7 @@ func (m *Machine) Create(driver string, cpuCount string, memSize string, diskSiz return fmt.Errorf("error creating machine '%s': %s", m.Name, err) } - m.out.Info.Printf("Created docker-machine named '%s'...", m.Name) + m.out.Channel.Info.Printf("Created docker-machine named '%s'...", m.Name) return nil } @@ -94,7 +94,7 @@ func (m Machine) CheckXhyveRequirements() error { // Start boots the Docker Machine func (m Machine) Start() error { if !m.IsRunning() { - m.out.Verbose.Printf("The machine '%s' is not running, starting...", m.Name) + m.out.Channel.Verbose.Printf("The machine '%s' is not running, starting...", m.Name) if err := util.StreamCommand("docker-machine", "start", m.Name); err != nil { return fmt.Errorf("error starting machine '%s': %s", m.Name, err) @@ -127,10 +127,10 @@ func (m Machine) WaitForDev() error { for i := 1; i <= maxTries; i++ { m.SetEnv() if err := util.Command("docker", "ps").Run(); err == nil { - m.out.Verbose.Printf("Machine '%s' has started", m.Name) + m.out.Channel.Verbose.Printf("Machine '%s' has started", m.Name) return nil } - m.out.Warning.Printf("Docker daemon not running! Trying again in %d seconds. Try %d of %d. \n", sleepSecs, i, maxTries) + m.out.Channel.Warning.Printf("Docker daemon not running! Trying again in %d seconds. Try %d of %d. 
\n", sleepSecs, i, maxTries) time.Sleep(time.Duration(sleepSecs) * time.Second) } @@ -183,7 +183,7 @@ func (m *Machine) GetData() *simplejson.Json { if inspect, inspectErr := util.Command("docker-machine", "inspect", m.Name).Output(); inspectErr == nil { if js, jsonErr := simplejson.NewJson(inspect); jsonErr != nil { - m.out.Error.Fatalf("Failed to parse '%s' JSON: %s", m.Name, jsonErr) + m.out.Channel.Error.Fatalf("Failed to parse '%s' JSON: %s", m.Name, jsonErr) } else { m.inspectData = js return m.inspectData @@ -273,7 +273,7 @@ func (m Machine) GetSysctl(setting string) (string, error) { // SetSysctl sets the sysctl setting on the Docker Machine func (m Machine) SetSysctl(key string, val string) error { cmd := fmt.Sprintf("sudo sysctl -w %s=%s", key, val) - m.out.Verbose.Printf("Modifying Docker Machine kernel settings: %s", cmd) + m.out.Channel.Verbose.Printf("Modifying Docker Machine kernel settings: %s", cmd) _, err := util.Command("docker-machine", "ssh", m.Name, cmd).CombinedOutput() return err } diff --git a/commands/project.go b/commands/project.go index ab97048..8e72874 100644 --- a/commands/project.go +++ b/commands/project.go @@ -78,14 +78,14 @@ func (cmd *Project) GetScriptsAsSubcommands(otherSubcommands []cli.Command) []cl // Run executes the specified `rig project` script func (cmd *Project) Run(c *cli.Context) error { - cmd.out.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) + cmd.out.Verbose(fmt.Sprintf("Loaded project configuration from %s", cmd.Config.Path)) if cmd.Config.Scripts == nil { - cmd.out.Error.Fatal("There are no scripts discovered in: %s", cmd.Config.File) + cmd.out.Channel.Error.Fatal("There are no scripts discovered in: %s", cmd.Config.File) } key := strings.TrimPrefix(c.Command.Name, "run:") if script, ok := cmd.Config.Scripts[key]; ok { - cmd.out.Verbose.Printf("Initializing project script '%s': %s", key, script.Description) + cmd.out.Verbose(fmt.Sprintf("Initializing project script '%s': %s", key, 
script.Description)) cmd.addCommandPath() dir := filepath.Dir(cmd.Config.Path) @@ -94,9 +94,9 @@ func (cmd *Project) Run(c *cli.Context) error { shellCmd := cmd.GetCommand(scriptCommands) shellCmd.Dir = dir - cmd.out.Verbose.Printf("Script execution - Working Directory: %s", dir) + cmd.out.Verbose(fmt.Sprintf("Script execution - Working Directory: %s", dir)) - cmd.out.Verbose.Printf("Executing '%s' as '%s'", key, scriptCommands) + cmd.out.Verbose(fmt.Sprintf("Executing '%s' as '%s'", key, scriptCommands)) if exitCode := util.PassthruCommand(shellCmd); exitCode != 0 { return cmd.Error(fmt.Sprintf("Error running project script '%s'", key), "COMMAND-ERROR", exitCode) } @@ -131,7 +131,7 @@ func (cmd *Project) GetCommandSeparator() string { func (cmd *Project) addCommandPath() { binDir := cmd.Config.Bin if binDir != "" { - cmd.out.Verbose.Printf("Script execution - Adding to $PATH: %s", binDir) + cmd.out.Verbose(fmt.Sprintf("Script execution - Adding to $PATH: %s", binDir)) path := os.Getenv("PATH") os.Setenv("PATH", fmt.Sprintf("%s%c%s", binDir, os.PathListSeparator, path)) } diff --git a/commands/project_config.go b/commands/project_config.go index fb1d9ab..71d4eba 100644 --- a/commands/project_config.go +++ b/commands/project_config.go @@ -89,16 +89,16 @@ func NewProjectConfigFromFile(filename string) (*ProjectConfig, error) { yamlFile, err := ioutil.ReadFile(config.File) if err != nil { - logger.Verbose.Printf("No project configuration file could be read at: %s", config.File) + logger.Channel.Verbose.Printf("No project configuration file could be read at: %s", config.File) return config, err } if err := yaml.Unmarshal(yamlFile, config); err != nil { - logger.Error.Fatalf("Error parsing YAML config: %v", err) + logger.Channel.Error.Fatalf("Error parsing YAML config: %v", err) } if err := config.ValidateConfigVersion(); err != nil { - logger.Error.Fatalf("Error in %s: %s", filename, err) + logger.Channel.Error.Fatalf("Error in %s: %s", filename, err) } if 
len(config.Bin) == 0 { @@ -147,21 +147,21 @@ func (c *ProjectConfig) ValidateProjectScripts(subcommands []cli.Command) { // Check for an empty script if script == nil { - logger.Error.Fatalf("Project script '%s' has no configuration", id) + logger.Channel.Error.Fatalf("Project script '%s' has no configuration", id) } // Check for scripts with conflicting aliases with existing subcommands or subcommand aliases for _, subcommand := range subcommands { if id == subcommand.Name { - logger.Error.Fatalf("Project script name '%s' conflicts with command name '%s'. Please choose a different script name", id, subcommand.Name) + logger.Channel.Error.Fatalf("Project script name '%s' conflicts with command name '%s'. Please choose a different script name", id, subcommand.Name) } else if script.Alias == subcommand.Name { - logger.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command name '%s'. Please choose a different script alias", script.Alias, id, subcommand.Name) + logger.Channel.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command name '%s'. Please choose a different script alias", script.Alias, id, subcommand.Name) } else if subcommand.Aliases != nil { for _, alias := range subcommand.Aliases { if id == alias { - logger.Error.Fatalf("Project script name '%s' conflicts with command alias '%s' on command '%s'. Please choose a different script name", id, alias, subcommand.Name) + logger.Channel.Error.Fatalf("Project script name '%s' conflicts with command alias '%s' on command '%s'. Please choose a different script name", id, alias, subcommand.Name) } else if script.Alias == alias { - logger.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command alias '%s' on command '%s'. Please choose a different script alias", script.Alias, id, alias, subcommand.Name) + logger.Channel.Error.Fatalf("Project script alias '%s' on script '%s' conflicts with command alias '%s' on command '%s'. 
Please choose a different script alias", script.Alias, id, alias, subcommand.Name) } } } @@ -169,10 +169,10 @@ func (c *ProjectConfig) ValidateProjectScripts(subcommands []cli.Command) { // Check for scripts with no run commands if script.Run == nil || len(script.Run) == 0 { - logger.Error.Fatalf("Project script '%s' does not have any run commands.", id) + logger.Channel.Error.Fatalf("Project script '%s' does not have any run commands.", id) } else if len(script.Run) > 10 { // Check for scripts with more than 10 run commands - logger.Warning.Printf("Project script '%s' has more than 10 run items (%d). You should create a shell script to contain those.", id, len(script.Run)) + logger.Warning(fmt.Sprintf("Project script '%s' has more than 10 run items (%d). You should create a shell script to contain those.", id, len(script.Run))) } } } diff --git a/commands/project_create.go b/commands/project_create.go index 1e39160..f2d93a2 100644 --- a/commands/project_create.go +++ b/commands/project_create.go @@ -55,7 +55,7 @@ func (cmd *ProjectCreate) Create(ctx *cli.Context) error { } if cmd.machine.IsRunning() || util.SupportsNativeDocker() { - cmd.out.Details(fmt.Sprintf("Executing container %s%s", image, argsMessage)) + cmd.out.Verbose(fmt.Sprintf("Executing container %s%s", image, argsMessage)) if err := cmd.RunGenerator(ctx, cmd.machine, image); err != nil { return err } @@ -73,7 +73,7 @@ func (cmd *ProjectCreate) RunGenerator(ctx *cli.Context, machine Machine, image // The check for whether the image is older than 30 days is not currently used. 
_, seconds, err := util.ImageOlderThan(image, 86400*30) if err == nil { - cmd.out.Details(fmt.Sprintf("Local copy of the image '%s' was originally published %0.2f days ago.", image, seconds/86400)) + cmd.out.Verbose(fmt.Sprintf("Local copy of the image '%s' was originally published %0.2f days ago.", image, seconds/86400)) } // If there was an error it implies no previous instance of the image is available if err == nil && !ctx.Bool("no-update") { cmd.out.Spin(fmt.Sprintf("Attempting to update project generator docker image: %s", image)) if e := util.StreamCommand("docker", "pull", image); e != nil { - cmd.out.Oops(fmt.Sprintf("Project generator docker image failed to update. Using local cache if available: %s", image)) + cmd.out.Error(fmt.Sprintf("Project generator docker image failed to update. Using local cache if available: %s", image)) } else { - cmd.out.Success(fmt.Sprintf("Project generator docker image is up-to-date: %s", image)) + cmd.out.Info(fmt.Sprintf("Project generator docker image is up-to-date: %s", image)) } } else if err == nil && ctx.Bool("no-update") { - cmd.out.Verbose.Printf("Automatic generator image update suppressed by --no-update option.") + cmd.out.Verbose("Automatic generator image update suppressed by --no-update option.") } cwd, err := os.Getwd() diff --git a/commands/project_sync.go b/commands/project_sync.go index 8fa7f67..8d49e85 100644 --- a/commands/project_sync.go +++ b/commands/project_sync.go @@ -94,7 +94,7 @@ func (cmd *ProjectSync) Commands() []cli.Command { func (cmd *ProjectSync) RunStart(ctx *cli.Context) error { cmd.Config = NewProjectConfig() if cmd.Config.NotEmpty() { - cmd.out.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) + cmd.out.Verbose(fmt.Sprintf("Loaded project configuration from %s", cmd.Config.Path)) } // Determine the working directory for CWD-sensitive operations. 
@@ -109,10 +109,10 @@ func (cmd *ProjectSync) RunStart(ctx *cli.Context) error { switch platform := runtime.GOOS; platform { case "linux": - cmd.out.Verbose.Printf("Setting up local volume: %s", volumeName) + cmd.out.Verbose(fmt.Sprintf("Setting up local volume: %s", volumeName)) return cmd.SetupBindVolume(volumeName, workingDir) default: - cmd.out.Verbose.Printf("Starting sync with volume: %s", volumeName) + cmd.out.Verbose(fmt.Sprintf("Starting sync with volume: %s", volumeName)) return cmd.StartUnisonSync(ctx, volumeName, cmd.Config, workingDir) } } @@ -124,15 +124,15 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con cmd.Error(fmt.Sprintf("Error configuring file watches on Docker Machine: %v", err), "INOTIFY-WATCH-FAILURE", 12) } - cmd.out.Info.Printf("Starting sync volume: %s", volumeName) + cmd.out.Channel.Info.Printf("Starting sync volume: %s", volumeName) if err := util.Command("docker", "volume", "create", volumeName).Run(); err != nil { return cmd.Error(fmt.Sprintf("Failed to create sync volume: %s", volumeName), "VOLUME-CREATE-FAILED", 13) } - cmd.out.Info.Println("Starting Unison container") + cmd.out.Info("Starting Unison container") unisonMinorVersion := cmd.GetUnisonMinorVersion() - cmd.out.Verbose.Printf("Local Unison version for compatibilty: %s", unisonMinorVersion) + cmd.out.Channel.Verbose.Printf("Local Unison version for compatibilty: %s", unisonMinorVersion) util.Command("docker", "container", "stop", volumeName).Run() containerArgs := []string{ "container", "run", "--detach", "--rm", @@ -152,7 +152,7 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con return cmd.Error(err.Error(), "SYNC-INIT-FAILED", 13) } - cmd.out.Info.Println("Initializing sync") + cmd.out.Info("Initializing sync") // Determine the location of the local Unison log file. 
var logFile = fmt.Sprintf("%s.log", volumeName) @@ -160,7 +160,7 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con // up and running. If the logfile does not exist, do not complain. If the // filesystem cannot delete the file when it exists, it will lead to errors. if err := util.RemoveFile(logFile, workingDir); err != nil { - cmd.out.Verbose.Printf("Could not remove Unison log file: %s: %s", logFile, err.Error()) + cmd.out.Channel.Verbose.Printf("Could not remove Unison log file: %s: %s", logFile, err.Error()) } // Initiate local Unison process. @@ -179,10 +179,10 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con unisonArgs = append(unisonArgs, "-ignore", ignore) } } - cmd.out.Verbose.Printf("Unison Args: %s", strings.Join(unisonArgs[:], " ")) + cmd.out.Channel.Verbose.Printf("Unison Args: %s", strings.Join(unisonArgs[:], " ")) command := exec.Command("unison", unisonArgs...) command.Dir = workingDir - cmd.out.Verbose.Printf("Sync execution - Working Directory: %s", workingDir) + cmd.out.Channel.Verbose.Printf("Sync execution - Working Directory: %s", workingDir) if err = util.Convert(command).Start(); err != nil { return cmd.Error(fmt.Sprintf("Failure starting local Unison process: %v", err), "UNISON-START-FAILED", 13) } @@ -196,7 +196,7 @@ func (cmd *ProjectSync) StartUnisonSync(ctx *cli.Context, volumeName string, con // SetupBindVolume will create minimal Docker Volumes for systems that have native container/volume support func (cmd *ProjectSync) SetupBindVolume(volumeName string, workingDir string) error { - cmd.out.Info.Printf("Starting local bind volume: %s", volumeName) + cmd.out.Channel.Info.Printf("Starting local bind volume: %s", volumeName) util.Command("docker", "volume", "rm", volumeName).Run() volumeArgs := []string{ @@ -221,7 +221,7 @@ func (cmd *ProjectSync) RunStop(ctx *cli.Context) error { } cmd.Config = NewProjectConfig() if cmd.Config.NotEmpty() { - 
cmd.out.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) + cmd.out.Channel.Verbose.Printf("Loaded project configuration from %s", cmd.Config.Path) } // Determine the working directory for CWD-sensitive operations. @@ -231,8 +231,8 @@ func (cmd *ProjectSync) RunStop(ctx *cli.Context) error { } volumeName := cmd.GetVolumeName(cmd.Config, workingDir) - cmd.out.Verbose.Printf("Stopping sync with volume: %s", volumeName) - cmd.out.Info.Println("Stopping Unison container") + cmd.out.Channel.Verbose.Printf("Stopping sync with volume: %s", volumeName) + cmd.out.Info("Stopping Unison container") if err := util.Command("docker", "container", "stop", volumeName).Run(); err != nil { return cmd.Error(err.Error(), "SYNC-CONTAINER-FAILURE", 13) } @@ -268,7 +268,7 @@ func (cmd *ProjectSync) LoadComposeFile() (*ComposeFile, error) { if err == nil { var config ComposeFile if e := yaml.Unmarshal(yamlFile, &config); e != nil { - cmd.out.Error.Fatalf("YAML Parsing Error: %s", e) + cmd.out.Channel.Error.Fatalf("YAML Parsing Error: %s", e) } return &config, nil } @@ -282,7 +282,7 @@ func (cmd *ProjectSync) LoadComposeFile() (*ComposeFile, error) { // when compiled without -cgo this executable will not use the native mac dns resolution // which is how we have configured dnsdock to provide names for containers. 
func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeconds int) (string, error) { - cmd.out.Info.Println("Waiting for container to start") + cmd.out.Info("Waiting for container to start") var timeoutLoopSleep = time.Duration(100) * time.Millisecond // * 10 here because we loop once every 100 ms and we want to get to seconds @@ -294,7 +294,7 @@ func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeco } ip := strings.Trim(string(output), "\n") - cmd.out.Verbose.Printf("Checking for Unison network connection on %s %d", ip, unisonPort) + cmd.out.Channel.Verbose.Printf("Checking for Unison network connection on %s %d", ip, unisonPort) for i := 1; i <= timeoutLoops; i++ { conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", ip, unisonPort)) if err == nil { @@ -302,7 +302,7 @@ func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeco return ip, nil } - cmd.out.Info.Printf("Error: %v", err) + cmd.out.Channel.Info.Printf("Error: %v", err) time.Sleep(timeoutLoopSleep) } @@ -312,20 +312,20 @@ func (cmd *ProjectSync) WaitForUnisonContainer(containerName string, timeoutSeco // WaitForSyncInit will wait for the local unison process to finish initializing // when the log file exists and has stopped growing in size func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeoutSeconds int, syncWaitSeconds int) error { - cmd.out.Info.Print("Waiting for initial sync detection") + cmd.out.Info("Waiting for initial sync detection") // The use of os.Stat below is not subject to our working directory configuration, // so to ensure we can stat the log file we convert it to an absolute path. 
if logFilePath, err := util.AbsJoin(workingDir, logFile); err != nil { - cmd.out.Info.Print(err.Error()) + cmd.out.Info(err.Error()) } else { // Create a temp file to cause a sync action var tempFile = ".rig-check-sync-start" if err := util.TouchFile(tempFile, workingDir); err != nil { - cmd.out.Error.Fatal("Could not create file used to detect initial sync: %s", err.Error()) + cmd.out.Channel.Error.Fatal("Could not create file used to detect initial sync: %s", err.Error()) } - cmd.out.Verbose.Printf("Creating temporary file so we can watch for Unison initialization: %s", tempFile) + cmd.out.Verbose(fmt.Sprintf("Creating temporary file so we can watch for Unison initialization: %s", tempFile)) var timeoutLoopSleep = time.Duration(100) * time.Millisecond // * 10 here because we loop once every 100 ms and we want to get to seconds @@ -340,7 +340,7 @@ func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo if err == nil { os.Stdout.WriteString(" initial sync detected\n") - cmd.out.Info.Print("Waiting for initial sync to finish") + cmd.out.Info("Waiting for initial sync to finish") // Initialize at -2 to force at least one loop var lastSize = int64(-2) for lastSize != statInfo.Size() { @@ -348,7 +348,7 @@ func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo time.Sleep(statSleep) lastSize = statInfo.Size() if statInfo, err = os.Stat(logFilePath); err != nil { - cmd.out.Info.Print(err.Error()) + cmd.out.Info(err.Error()) lastSize = -1 } } @@ -356,7 +356,7 @@ func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo // Remove the temp file, waiting until after sync so spurious // failure message doesn't show in log if err := util.RemoveFile(tempFile, workingDir); err != nil { - cmd.out.Warning.Printf("Could not remove the temporary file: %s: %s", tempFile, err.Error()) + cmd.out.Warning(fmt.Sprintf("Could not remove the temporary file: %s: %s", tempFile, err.Error())) } return nil } @@ -368,7 
+368,7 @@ func (cmd *ProjectSync) WaitForSyncInit(logFile string, workingDir string, timeo if err := util.RemoveFile(tempFile, workingDir); err != nil { // While the removal of the tempFile is not significant, if something // prevents removal there may be a bigger problem. - cmd.out.Warning.Printf("Could not remove the temporary file: %s", err.Error()) + cmd.out.Channel.Warning.Printf("Could not remove the temporary file: %s", err.Error()) } } diff --git a/commands/prune.go b/commands/prune.go index 106e276..adca0da 100644 --- a/commands/prune.go +++ b/commands/prune.go @@ -26,7 +26,7 @@ func (cmd *Prune) Commands() []cli.Command { // Run executes the `rig prune` command func (cmd *Prune) Run(c *cli.Context) error { - cmd.out.Info.Println("Cleaning up Docker images and containers...") + cmd.out.Info("Cleaning up Docker images and containers...") if exitCode := util.PassthruCommand(exec.Command("docker", "system", "prune", "--all", "--volumes")); exitCode != 0 { return cmd.Error("Error pruning Docker resources.", "COMMAND-ERROR", 13) } diff --git a/commands/remove.go b/commands/remove.go index b3e6ece..dc1868f 100644 --- a/commands/remove.go +++ b/commands/remove.go @@ -40,11 +40,11 @@ func (cmd *Remove) Run(c *cli.Context) error { return cmd.Error(fmt.Sprintf("No machine named '%s' exists.", cmd.machine.Name), "MACHINE-NOT-FOUND", 12) } - cmd.out.Status(fmt.Sprintf("Removing '%s'", cmd.machine.Name)) + cmd.out.Info(fmt.Sprintf("Removing '%s'", cmd.machine.Name)) force := c.Bool("force") if !force { - cmd.out.Warn("!!!!! This operation is destructive. You may lose important data. !!!!!!!") - cmd.out.Warn("Run 'rig data-backup' if you want to save your /data volume.") + cmd.out.Warning("!!!!! This operation is destructive. You may lose important data. 
!!!!!!!") + cmd.out.Warning("Run 'rig data-backup' if you want to save your /data volume.") if !util.AskYesNo("Are you sure you want to remove '" + cmd.machine.Name + "'") { return cmd.Success("Remove was aborted") @@ -59,10 +59,10 @@ func (cmd *Remove) Run(c *cli.Context) error { cmd.out.Spin("Removing the docker Virtual Machine") if err := cmd.machine.Remove(); err != nil { - cmd.out.Oops("Failed to remove the docker Virtual Machine") + cmd.out.Error("Failed to remove the docker Virtual Machine") return cmd.Error(err.Error(), "MACHINE-REMOVE-FAILED", 12) } - cmd.out.Success("Failed to remove the docker Virtual Machine") + cmd.out.Info("Removed the docker Virtual Machine") return cmd.Success(fmt.Sprintf("Machine '%s' removed", cmd.machine.Name)) } diff --git a/commands/restart.go b/commands/restart.go index a3c6d51..325502d 100644 --- a/commands/restart.go +++ b/commands/restart.go @@ -29,9 +29,9 @@ func (cmd *Restart) Run(c *cli.Context) error { if util.SupportsNativeDocker() || cmd.machine.Exists() { if util.SupportsNativeDocker() { - cmd.out.Info.Println("Restarting Outrigger services") + cmd.out.Info("Restarting Outrigger services") } else { - cmd.out.Info.Printf("Restarting Outrigger machine '%s' and services", cmd.machine.Name) + cmd.out.Channel.Info.Printf("Restarting Outrigger machine '%s' and services", cmd.machine.Name) } stop := Stop{cmd.BaseCommand} diff --git a/commands/start.go b/commands/start.go index 9c0a711..e2ebe69 100644 --- a/commands/start.go +++ b/commands/start.go @@ -56,23 +56,23 @@ func (cmd *Start) Run(c *cli.Context) error { if util.SupportsNativeDocker() { - cmd.out.Info.Println("Linux users should use Docker natively for best performance.") - cmd.out.Info.Println("Please ensure your local Docker setup is compatible with Outrigger.") - cmd.out.Info.Println("See 
http://docs.outrigger.sh/getting-started/linux-installation/") + cmd.out.Info("Linux users should use Docker natively for best performance.") + cmd.out.Info("Please ensure your local Docker setup is compatible with Outrigger.") + cmd.out.Info("See http://docs.outrigger.sh/getting-started/linux-installation/") return cmd.StartMinimal(c.String("nameservers")) } cmd.out.Spin(fmt.Sprintf("Starting Docker & Docker Machine (%s)", cmd.machine.Name)) - cmd.out.Verbose.Println("If something goes wrong, run 'rig doctor'") + cmd.out.Verbose("If something goes wrong, run 'rig doctor'") - cmd.out.Verbose.Println("Pre-flight check...") + cmd.out.Verbose("Pre-flight check...") if err := util.Command("grep", "-qE", "'^\"?/Users/'", "/etc/exports").Run(); err == nil { - cmd.out.Oops("Docker could not be started") + cmd.out.Error("Docker could not be started") return cmd.Error("Vagrant NFS mount found. Please remove any non-Outrigger mounts that begin with /Users from your /etc/exports file", "NFS-MOUNT-CONFLICT", 12) } - cmd.out.Verbose.Println("Resetting Docker environment variables...") + cmd.out.Verbose("Resetting Docker environment variables...") cmd.machine.UnsetEnv() // Does the docker-machine exist @@ -86,14 +86,14 @@ func (cmd *Start) Run(c *cli.Context) error { } if err := cmd.machine.Start(); err != nil { - cmd.out.Oops("Docker could not be started") + cmd.out.Error("Docker could not be started") return cmd.Error(err.Error(), "MACHINE-START-FAILED", 12) } - cmd.out.Success(fmt.Sprintf("Docker Machine (%s) Created", cmd.machine.Name)) + cmd.out.Info(fmt.Sprintf("Docker Machine (%s) Created", cmd.machine.Name)) - cmd.out.Verbose.Println("Configuring the local Docker environment") + cmd.out.Verbose("Configuring the local Docker environment") cmd.machine.SetEnv() - cmd.out.Success("Docker Machine is ready") + cmd.out.Info("Docker Machine is ready") dns := DNS{cmd.BaseCommand} dns.StartDNS(cmd.machine, c.String("nameservers")) @@ -102,9 +102,9 @@ func (cmd *Start) Run(c 
*cli.Context) error { if util.IsMac() { cmd.out.Spin("Enabling NFS file sharing...") if nfsErr := util.StreamCommand("docker-machine-nfs", cmd.machine.Name); nfsErr != nil { - cmd.out.Warn(fmt.Sprintf("Error enabling NFS: %s", nfsErr)) + cmd.out.Warning(fmt.Sprintf("Error enabling NFS: %s", nfsErr)) } else { - cmd.out.Success("NFS is ready") + cmd.out.Info("NFS is ready") } } @@ -114,7 +114,7 @@ func (cmd *Start) Run(c *cli.Context) error { return cmd.Error(err.Error(), "MACHINE-START-FAILED", 12) } - cmd.out.Verbose.Println("Setting up persistent /data volume...") + cmd.out.Verbose("Setting up persistent /data volume...") dataMountSetup := `if [ ! -d /mnt/sda1/data ]; then echo '===> Creating /mnt/sda1/data directory'; sudo mkdir /mnt/sda1/data; @@ -132,21 +132,21 @@ func (cmd *Start) Run(c *cli.Context) error { if err := util.StreamCommand("docker-machine", "ssh", cmd.machine.Name, dataMountSetup); err != nil { return cmd.Error(err.Error(), "DATA-MOUNT-FAILED", 13) } - cmd.out.Success("/data filesystem is ready") + cmd.out.Info("/data filesystem is ready") // Route configuration needs to be finalized after NFS-triggered reboots. // This rebooting may change key details such as IP Address of the Dev machine. 
dns.ConfigureRoutes(cmd.machine) - cmd.out.Verbose.Println("Use docker-machine to interact with your virtual machine.") - cmd.out.Verbose.Printf("For example, to SSH into it: docker-machine ssh %s", cmd.machine.Name) + cmd.out.Verbose("Use docker-machine to interact with your virtual machine.") + cmd.out.Verbose(fmt.Sprintf("For example, to SSH into it: docker-machine ssh %s", cmd.machine.Name)) cmd.out.Spin("Launching Dashboard...") dash := Dashboard{cmd.BaseCommand} dash.LaunchDashboard(cmd.machine) - cmd.out.Success("Dashboard is ready") + cmd.out.Info("Dashboard is ready") - cmd.out.Info.Println("Run 'eval \"$(rig config)\"' to execute docker or docker-compose commands in your terminal.") + cmd.out.Info("Run 'eval \"$(rig config)\"' to execute docker or docker-compose commands in your terminal.") return cmd.Success("Outrigger is ready to use") } diff --git a/commands/stop.go b/commands/stop.go index 65d2a00..916147e 100644 --- a/commands/stop.go +++ b/commands/stop.go @@ -38,8 +38,8 @@ func (cmd *Stop) Run(c *cli.Context) error { // StopMinimal will stop "minimal" Outrigger operations, which refers to environments where // a virtual machine and networking are not required or managed by Outrigger. 
func (cmd *Stop) StopMinimal() error { - cmd.out.Verbose.Printf("Skipping Step: Linux does not have a docker-machine to stop.") - cmd.out.Verbose.Printf("Skipping Step: Outrigger does not manage Linux networking.") + cmd.out.Channel.Verbose.Printf("Skipping Step: Linux does not have a docker-machine to stop.") + cmd.out.Channel.Verbose.Printf("Skipping Step: Outrigger does not manage Linux networking.") dash := Dashboard{cmd.BaseCommand} dash.StopDashboard() @@ -56,7 +56,7 @@ func (cmd *Stop) StopOutrigger() error { if err := cmd.machine.Stop(); err != nil { return cmd.Error(err.Error(), "MACHINE-STOP-FAILED", 12) } - cmd.out.Success(fmt.Sprintf("Stopped machine '%s'", cmd.machine.Name)) + cmd.out.Info(fmt.Sprintf("Stopped machine '%s'", cmd.machine.Name)) cmd.out.Spin("Cleaning up local networking (may require your admin password)") if util.IsWindows() { @@ -67,8 +67,7 @@ func (cmd *Stop) StopOutrigger() error { util.Command("sudo", "route", "-n", "delete", "-net", "172.17.42.1").Run() } color.Unset() - cmd.out.Success("Networking cleanup completed") - cmd.out.NoSpin() + cmd.out.Info("Networking cleanup completed") return cmd.Success(fmt.Sprintf("Machine '%s' stopped", cmd.machine.Name)) } diff --git a/commands/upgrade.go b/commands/upgrade.go index 6f68476..21868e1 100644 --- a/commands/upgrade.go +++ b/commands/upgrade.go @@ -48,7 +48,7 @@ func (cmd *Upgrade) Run(c *cli.Context) error { cmd.out.Spin(fmt.Sprintf("Upgrading '%s'...", cmd.machine.Name)) if cmd.machine.GetData().Get("Driver").Get("Boot2DockerURL").MustString() == "" { - cmd.out.Oops(fmt.Sprintf("Machine %s not compatible with rig upgrade", cmd.machine.Name)) + cmd.out.Error(fmt.Sprintf("Machine %s not compatible with rig upgrade", cmd.machine.Name)) return cmd.Error(fmt.Sprintf("Machine '%s' was not created with a boot2docker URL. 
Run `docker-machine upgrade %s` directly", cmd.machine.Name, cmd.machine.Name), "MACHINE-CREATED-MANUALLY", 12) } @@ -62,7 +62,7 @@ func (cmd *Upgrade) Run(c *cli.Context) error { return cmd.Success(fmt.Sprintf("Machine '%s' has the same Docker version (%s) as your local Docker binary (%s). There is nothing to upgrade. If you wish to upgrade you'll need to install a newer version of the Docker binary before running the upgrade command.", cmd.machine.Name, machineDockerVersion, currentDockerVersion)) } - cmd.out.Info.Printf("Backing up to prepare for upgrade...") + cmd.out.Channel.Info.Printf("Backing up to prepare for upgrade...") backup := &DataBackup{cmd.BaseCommand} if err := backup.Run(c); err != nil { return err diff --git a/util/logger.go b/util/logger.go index 50da3fd..e7605f9 100644 --- a/util/logger.go +++ b/util/logger.go @@ -9,15 +9,21 @@ import ( spun "github.com/slok/gospinner" ) +// logger is the global logger data structure. Retrieve via Logger(). var logger *RigLogger -// RigLogger is the global logger object -type RigLogger struct { +// logChannels defines various log channels. This nests within the RigLogger to expose the loggers directly for +// advanced use cases. 
+type logChannels struct { Info *log.Logger Warning *log.Logger Error *log.Logger Verbose *log.Logger - Message *log.Logger +} + +// RigLogger is the global logger object +type RigLogger struct { + Channel logChannels Progress *RigSpinner IsVerbose bool Spinning bool @@ -38,11 +44,12 @@ func LoggerInit(verbose bool) { s, _ := spun.NewSpinner(spun.Dots) logger = &RigLogger{ - Info: log.New(os.Stdout, color.BlueString("[INFO] "), 0), - Warning: log.New(os.Stdout, color.YellowString("[WARN] "), 0), - Error: log.New(os.Stderr, color.RedString("[ERROR] "), 0), - Verbose: log.New(verboseWriter, "[VERBOSE] ", 0), - Message: log.New(os.Stdout, " - ", 0), + Channel: logChannels{ + Info: log.New(os.Stdout, color.BlueString("[INFO] "), 0), + Warning: log.New(os.Stdout, color.YellowString("[WARN] "), 0), + Error: log.New(os.Stderr, color.RedString("[ERROR] "), 0), + Verbose: log.New(verboseWriter, "[VERBOSE] ", 0), + }, IsVerbose: verbose, Progress: &RigSpinner{s}, Spinning: false, @@ -73,9 +80,9 @@ func (log *RigLogger) NoSpin() { } // Success indicates success behavior of the spinner-associated task. -func (log *RigLogger) Success(message string) { +func (log *RigLogger) Info(message string) { if log.IsVerbose || !log.Spinning { - log.Info.Println(message) + log.Channel.Info.Println(message) } else { log.Progress.Spins.SetMessage(message) log.Progress.Spins.Succeed() @@ -83,9 +90,9 @@ func (log *RigLogger) Success(message string) { } // Warn indicates a warning in the resolution of the spinner-associated task. -func (log *RigLogger) Warn(message string) { +func (log *RigLogger) Warning(message string) { if log.IsVerbose || !log.Spinning { - log.Warning.Println(message) + log.Channel.Warning.Println(message) } else { log.Progress.Spins.SetMessage(message) log.Progress.Spins.Warn() @@ -93,26 +100,22 @@ func (log *RigLogger) Warn(message string) { } // Error indicates an error in the spinner-associated task. 
-func (log *RigLogger) Oops(message string) { +func (log *RigLogger) Error(message string) { if log.IsVerbose || !log.Spinning { - log.Error.Println(message) + log.Channel.Error.Println(message) } else { log.Progress.Spins.SetMessage(message) log.Progress.Spins.Fail() } } -// Status allows output of an info log. -func (log *RigLogger) Status(message string) { - log.Info.Println(message) +// Details allows Verbose logging of more advanced activities/information. +// In practice, if the spinner can be in use verbose is a no-op. +func (log *RigLogger) Verbose(message string) { + log.Channel.Verbose.Println(message) } -// Note allows output of a simple message. +// Note allows output of an info log, bypassing the spinner if in use. func (log *RigLogger) Note(message string) { - log.Message.Println(message) + log.Channel.Info.Println(message) } - -// Details allows Verbose logging of more advanced activities/information. -func (log *RigLogger) Details(message string) { - log.Verbose.Println(message) -} \ No newline at end of file diff --git a/util/shell_exec.go b/util/shell_exec.go index ed021ce..6350dc3 100644 --- a/util/shell_exec.go +++ b/util/shell_exec.go @@ -115,7 +115,7 @@ func (x Executor) Start() error { // Log verbosely logs the command. func (x Executor) Log(tag string) { color.Set(color.FgMagenta) - Logger().Verbose.Printf("%s: %s", tag, x.ToString()) + Logger().Verbose(fmt.Sprintf("%s: %s", tag, x.ToString())) color.Unset() }