Skip to content

Commit

Permalink
cli: add monitor flag to deployment status
Browse files Browse the repository at this point in the history
Adding '-verbose' will print out the allocation information for the
deployment. This also changes the job run command so that it now blocks
until deployment is complete and adds timestamps to the output so that
it's more in line with the output of node drain.
  • Loading branch information
isabeldepapel committed May 26, 2021
1 parent e694000 commit f8c6672
Show file tree
Hide file tree
Showing 17 changed files with 585 additions and 24 deletions.
121 changes: 120 additions & 1 deletion command/deployment_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,16 @@ package command
import (
"errors"
"fmt"
"os"
"sort"
"strings"
"time"

"github.com/docker/docker/pkg/term"
"github.com/gosuri/uilive"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/api/contexts"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/posener/complete"
)

Expand Down Expand Up @@ -37,6 +42,9 @@ Status Options:
-json
Output the deployment in its JSON format.
-monitor
Enter monitor mode directly without modifying the deployment status.
-t
Format and display deployment using a Go template.
`
Expand All @@ -52,6 +60,7 @@ func (c *DeploymentStatusCommand) AutocompleteFlags() complete.Flags {
complete.Flags{
"-verbose": complete.PredictNothing,
"-json": complete.PredictNothing,
"-monitor": complete.PredictNothing,
"-t": complete.PredictAnything,
})
}
Expand All @@ -74,19 +83,26 @@ func (c *DeploymentStatusCommand) AutocompleteArgs() complete.Predictor {
func (c *DeploymentStatusCommand) Name() string { return "deployment status" }

func (c *DeploymentStatusCommand) Run(args []string) int {
var json, verbose bool
var json, verbose, monitor bool
var tmpl string

flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&json, "json", false, "")
flags.BoolVar(&monitor, "monitor", false, "")
flags.StringVar(&tmpl, "t", "", "")

if err := flags.Parse(args); err != nil {
return 1
}

// Check that json or tmpl isn't set with monitor
if monitor && (json || len(tmpl) > 0) {
c.Ui.Error("The monitor flag cannot be used with the '-json' or '-t' flags")
return 1
}

// Check that we got exactly one argument
args = flags.Args()
if l := len(args); l > 1 {
Expand Down Expand Up @@ -144,6 +160,19 @@ func (c *DeploymentStatusCommand) Run(args []string) int {
return 0
}

if monitor {
// Call just to get meta
_, meta, err := client.Deployments().Info(deploy.ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving deployment: %s", err))
}

c.Ui.Output(fmt.Sprintf("%s: Monitoring deployment %q",
formatTime(time.Now()), limit(deploy.ID, length)))
c.monitor(client, deploy.ID, meta.LastIndex, verbose)

return 0
}
c.Ui.Output(c.Colorize().Color(formatDeployment(client, deploy, length)))
return 0
}
Expand Down Expand Up @@ -358,3 +387,93 @@ func formatDeploymentGroups(d *api.Deployment, uuidLength int) string {

return formatList(rows)
}

// hasAutoRevert reports whether any task group in the deployment is
// configured to automatically roll back to the previous job version
// when the deployment fails.
func hasAutoRevert(d *api.Deployment) bool {
	for _, state := range d.TaskGroups {
		if state.AutoRevert {
			return true
		}
	}
	return false
}

// monitor polls the given deployment until it reaches a terminal state,
// rendering its status (and, when verbose, its allocations) in place when
// stdout is a TTY, or as plain appended output otherwise. If the deployment
// fails and any task group has auto-revert enabled, it recursively monitors
// the rollback deployment.
func (c *DeploymentStatusCommand) monitor(client *api.Client, deployID string, index uint64, verbose bool) {
	writer := uilive.New()
	writer.Start()
	// Always stop the live writer's background flusher. The original only
	// called Stop() on the Successful/Cancelled paths; the Failed and
	// Blocked cases returned without stopping it, leaking its goroutine.
	defer writer.Stop()

	q := api.QueryOptions{
		AllowStale: true,
		WaitIndex:  index,
		WaitTime:   2 * time.Second,
	}

	length := shortId
	if verbose {
		length = fullId
	}

	for {
		deploy, meta, err := client.Deployments().Info(deployID, &q)
		if err != nil {
			c.Ui.Error(c.Colorize().Color("Error fetching deployment"))
			return
		}

		info := formatTime(time.Now())
		info += fmt.Sprintf("\n%s", formatDeployment(client, deploy, length))

		if verbose {
			info += "\n\n[bold]Allocations[reset]\n"
			allocs, _, err := client.Deployments().Allocations(deployID, nil)
			if err != nil {
				info += "Error fetching allocations"
			} else {
				info += formatAllocListStubs(allocs, verbose, length)
			}
		}

		// Add newline before output to avoid prefix indentation when called from job run
		msg := c.Colorize().Color(fmt.Sprintf("\n%s", info))

		// Print in place if tty
		if _, isStdoutTerminal := term.GetFdInfo(os.Stdout); isStdoutTerminal {
			fmt.Fprint(writer, msg)
		} else {
			c.Ui.Output(msg)
		}

		switch deploy.Status {
		case structs.DeploymentStatusFailed:
			if hasAutoRevert(deploy) {
				// Wait for the rollback deployment to launch before querying it.
				time.Sleep(1 * time.Second)
				rollback, _, err := client.Jobs().LatestDeployment(deploy.JobID, nil)
				if err != nil {
					c.Ui.Error(c.Colorize().Color("Error fetching deployment of previous job version"))
					return
				}
				c.Ui.Output("") // Separate rollback monitoring from failed deployment
				c.monitor(client, rollback.ID, index, verbose)
			}
			return

		case structs.DeploymentStatusSuccessful,
			structs.DeploymentStatusCancelled,
			structs.DeploymentStatusBlocked:
			// Terminal states: nothing further to watch.
			return

		default:
			// Still in progress: block on the next change past the last index.
			q.WaitIndex = meta.LastIndex
		}
	}
}
9 changes: 9 additions & 0 deletions command/deployment_status_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,15 @@ func TestDeploymentStatusCommand_Fails(t *testing.T) {
// "deployments" indicates that we attempted to list all deployments
require.Contains(t, out, "Error retrieving deployments")
ui.ErrorWriter.Reset()

// Fails if monitor passed with json or tmpl flags
for _, flag := range []string{"-json", "-t"} {
code = cmd.Run([]string{"-monitor", flag, "12"})
require.Equal(t, 1, code)
out = ui.ErrorWriter.String()
require.Contains(t, out, "The monitor flag cannot be used with the '-json' or '-t' flags")
ui.ErrorWriter.Reset()
}
}

func TestDeploymentStatusCommand_AutocompleteArgs(t *testing.T) {
Expand Down
69 changes: 46 additions & 23 deletions command/monitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,18 +108,20 @@ func (m *monitor) update(update *evalState) {

// Check if the evaluation was triggered by a node
if existing.node == "" && update.node != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
limit(update.node, m.length)))
m.ui.Output(fmt.Sprintf("%s: Evaluation triggered by node %q",
formatTime(time.Now()), limit(update.node, m.length)))
}

// Check if the evaluation was triggered by a job
if existing.job == "" && update.job != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by job %q", update.job))
m.ui.Output(fmt.Sprintf("%s: Evaluation triggered by job %q",
formatTime(time.Now()), update.job))
}

// Check if the evaluation was triggered by a deployment
if existing.deployment == "" && update.deployment != "" {
m.ui.Output(fmt.Sprintf("Evaluation within deployment: %q", limit(update.deployment, m.length)))
m.ui.Output(fmt.Sprintf("%s: Evaluation within deployment: %q",
formatTime(time.Now()), limit(update.deployment, m.length)))
}

// Check the allocations
Expand All @@ -130,14 +132,16 @@ func (m *monitor) update(update *evalState) {
// New alloc with create index lower than the eval
// create index indicates modification
m.ui.Output(fmt.Sprintf(
"Allocation %q modified: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
"%s: Allocation %q modified: node %q, group %q",
formatTime(time.Now()), limit(alloc.id, m.length),
limit(alloc.node, m.length), alloc.group))

case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"Allocation %q created: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
"%s: Allocation %q created: node %q, group %q",
formatTime(time.Now()), limit(alloc.id, m.length),
limit(alloc.node, m.length), alloc.group))
}
} else {
switch {
Expand All @@ -148,8 +152,9 @@ func (m *monitor) update(update *evalState) {
}
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"Allocation %q status changed: %q -> %q%s",
limit(alloc.id, m.length), existing.client, alloc.client, description))
"%s: Allocation %q status changed: %q -> %q%s",
formatTime(time.Now()), limit(alloc.id, m.length),
existing.client, alloc.client, description))
}
}
}
Expand All @@ -158,8 +163,8 @@ func (m *monitor) update(update *evalState) {
if existing.status != "" &&
update.status != structs.AllocClientStatusPending &&
existing.status != update.status {
m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
existing.status, update.status))
m.ui.Output(fmt.Sprintf("%s: Evaluation status changed: %q -> %q",
formatTime(time.Now()), existing.status, update.status))
}
}

Expand Down Expand Up @@ -189,7 +194,8 @@ func (m *monitor) monitor(evalID string) int {
return 1
}

m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", limit(eval.ID, m.length)))
m.ui.Info(fmt.Sprintf("%s: Monitoring evaluation %q",
formatTime(time.Now()), limit(eval.ID, m.length)))

// Create the new eval state.
state := newEvalState()
Expand All @@ -204,7 +210,7 @@ func (m *monitor) monitor(evalID string) int {
// Query the allocations associated with the evaluation
allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading allocations: %s", err))
m.ui.Error(fmt.Sprintf("%s: Error reading allocations: %s", formatTime(time.Now()), err))
return 1
}

Expand All @@ -228,30 +234,31 @@ func (m *monitor) monitor(evalID string) int {
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
if len(eval.FailedTGAllocs) == 0 {
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
limit(eval.ID, m.length), eval.Status))
m.ui.Info(fmt.Sprintf("%s: Evaluation %q finished with status %q",
formatTime(time.Now()), limit(eval.ID, m.length), eval.Status))
} else {
// There were failures making the allocations
schedFailure = true
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q but failed to place all allocations:",
limit(eval.ID, m.length), eval.Status))
m.ui.Info(fmt.Sprintf("%s: Evaluation %q finished with status %q but failed to place all allocations:",
formatTime(time.Now()), limit(eval.ID, m.length), eval.Status))

// Print the failures per task group
for tg, metrics := range eval.FailedTGAllocs {
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
m.ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
m.ui.Output(fmt.Sprintf("%s: Task Group %q (failed to place %d %s):",
formatTime(time.Now()), tg, metrics.CoalescedFailures+1, noun))
metrics := formatAllocMetrics(metrics, false, " ")
for _, line := range strings.Split(metrics, "\n") {
m.ui.Output(line)
}
}

if eval.BlockedEval != "" {
m.ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
limit(eval.BlockedEval, m.length)))
m.ui.Output(fmt.Sprintf("%s: Evaluation %q waiting for additional capacity to place remainder",
formatTime(time.Now()), limit(eval.BlockedEval, m.length)))
}
}
default:
Expand All @@ -264,8 +271,8 @@ func (m *monitor) monitor(evalID string) int {
if eval.NextEval != "" {
if eval.Wait.Nanoseconds() != 0 {
m.ui.Info(fmt.Sprintf(
"Monitoring next evaluation %q in %s",
limit(eval.NextEval, m.length), eval.Wait))
"%s: Monitoring next evaluation %q in %s",
formatTime(time.Now()), limit(eval.NextEval, m.length), eval.Wait))

// Skip some unnecessary polling
time.Sleep(eval.Wait)
Expand All @@ -278,6 +285,22 @@ func (m *monitor) monitor(evalID string) int {
break
}

// Monitor the deployment
dID := m.state.deployment
m.ui.Info(fmt.Sprintf("%s: Monitoring deployment %q", formatTime(time.Now()), limit(dID, m.length)))

var verbose bool
if m.length == fullId {
verbose = true
} else {
verbose = false
}

meta := new(Meta)
meta.Ui = m.ui
cmd := &DeploymentStatusCommand{Meta: *meta}
cmd.monitor(m.client, dID, 0, verbose)

// Treat scheduling failures specially using a dedicated exit code.
// This makes it easier to detect failures from the CLI.
if schedFailure {
Expand Down
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ require (
github.com/golang/snappy v0.0.1
github.com/google/go-cmp v0.5.2
github.com/gorilla/websocket v1.4.2
github.com/gosuri/uilive v0.0.4
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39
github.com/hashicorp/consul v1.7.8
github.com/hashicorp/consul-template v0.25.1
Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -324,6 +324,8 @@ github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uilive v0.0.4 h1:hUEBpQDj8D8jXgtCdBu7sWsy5sbW/5GhuO8KBwJ2jyY=
github.com/gosuri/uilive v0.0.4/go.mod h1:V/epo5LjjlDE5RJUcqx8dbw+zc93y5Ya3yg8tfZ74VI=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39 h1:MqvH60+R2JhSdvVgGxmExOndrkRQtGW7w4+gcrymN64=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
Expand Down
28 changes: 28 additions & 0 deletions vendor/github.com/gosuri/uilive/.travis.yml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit f8c6672

Please sign in to comment.