
Commit

merge OSS release changes 514b0d6
hashicorp-nomad committed Sep 17, 2020
2 parents 38c0d5c + 514b0d6 commit 99ba9e9
Showing 26 changed files with 692 additions and 215 deletions.
13 changes: 13 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,16 @@
## 0.12.5 (September 17, 2020)

BUG FIXES:
* core: Fixed a panic on job submission when the job contains a service with `expose = true` set [[GH-8882](https://github.com/hashicorp/nomad/issues/8882)]
* core: Fixed a regression where stopping the sole job allocation resulted in two replacement allocations [[GH-8867](https://github.com/hashicorp/nomad/issues/8867)]
* core: Fixed a bug where an allocation may be left running unexpectedly despite promoting a new job version [[GH-8886](https://github.com/hashicorp/nomad/issues/8886)]
* cli: Fixed the whitespace in `nomad monitor` help output [[GH-8884](https://github.com/hashicorp/nomad/issues/8884)]
* cli: Updated job samples to avoid using deprecated task-level networks and `mbits` syntax [[GH-8911](https://github.com/hashicorp/nomad/issues/8911)]
* cli: Fixed a bug where `alloc signal` failed if the CLI could not contact the Nomad client directly [[GH-8897](https://github.com/hashicorp/nomad/issues/8897)]
* cli: Fixed a bug where host volumes could cause `nomad node status` to panic when the `-verbose` flag was used [[GH-8902](https://github.com/hashicorp/nomad/issues/8902)]
* ui: Fixed the ability to switch between tasks in alloc exec sessions [[GH-8856](https://github.com/hashicorp/nomad/issues/8856)]
* ui: Task log streaming will no longer suddenly flip to a different task's logs [[GH-8833](https://github.com/hashicorp/nomad/issues/8833)]

## 0.12.4 (September 9, 2020)

FEATURES:
14 changes: 2 additions & 12 deletions api/allocations.go
@@ -286,13 +286,8 @@ func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceU
}

func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error {
- nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
- if err != nil {
- return err
- }
-
var resp struct{}
- _, err = nodeClient.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil)
+ _, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil)
return err
}

@@ -321,18 +316,13 @@ type AllocStopResponse struct {
}

func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal string) error {
- nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q)
- if err != nil {
- return err
- }
-
req := AllocSignalRequest{
Signal: signal,
Task: task,
}

var resp GenericResponse
- _, err = nodeClient.putQuery("/v1/client/allocation/"+alloc.ID+"/signal", &req, &resp, q)
+ _, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/signal", &req, &resp, q)
return err
}

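Both endpoints remain available through the regular `api` package; with this change the HTTP request is sent to whichever agent the client is configured against (typically a server, which forwards it on to the node) instead of requiring a direct connection to the client node. A minimal usage sketch — the allocation ID and task name below are hypothetical, and a reachable local agent is assumed:

```go
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig honours NOMAD_ADDR and related environment variables; it
	// no longer needs to point at the specific node running the allocation.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical allocation ID, purely for illustration.
	alloc, _, err := client.Allocations().Info("8ba85cfd-59f0-6a29-3a57-e63bde2b1f8d", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Signal the "redis" task; the call is routed through the configured
	// agent rather than the node's HTTP API directly.
	if err := client.Allocations().Signal(alloc, nil, "redis", "SIGHUP"); err != nil {
		log.Fatal(err)
	}

	// Garbage-collect the allocation the same way.
	if err := client.Allocations().GC(alloc, nil); err != nil {
		log.Fatal(err)
	}
}
```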
22 changes: 11 additions & 11 deletions command/agent/bindata_assetfs.go

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions command/agent_monitor.go
@@ -22,14 +22,14 @@ func (c *MonitorCommand) Help() string {
helpText := `
Usage: nomad monitor [options]
Stream log messages of a nomad agent. The monitor command lets you
listen for log levels that may be filtered out of the Nomad agent. For
example your agent may only be logging at INFO level, but with the monitor
command you can set -log-level DEBUG
Stream log messages of a nomad agent. The monitor command lets you
listen for log levels that may be filtered out of the Nomad agent. For
example your agent may only be logging at INFO level, but with the monitor
command you can set -log-level DEBUG
General Options:
` + generalOptionsUsage() + `
` + generalOptionsUsage() + `
Monitor Specific Options:
15 changes: 7 additions & 8 deletions command/assets/example-short.nomad
@@ -2,25 +2,24 @@ job "example" {
datacenters = ["dc1"]

group "cache" {
+ network {
+ port "db" {
+ to = 6379
+ }
+ }
+
task "redis" {
driver = "docker"

config {
image = "redis:3.2"

- port_map {
- db = 6379
- }
+ ports = ["db"]
}

resources {
cpu = 500
memory = 256

- network {
- mbits = 10
- port "db" {}
- }
}
}
}
81 changes: 45 additions & 36 deletions command/assets/example.nomad
Expand Up @@ -142,6 +142,49 @@ job "example" {
# to 1.
count = 1

+ # The "network" stanza specifies the network configuration for the allocation
+ # including requesting port bindings.
+ #
+ # For more information and examples on the "network" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/network
+ #
+ network {
+ port "db" {
+ to = 6379
+ }
+ }
+
+ # The "service" stanza instructs Nomad to register this task as a service
+ # in the service discovery engine, which is currently Consul. This will
+ # make the service addressable after Nomad has placed it on a host and
+ # port.
+ #
+ # For more information and examples on the "service" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/service
+ #
+ service {
+ name = "redis-cache"
+ tags = ["global", "cache"]
+ port = "db"
+
+ # The "check" stanza instructs Nomad to create a Consul health check for
+ # this service. A sample check is provided here for your convenience;
+ # uncomment it to enable it. The "check" stanza is documented in the
+ # "service" stanza documentation.
+
+ # check {
+ # name = "alive"
+ # type = "tcp"
+ # interval = "10s"
+ # timeout = "2s"
+ # }
+
+ }
+
# The "restart" stanza configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
@@ -261,9 +304,7 @@ job "example" {
config {
image = "redis:3.2"

- port_map {
- db = 6379
- }
+ ports = ["db"]
}

# The "artifact" stanza instructs Nomad to download an artifact from a
@@ -301,7 +342,7 @@ job "example" {
# }

# The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
+ # execute. Resource requirements include memory, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
@@ -313,40 +354,8 @@ job "example" {
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB

- network {
- mbits = 10
- port "db" {}
- }
}
- # The "service" stanza instructs Nomad to register this task as a service
- # in the service discovery engine, which is currently Consul. This will
- # make the service addressable after Nomad has placed it on a host and
- # port.
- #
- # For more information and examples on the "service" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/service
- #
- service {
- name = "redis-cache"
- tags = ["global", "cache"]
- port = "db"
-
- # The "check" stanza instructs Nomad to create a Consul health check for
- # this service. A sample check is provided here for your convenience;
- # uncomment it to enable it. The "check" stanza is documented in the
- # "service" stanza documentation.
-
- # check {
- # name = "alive"
- # type = "tcp"
- # interval = "10s"
- # timeout = "2s"
- # }
-
- }

# The "template" stanza instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
258 changes: 217 additions & 41 deletions command/job_init.bindata_assetfs.go

Large diffs are not rendered by default.

33 changes: 20 additions & 13 deletions command/node_status.go
Expand Up @@ -12,6 +12,7 @@ import (
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/api/contexts"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/posener/complete"
)

@@ -326,7 +327,9 @@ func nodeCSIVolumeNames(n *api.Node, allocs []*api.Allocation) []string {
}

for _, v := range tg.Volumes {
- names = append(names, v.Name)
+ if v.Type == structs.VolumeTypeCSI {
+ names = append(names, v.Name)
+ }
}
}
sort.Strings(names)
@@ -550,8 +553,10 @@ func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *ap
}

for _, v := range tg.Volumes {
- names = append(names, v.Name)
- requests[v.Source] = v
+ if v.Type == structs.VolumeTypeCSI {
+ names = append(names, v.Name)
+ requests[v.Source] = v
+ }
}
}
if len(names) == 0 {
@@ -577,16 +582,18 @@ func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *ap
output := make([]string, 0, len(names)+1)
output = append(output, "ID|Name|Plugin ID|Schedulable|Provider|Access Mode")
for _, name := range names {
- v := volumes[name]
- output = append(output, fmt.Sprintf(
- "%s|%s|%s|%t|%s|%s",
- v.ID,
- name,
- v.PluginID,
- v.Schedulable,
- v.Provider,
- v.AccessMode,
- ))
+ v, ok := volumes[name]
+ if ok {
+ output = append(output, fmt.Sprintf(
+ "%s|%s|%s|%t|%s|%s",
+ v.ID,
+ name,
+ v.PluginID,
+ v.Schedulable,
+ v.Provider,
+ v.AccessMode,
+ ))
+ }
}

c.Ui.Output(formatList(output))
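For context on why the `ok` guard matters: reading a missing key from a Go map does not panic by itself — it returns the zero value, here a nil pointer — and it was the later field access that panicked once host volume names showed up without a matching entry in the CSI volume map. A self-contained illustration with simplified stand-in types (not the real Nomad structs):

```go
package main

import "fmt"

// CSIVolume is a simplified stand-in for the API type held in the lookup map.
type CSIVolume struct {
	ID       string
	PluginID string
}

func main() {
	volumes := map[string]*CSIVolume{
		"cache-vol": {ID: "vol-1", PluginID: "aws-ebs"},
	}

	// Names gathered from the task group can include host volumes that were
	// never added to the CSI map.
	names := []string{"cache-vol", "scratch-host-vol"}

	for _, name := range names {
		v, ok := volumes[name]
		if !ok {
			// Without this guard, v would be nil for "scratch-host-vol" and
			// the v.ID access below would panic with a nil pointer dereference.
			continue
		}
		fmt.Printf("%s|%s|%s\n", v.ID, name, v.PluginID)
	}
}
```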
8 changes: 7 additions & 1 deletion nomad/job_endpoint_hook_expose_check.go
@@ -170,7 +170,7 @@ func serviceUsesConnectEnvoy(s *structs.Service) bool {
}

// A non-nil connect.sidecar_task stanza implies the sidecar task is being
- // overridden (i.e. the default Envoy is not being uesd).
+ // overridden (i.e. the default Envoy is not being used).
if s.Connect.SidecarTask != nil {
return false
}
@@ -199,6 +199,12 @@ func exposePathForCheck(tg *structs.TaskGroup, s *structs.Service, check *struct
return nil, nil
}

+ // Borrow some of the validation before we start manipulating the group
+ // network, which needs to exist once.
+ if err := tgValidateUseOfBridgeMode(tg); err != nil {
+ return nil, err
+ }
+
// If the check is exposable but doesn't have a port label set build
// a port with a generated label, add it to the group's Dynamic ports
// and set the check port label to the generated label.
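A rough sketch of the guard this adds before the group network is touched — the types below are simplified stand-ins, not the actual `structs` definitions — where the idea is to reject groups that do not declare exactly one bridge-mode network before any expose port is injected:

```go
package main

import "fmt"

// Simplified stand-ins for the job structs the hook works with.
type Network struct {
	Mode         string
	DynamicPorts []string
}

type TaskGroup struct {
	Name     string
	Networks []*Network
}

// requireOneBridgeNetwork mirrors the shape of the borrowed validation.
func requireOneBridgeNetwork(tg *TaskGroup) error {
	if len(tg.Networks) != 1 || tg.Networks[0].Mode != "bridge" {
		return fmt.Errorf("group %q must specify one bridge network for exposing service check(s)", tg.Name)
	}
	return nil
}

// addExposePort only touches tg.Networks[0] after validation has passed,
// rather than indexing into a group network that may not exist.
func addExposePort(tg *TaskGroup, label string) error {
	if err := requireOneBridgeNetwork(tg); err != nil {
		return err
	}
	tg.Networks[0].DynamicPorts = append(tg.Networks[0].DynamicPorts, label)
	return nil
}

func main() {
	tg := &TaskGroup{Name: "group1"} // no group network configured
	if err := addExposePort(tg, "svc_expose_check1"); err != nil {
		fmt.Println(err)
	}
}
```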
24 changes: 24 additions & 0 deletions nomad/job_endpoint_hook_expose_check_test.go
Expand Up @@ -376,6 +376,30 @@ func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) {
ListenerPort: tg.Networks[0].DynamicPorts[0].Label,
}, ePath)
})

+ t.Run("missing network with no service check port label", func(t *testing.T) {
+ // this test ensures we do not try to manipulate the group network
+ // to inject an expose port if the group network does not exist
+ c := &structs.ServiceCheck{
+ Name: "check1",
+ Type: "http",
+ Path: "/health",
+ PortLabel: "", // not set
+ Expose: true, // will require a service check port label
+ }
+ s := &structs.Service{
+ Name: "service1",
+ Checks: []*structs.ServiceCheck{c},
+ }
+ tg := &structs.TaskGroup{
+ Name: "group1",
+ Services: []*structs.Service{s},
+ Networks: nil, // not set, should cause validation error
+ }
+ ePath, err := exposePathForCheck(tg, s, c)
+ require.EqualError(t, err, `group "group1" must specify one bridge network for exposing service check(s)`)
+ require.Nil(t, ePath)
+ })
}

func TestJobExposeCheckHook_containsExposePath(t *testing.T) {
