Skip to content

Commit

Permalink
Merge pull request #2735 from hashicorp/f-no_host_uuid-true
Browse files Browse the repository at this point in the history
Default no_host_uuid to true instead of false
  • Loading branch information
schmichael committed Jul 3, 2017
2 parents 460296e + b8de360 commit bf6c736
Show file tree
Hide file tree
Showing 8 changed files with 9 additions and 13 deletions.
1 change: 1 addition & 0 deletions client/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,7 @@ func DefaultConfig() *Config {
GCDiskUsageThreshold: 80,
GCInodeUsageThreshold: 70,
GCMaxAllocs: 50,
NoHostUUID: true,
}
}

Expand Down
2 changes: 1 addition & 1 deletion command/agent/config-test-fixtures/basic.hcl
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ client {
gc_disk_usage_threshold = 82
gc_inode_usage_threshold = 91
gc_max_allocs = 50
no_host_uuid = true
no_host_uuid = false
}
server {
enabled = true
Expand Down
4 changes: 3 additions & 1 deletion command/agent/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -543,6 +543,7 @@ func DefaultConfig() *Config {
GCDiskUsageThreshold: 80,
GCInodeUsageThreshold: 70,
GCMaxAllocs: 50,
NoHostUUID: true,
},
Server: &ServerConfig{
Enabled: false,
Expand Down Expand Up @@ -1003,7 +1004,8 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
if b.GCMaxAllocs != 0 {
result.GCMaxAllocs = b.GCMaxAllocs
}
if b.NoHostUUID {
// NoHostUUID defaults to true, merge if false
if !b.NoHostUUID {
result.NoHostUUID = b.NoHostUUID
}

Expand Down
2 changes: 1 addition & 1 deletion command/agent/config_parse_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ func TestConfig_Parse(t *testing.T) {
GCDiskUsageThreshold: 82,
GCInodeUsageThreshold: 91,
GCMaxAllocs: 50,
NoHostUUID: true,
NoHostUUID: false,
},
Server: &ServerConfig{
Enabled: true,
Expand Down
1 change: 0 additions & 1 deletion demo/vagrant/client1.hcl
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ client {
reserved {
cpu = 500
}
no_host_uuid = true
}

# Modify our port to avoid a collision with server1
Expand Down
2 changes: 0 additions & 2 deletions demo/vagrant/client2.hcl
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ data_dir = "/tmp/client2"
# Enable the client
client {
enabled = true
no_host_uuid = true

# For demo assume we are talking to server1. For production,
# this should be like "nomad.service.consul:4647" and a system
Expand All @@ -18,7 +17,6 @@ client {
meta {
ssd = "true"
}
no_host_uuid = true
}

# Modify our port to avoid a collision with server1 and client1
Expand Down
5 changes: 3 additions & 2 deletions website/source/docs/agent/configuration/client.html.md
Original file line number Diff line number Diff line change
Expand Up @@ -110,8 +110,9 @@ client {
parallel destroys allowed by the garbage collector. This value should be
relatively low to avoid high resource usage during garbage collections.

- `no_host_uuid` `(bool: false)` - Force the UUID generated by the client to be
randomly generated and not be based on the host's UUID.
- `no_host_uuid` `(bool: true)` - By default a random node UUID will be
generated, but setting this to `false` will use the system's UUID. Before
Nomad 0.6 the default was to use the system's UUID.

### `chroot_env` Parameters

Expand Down
5 changes: 0 additions & 5 deletions website/source/intro/getting-started/cluster.html.md
Original file line number Diff line number Diff line change
Expand Up @@ -98,11 +98,6 @@ client {
# this should be like "nomad.service.consul:4647" and a system
# like Consul used for service discovery.
servers = ["127.0.0.1:4647"]
# Since we are starting two clients on the same host, we need to set
# this in order to force nomad to generate client UUIDs randomly,
# instead of based on the host UUID.
no_host_uuid = true
}
# Modify our port to avoid a collision with server1
Expand Down

0 comments on commit bf6c736

Please sign in to comment.