# rancher.tf (forked from drpebcak/terraform-rancher-workload-cluster)
provider "rancher2" {
api_url = local.rancher_api_url
token_key = local.rancher_token_key
}
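# The locals referenced throughout this file (connection settings, cluster
# sizing, service args) are defined elsewhere in the module. A minimal sketch
# of the two connection values, assuming hypothetical input variables of the
# same names:
#
#   locals {
#     rancher_api_url   = var.rancher_api_url   # e.g. "https://rancher.example.com/v3"
#     rancher_token_key = var.rancher_token_key # API token generated in Rancher
#   }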
resource "rancher2_cluster" "cluster" {
name = local.name
description = local.cluster_description
rke_config {
ingress {
provider = local.ingress_provider
}
kubernetes_version = local.kubernetes_version
cloud_provider {
name = local.cloud_provider_name
}
upgrade_strategy {
drain = local.upgrade_drain
max_unavailable_worker = local.upgrade_max_unavailable_worker
drain_input {
delete_local_data = local.drain_delete_local_data
force = local.drain_force
timeout = local.drain_timeout
}
}
services {
kubelet {
extra_args = local.kubelet_extra_args
cluster_dns_server = local.cluster_dns_server
}
kube_api {
extra_args = local.kube_api_extra_args
service_cluster_ip_range = local.service_cluster_ip_range
secrets_encryption_config {
enabled = true
}
}
kube_controller {
extra_args = local.kube_controller_extra_args
cluster_cidr = local.cluster_cidr
service_cluster_ip_range = local.service_cluster_ip_range
}
scheduler {
extra_args = local.scheduler_extra_args
}
etcd {
backup_config {
enabled = true
interval_hours = 6
retention = 21
s3_backup_config {
access_key = aws_iam_access_key.etcd_backup_user.id
bucket_name = aws_s3_bucket.etcd_backups.id
endpoint = "s3.${aws_s3_bucket.etcd_backups.region}.amazonaws.com"
region = aws_s3_bucket.etcd_backups.region
secret_key = aws_iam_access_key.etcd_backup_user.secret
folder = "${local.name}-etcd-backup"
}
}
}
}
}
depends_on = [aws_s3_bucket.etcd_backups]
}
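# aws_s3_bucket.etcd_backups and aws_iam_access_key.etcd_backup_user are
# referenced above but defined elsewhere in the module. A minimal sketch of
# what they might look like, assuming the bucket name is derived from
# local.name (names here are illustrative, not the module's actual values):
#
#   resource "aws_s3_bucket" "etcd_backups" {
#     bucket = "${local.name}-etcd-backups"
#   }
#
#   resource "aws_iam_user" "etcd_backup_user" {
#     name = "${local.name}-etcd-backup"
#   }
#
#   resource "aws_iam_access_key" "etcd_backup_user" {
#     user = aws_iam_user.etcd_backup_user.name
#   }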
resource "rancher2_cluster_sync" "cluster" {
cluster_id = rancher2_cluster.cluster.id
}
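# rancher2_cluster_sync also exposes the cluster's kubeconfig once it is
# active. A hypothetical usage sketch that writes it to disk for kubectl
# (requires the hashicorp/local provider):
#
#   resource "local_file" "kubeconfig" {
#     content         = rancher2_cluster_sync.cluster.kube_config
#     filename        = "${path.module}/kubeconfig_${local.name}"
#     file_permission = "0600"
#   }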
resource "rancher2_cluster_role_template_binding" "deploy" {
count = local.deploy_user_enabled
name = "deploy"
role_template_id = "cluster-owner"
cluster_id = rancher2_cluster_sync.cluster.id
user_id = local.rancher_deploy_user
}
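# count expects a number, so local.deploy_user_enabled should resolve to 0 or
# 1. A sketch of one way to drive it, assuming a hypothetical boolean
# variable:
#
#   locals {
#     deploy_user_enabled = var.enable_deploy_user ? 1 : 0
#   }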