---
#### JupyterHub to Zero #### tear down, up through: the JupyterHub helm release, Kubernetes, and the AWS fixtures
- hosts: all
  user: ec2-user
  environment:
    - AWS_REGION: "{{ aws_region }}"
    - KOPS_STATE_STORE: s3://{{ namespace }}-s3
  tasks:
    - name: Install boto3
      pip:
        name: boto3
        executable: /usr/bin/pip
      become: yes
      tags: always
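    ## boto3/botocore back the AWS modules used further down (efs_facts, aws_s3,
    ## ec2_group, ec2_instance_facts, ec2), so it is installed up front on every run.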
    ## Fix issue with importing AliasedEventEmitter
    - name: Upgrade awscli
      pip:
        name: awscli
        executable: /usr/bin/pip
      become: yes
      tags: always
    ## Idempotently delete the JupyterHub (JH) helm release
    - name: Determine if Helm and Tiller are connected already
      command: helm version --tiller-connection-timeout 30
      failed_when: False
      changed_when: False
      register: helmver_result
      async: 45
      poll: 5
      tags: always
    - name: Determine if JH is currently installed
      command: helm status {{ namespace }}-jupyterhub
      register: jh_installed_result
      failed_when: False
      changed_when: False
      tags: always
    # Delete the Helm release. This deletes all resources that were created
    # by Helm for your JupyterHub deployment.
    - name: Delete helm release
      command: helm delete {{ namespace }}-jupyterhub --purge
      when: helmver_result.stdout is defined and not helmver_result.rc and not jh_installed_result.rc
      tags: always
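    ## The tasks below carry the special 'never' tag (Ansible 2.5+), so they are
    ## skipped on a plain run and only execute when one of their other tags
    ## (kubernetes, all-fixtures, fixture-*) is requested with --tags.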
    - name: Check for kube namespace
      command: kubectl get namespace {{ namespace }}
      failed_when: False
      changed_when: False
      register: get_kube_namespace
      tags:
        - never
        - kubernetes
        - all-fixtures
    # Next, delete the Kubernetes namespace the hub was installed in.
    # This deletes any disks that may have been created to store users'
    # data, and any IP addresses that may have been provisioned.
    - name: Delete kubernetes namespace
      command: kubectl delete namespace {{ namespace }}
      when: not get_kube_namespace.rc
      tags:
        - never
        - kubernetes
        - all-fixtures
    - name: Gather EFS facts
      efs_facts:
        name: "{{ namespace }}-efs"
      tags:
        - never
        - kubernetes
        - all-fixtures
    - name: Remove EFS mount targets
      command: aws efs delete-mount-target --region {{ aws_region }} --mount-target-id {{ item['mount_target_id'] }}
      with_items:
        "{{ efs[0]['mount_targets'] }}"
      tags:
        - never
        - kubernetes
        - all-fixtures
      when: efs[0] is defined
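    ## AWS will not delete an EFS file system while mount targets still exist,
    ## which is why they are removed explicitly before the file system below.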
    ## Only delete the EFS itself when the play is explicitly run with tag 'all-fixtures' or 'fixture-efs'
    - name: Delete EFS fs
      efs:
        state: absent
        name: "{{ namespace }}-efs"
      tags:
        - all-fixtures
        - fixture-efs
        - never
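    ## The NFS security group lives in the cluster VPC; removing it here presumably
    ## keeps kops from hitting a dependency error when it deletes the VPC later.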
    # delete security groups
    - name: Delete NFS Security Group
      ec2_group:
        name: "{{ namespace }}.nfs"
        state: absent
      tags:
        - all-fixtures
        - kubernetes
        - never
    - name: Determine if cluster exists
      shell: kops get clusters
      tags:
        - all-fixtures
        - kubernetes
        - never
      failed_when: False
      changed_when: False
      register: get_cluster
    - name: Delete kubernetes
      shell: kops delete cluster {{ namespace }}.k8s.local --yes
      tags:
        - all-fixtures
        - kubernetes
        - never
      when: not get_cluster.rc
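    ## kops reads its state from KOPS_STATE_STORE (set at the play level above);
    ## without --yes the delete would only be a dry run.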
    - name: Delete S3 bucket
      aws_s3:
        bucket: "{{ namespace }}-s3"
        mode: delete
        # ec2_url: ec2-54-172-77-177.compute-1.amazonaws.com
      register: s3_log
      tags:
        - all-fixtures
        - fixture-s3
        - never
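    ## This bucket is the kops state store, so it should only be removed once the
    ## cluster itself is gone.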
    - name: Gather EC2 Facts
      ec2_instance_facts:
        filters:
          "tag:Name": "{{ namespace }}-ci"
      register: ec2_facts
      tags:
        - all-fixtures
        - fixture-ci
        - never
    - name: Delete CI node
      ec2:
        state: absent
        instance_ids: "{{ ec2_facts['instances'][0]['instance_id'] }}"
      tags:
        - all-fixtures
        - fixture-ci
        - never
      when: ec2_facts['instances'][0] is defined
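
## Example invocations (inventory file name and variable sourcing are assumptions):
##   ansible-playbook -i hosts teardown.yml                        # helm release only
##   ansible-playbook -i hosts teardown.yml --tags kubernetes      # + namespace, EFS mount targets, NFS SG, kops cluster
##   ansible-playbook -i hosts teardown.yml --tags all-fixtures    # everything, including EFS, S3 state store, CI node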