From e4d1b126e332c7ae6a4db0b1b86ec823e9dc7e01 Mon Sep 17 00:00:00 2001
From: Shri Javadekar
Date: Wed, 27 Mar 2019 15:36:42 -0700
Subject: [PATCH] Force drain pods. (#33)

Testing done:
- Unit tests
- Ran minion-manager with bare pods running on nodes. It drained them and
  switched between on-demand and spot successfully.
---
 cloud_provider/aws/aws_minion_manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cloud_provider/aws/aws_minion_manager.py b/cloud_provider/aws/aws_minion_manager.py
index 0651725..f94fab6 100644
--- a/cloud_provider/aws/aws_minion_manager.py
+++ b/cloud_provider/aws/aws_minion_manager.py
@@ -400,7 +400,7 @@ def cordon_node(self, instance):
         instance_name = self.get_name_for_instance(instance)
         if instance_name:
             try:
-                cmd = "kubectl drain " + instance_name + " --ignore-daemonsets=true --delete-local-data=true"
+                cmd = "kubectl drain " + instance_name + " --ignore-daemonsets=true --delete-local-data=true --force --grace-period=-1"
                 subprocess.check_call(shlex.split(cmd))
                 logger.info("Drained instance %s", instance_name)
             except Exception as ex:
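
Note: below is a minimal standalone sketch of the drain call as it looks after this
patch, for reviewers who want to try it outside the minion-manager. The node name is
a placeholder and the script form is illustrative; only the kubectl flags come from
the diff. `--force` lets drain evict bare pods (pods not managed by a controller),
which is what the testing notes exercise, and `--grace-period=-1` defers to each
pod's own terminationGracePeriodSeconds.

    # Sketch only: mirrors the subprocess/shlex usage in aws_minion_manager.py.
    import shlex
    import subprocess

    # Placeholder node name; the real code resolves this per EC2 instance via
    # get_name_for_instance().
    node_name = "ip-10-0-0-1.ec2.internal"

    cmd = ("kubectl drain " + node_name +
           " --ignore-daemonsets=true --delete-local-data=true"
           " --force --grace-period=-1")

    # Raises CalledProcessError if kubectl exits non-zero, matching the
    # try/except handling in cordon_node().
    subprocess.check_call(shlex.split(cmd))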