diff --git a/lib/charms/mongodb/v0/config_server_interface.py b/lib/charms/mongodb/v0/config_server_interface.py
index 47100d71a..e9b3ab0ca 100644
--- a/lib/charms/mongodb/v0/config_server_interface.py
+++ b/lib/charms/mongodb/v0/config_server_interface.py
@@ -14,7 +14,7 @@
 )
 from charms.mongodb.v1.helpers import add_args_to_env, get_mongos_args
 from charms.mongodb.v1.mongos import MongosConnection
-from ops.charm import CharmBase, EventBase
+from ops.charm import CharmBase, EventBase, RelationBrokenEvent
 from ops.framework import Object
 from ops.model import ActiveStatus, MaintenanceStatus, WaitingStatus
 
@@ -35,7 +35,7 @@
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 3
+LIBPATCH = 4
 
 
 class ClusterProvider(Object):
@@ -182,8 +182,13 @@ def __init__(
         self.framework.observe(
             charm.on[self.relation_name].relation_changed, self._on_relation_changed
         )
-
-        # TODO Future PRs handle scale down
+        self.framework.observe(
+            charm.on[self.relation_name].relation_departed,
+            self.charm.check_relation_broken_or_scale_down,
+        )
+        self.framework.observe(
+            charm.on[self.relation_name].relation_broken, self._on_relation_broken
+        )
 
     def _on_database_created(self, event) -> None:
         if not self.charm.unit.is_leader():
@@ -228,6 +233,30 @@ def _on_relation_changed(self, event) -> None:
 
         self.charm.unit.status = ActiveStatus()
 
+    def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
+        # Only relation_departed events can check if scaling down
+        if not self.charm.has_departed_run(event.relation.id):
+            logger.info(
+                "Deferring, must wait for relation departed hook to decide if relation should be removed."
+            )
+            event.defer()
+            return
+
+        if not self.charm.proceed_on_broken_event(event):
+            logger.info("Skipping relation broken event, broken event due to scale down")
+            return
+
+        self.charm.stop_mongos_service()
+        logger.info("Stopped mongos daemon")
+
+        if not self.charm.unit.is_leader():
+            return
+
+        logger.info("Database and user removed for mongos application")
+        self.charm.remove_secret(Config.Relations.APP_SCOPE, Config.Secrets.USERNAME)
+        self.charm.remove_secret(Config.Relations.APP_SCOPE, Config.Secrets.PASSWORD)
+        self.charm.remove_connection_info()
+
     # BEGIN: helper functions
 
     def is_mongos_running(self) -> bool:
diff --git a/lib/charms/mongodb/v1/shards_interface.py b/lib/charms/mongodb/v1/shards_interface.py
index 4f62b418e..a9cd9f637 100644
--- a/lib/charms/mongodb/v1/shards_interface.py
+++ b/lib/charms/mongodb/v1/shards_interface.py
@@ -51,7 +51,7 @@
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 3
+LIBPATCH = 4
 KEYFILE_KEY = "key-file"
 HOSTS_KEY = "host"
 OPERATOR_PASSWORD_KEY = MongoDBUser.get_password_key_name_for_user(OperatorUser.get_username())
@@ -146,6 +146,17 @@ def pass_hook_checks(self, event: EventBase) -> bool:
             event.defer()
             return False
 
+        if isinstance(event, RelationBrokenEvent):
+            if not self.charm.has_departed_run(event.relation.id):
+                logger.info(
+                    "Deferring, must wait for relation departed hook to decide if relation should be removed."
+                )
+                event.defer()
+                return False
+
+            if not self.charm.proceed_on_broken_event(event):
+                return False
+
         return True
 
     def _on_relation_event(self, event):
@@ -159,9 +170,7 @@ def _on_relation_event(self, event):
 
         departed_relation_id = None
         if isinstance(event, RelationBrokenEvent):
-            departed_relation_id = self.charm.proceed_on_broken_event(event)
-            if not departed_relation_id:
-                return
+            departed_relation_id = event.relation.id
 
         try:
             logger.info("Adding/Removing shards not present in cluster.")
diff --git a/src/charm.py b/src/charm.py
index e8cd654ad..6c0477577 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -1323,10 +1323,8 @@ def set_scaling_down(self, event: RelationDepartedEvent) -> bool:
         self.unit_peer_data[rel_departed_key] = json.dumps(scaling_down)
         return scaling_down
 
-    def proceed_on_broken_event(self, event) -> int:
-        """Returns relation_id if relation broken event occurred due to a removed relation."""
-        departed_relation_id = None
-
-        # Only relation_deparated events can check if scaling down
+    def proceed_on_broken_event(self, event) -> bool:
+        """Returns True if the relation broken event is due to a removed relation, not a scale down."""
+        # Only relation_departed events can check if scaling down
         departed_relation_id = event.relation.id
         if not self.has_departed_run(departed_relation_id):
@@ -1334,16 +1332,16 @@
                 "Deferring, must wait for relation departed hook to decide if relation should be removed."
             )
             event.defer()
-            return
+            return False
 
         # check if were scaling down and add a log message
         if self.is_scaling_down(departed_relation_id):
             logger.info(
                 "Relation broken event occurring due to scale down, do not proceed to remove users."
             )
-            return
+            return False
 
-        return departed_relation_id
+        return True
 
     @staticmethod
     def _generate_relation_departed_key(rel_id: int) -> str: