Skip to content

Commit

Permalink
WIP: fixes. Also assume the v3.4 branch is broken for a while since i…
Browse files Browse the repository at this point in the history
…m doing heavy refactoring
  • Loading branch information
psy0rz committed Sep 24, 2024
1 parent 35b079a commit 9a48851
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 13 deletions.
2 changes: 1 addition & 1 deletion tests/test_zfsautobackup.py
Original file line number Diff line number Diff line change
Expand Up @@ -823,7 +823,7 @@ def test_keep0(self):

#make another backup but with no-holds. we should naturally endup with only number 3
with mocktime("20101111000003"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty --debug".split(" ")).run())

r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
Expand Down
34 changes: 26 additions & 8 deletions zfs_autobackup/ZfsDataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -491,6 +491,15 @@ def snapshots(self):

return self.__snapshots

def cache_snapshot(self, snapshot):
    """Append a snapshot to our cached snapshot list, when such a cache exists.

    A no-op when the snapshot list was never populated, so calling this
    does not force the cache to be filled.

    Args:
        :type snapshot: ZfsDataset
    """

    cached_list = self.__snapshots
    if cached_list is None:
        return
    cached_list.append(snapshot)

@property
def our_snapshots(self):
"""get list[snapshots] creates by us of this dataset"""
Expand Down Expand Up @@ -521,9 +530,12 @@ def find_snapshot(self, snapshot):
Args:
:rtype: ZfsDataset|None
:type snapshot: str or ZfsDataset
:type snapshot: str|ZfsDataset|None
"""

if snapshot is None:
return None

if not isinstance(snapshot, ZfsDataset):
snapshot_name = snapshot
else:
Expand Down Expand Up @@ -1140,7 +1152,11 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
source_snapshot = self.find_next_snapshot(source_snapshot, False)

#Now the thinner can decide which snapshots we want on the target, by looking at the whole picture:
(target_keeps, target_obsoletes)=target_dataset.zfs_node.thin_list(possible_target_snapshots, keep_snapshots=[possible_target_snapshots[-1]])
if possible_target_snapshots:
(target_keeps, target_obsoletes)=target_dataset.zfs_node.thin_list(possible_target_snapshots, keep_snapshots=[possible_target_snapshots[-1]])
else:
target_keeps = []
target_obsoletes = []

#Create a list of all the target snapshots we want, that don't exist yet
target_transfers=[]
Expand Down Expand Up @@ -1213,13 +1229,12 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
# keep data encrypted by sending it raw (including properties)
raw = True

#note: only target_obsoletes is used during sync, to check if target doesnt want the snapshot
(source_common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_transfers,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
guid_check=guid_check, raw=raw)

# NOTE: we do this because we dont want filesystems to fillup when backups keep failing.
# NOTE: we do a pre-clean because we dont want filesystems to fillup when backups keep failing.
# Also usefull with no_send to still cleanup stuff.
self._pre_clean(
source_common_snapshot=source_common_snapshot, target_dataset=target_dataset,
Expand All @@ -1246,6 +1261,8 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert

# now actually transfer the snapshots
prev_source_snapshot = source_common_snapshot
prev_target_snapshot=target_dataset.find_snapshot(prev_source_snapshot)

source_snapshot = start_snapshot
do_rollback = rollback
while source_snapshot:
Expand All @@ -1272,22 +1289,23 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
target_snapshot.hold()
source_snapshot.hold()

if prev_source_snapshot:
if holds:
if prev_source_snapshot:
prev_source_snapshot.release()
target_dataset.find_snapshot(prev_source_snapshot).release()

if prev_target_snapshot:
prev_target_snapshot.release()

# we may now destroy the previous source snapshot if its obsolete
if prev_source_snapshot in source_obsoletes:
prev_source_snapshot.destroy()

# destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
# the rest was already destroyed or will not be send)
prev_target_snapshot = target_dataset.find_snapshot(prev_source_snapshot)
if prev_target_snapshot in target_obsoletes:
prev_target_snapshot.destroy()

prev_source_snapshot = source_snapshot
prev_target_snapshot = target_snapshot

source_snapshot = self.find_next_snapshot(source_snapshot, also_other_snapshots)

Expand Down
5 changes: 1 addition & 4 deletions zfs_autobackup/ZfsNode.py
Original file line number Diff line number Diff line change
Expand Up @@ -231,10 +231,7 @@ def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_sn

pools[pool].append(snapshot)

# update cache, but try to prevent an unneeded zfs list
# if self.readonly or CachedProperty.is_cached(dataset, 'snapshots'):
#dataset.snapshots.append(snapshot) # NOTE: this will trigger zfs list if its not cached
dataset.invalidate()
dataset.cache_snapshot(snapshot)

if not pools:
self.verbose("No changes anywhere: not creating snapshots.")
Expand Down

0 comments on commit 9a48851

Please sign in to comment.