Skip to content

Commit

Permalink
Merge pull request #720 from meejah/711.conflict-naming
Browse files Browse the repository at this point in the history
Name conflicts after participant (not author) #711
  • Loading branch information
meejah authored Jun 1, 2023
2 parents c363f22 + 3953f2c commit f6e5a37
Show file tree
Hide file tree
Showing 7 changed files with 90 additions and 52 deletions.
1 change: 1 addition & 0 deletions newsfragments/711.feature
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Conflict files now named after the Participant (not Author)
10 changes: 5 additions & 5 deletions src/magic_folder/downloader.py
Original file line number Diff line number Diff line change
Expand Up @@ -571,17 +571,17 @@ def _poll_collective(self):
for relpath, file_data in files.items():
if self._is_remote_update(relpath, file_data.snapshot_cap):
self._status.download_queued(self._config.name, relpath)
updates.append((relpath, file_data.snapshot_cap))
updates.append((relpath, file_data.snapshot_cap, participant))

# allow for parallel downloads
# (we could de-duplicate snapshots here, but the state-machine has to anyway)
yield gatherResults([
self._process_snapshot(relpath, snapshot_cap)
for relpath, snapshot_cap in updates
self._process_snapshot(relpath, snapshot_cap, participant)
for relpath, snapshot_cap, participant in updates
])

@inline_callbacks
def _process_snapshot(self, relpath, snapshot_cap):
def _process_snapshot(self, relpath, snapshot_cap, participant):
"""
Internal helper.
Expand Down Expand Up @@ -619,4 +619,4 @@ def _process_snapshot(self, relpath, snapshot_cap):
relpath=snapshot.relpath,
)
else:
yield maybeDeferred(mf.found_new_remote, snapshot)
yield maybeDeferred(mf.found_new_remote, snapshot, participant)
60 changes: 30 additions & 30 deletions src/magic_folder/magic_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,15 +258,15 @@ def is_empty(arg):
d.addCallback(is_empty)
return d

def found_new_remote(self, remote_snapshot):
def found_new_remote(self, remote_snapshot, participant):
"""
A RemoteSnapshot that doesn't match our existing database entry
has been found. It will be downloaded and applied (possibly
resulting in conflicts).
:param RemoteSnapshot remote_snapshot: the newly-discovered remote
"""
self._remote_update(remote_snapshot)
self._remote_update(remote_snapshot, participant)
return self.when_idle()

def local_snapshot_exists(self, local_snapshot):
Expand Down Expand Up @@ -389,20 +389,20 @@ def _no_upload_work(self, snapshot):
"""

@_machine.input()
def _download_mismatch(self, snapshot, staged_path):
def _download_mismatch(self, snapshot, staged_path, participant):
"""
The local file does not match what we expect given database state
"""

@_machine.input()
def _download_matches(self, snapshot, staged_path, local_pathstate):
def _download_matches(self, snapshot, staged_path, local_pathstate, participant):
"""
The local file (if any) matches what we expect given database
state
"""

@_machine.input()
def _remote_update(self, snapshot):
def _remote_update(self, snapshot, participant):
"""
The file has a remote update.
"""
Expand All @@ -424,7 +424,7 @@ def _existing_conflict(self):
"""

@_machine.input()
def _download_completed(self, snapshot, staged_path):
def _download_completed(self, snapshot, staged_path, participant):
"""
A remote Snapshot has been downloaded
"""
Expand Down Expand Up @@ -454,7 +454,7 @@ def _fatal_error_download(self, snapshot):
"""

@_machine.input()
def _queued_download(self, snapshot):
def _queued_download(self, snapshot, participant):
"""
There is queued RemoteSnapshot work
"""
Expand All @@ -472,13 +472,13 @@ def _conflict_resolution(self, snapshot):
"""

@_machine.input()
def _ancestor_matches(self, snapshot, staged_path):
def _ancestor_matches(self, snapshot, staged_path, participant):
"""
snapshot is our ancestor
"""

@_machine.input()
def _ancestor_mismatch(self, snapshot, staged_path):
def _ancestor_mismatch(self, snapshot, staged_path, participant):
"""
snapshot is not our ancestor
"""
Expand All @@ -496,12 +496,12 @@ def _cancel(self, snapshot):
"""

@_machine.output()
def _begin_download(self, snapshot):
def _begin_download(self, snapshot, participant):
"""
Download a given Snapshot (including its content)
"""
def downloaded(staged_path):
self._call_later(self._download_completed, snapshot, staged_path)
self._call_later(self._download_completed, snapshot, staged_path, participant)

retry_delay_sequence = _delay_sequence()

Expand Down Expand Up @@ -554,7 +554,7 @@ def clean(arg):
return perform_download()

@_machine.output()
def _check_local_update(self, snapshot, staged_path):
def _check_local_update(self, snapshot, staged_path, participant):
"""
Detect a 'last minute' change by comparing the state of our local
file to that of the database.
Expand Down Expand Up @@ -588,20 +588,20 @@ def _check_local_update(self, snapshot, staged_path):
# now, determine if we've found a local update
if current_pathstate is None:
if local_pathinfo.exists:
self._call_later(self._download_mismatch, snapshot, staged_path)
self._call_later(self._download_mismatch, snapshot, staged_path, participant)
return
else:
# we've seen this file before so its pathstate should
# match what we expect according to the database .. or
# else some update happened meantime.
if current_pathstate != local_pathinfo.state:
self._call_later(self._download_mismatch, snapshot, staged_path)
self._call_later(self._download_mismatch, snapshot, staged_path, participant)
return

self._call_later(self._download_matches, snapshot, staged_path, local_pathinfo.state)
self._call_later(self._download_matches, snapshot, staged_path, local_pathinfo.state, participant)

@_machine.output()
def _check_ancestor(self, snapshot, staged_path):
def _check_ancestor(self, snapshot, staged_path, participant):
"""
Check if the ancestor for this remote update is correct or not.
"""
Expand All @@ -624,13 +624,13 @@ def _check_ancestor(self, snapshot, staged_path):
Message.log(
message_type="ancestor_mismatch",
)
self._call_later(self._ancestor_mismatch, snapshot, staged_path)
self._call_later(self._ancestor_mismatch, snapshot, staged_path, participant)
return
self._call_later(self._ancestor_matches, snapshot, staged_path)
self._call_later(self._ancestor_matches, snapshot, staged_path, participant)
return

@_machine.output()
def _perform_remote_update(self, snapshot, staged_path, local_pathstate):
def _perform_remote_update(self, snapshot, staged_path, local_pathstate, participant):
"""
Resolve a remote update locally
Expand Down Expand Up @@ -683,7 +683,7 @@ def _perform_remote_update(self, snapshot, staged_path, local_pathstate):
# emergency data to be in the conflict file .. maybe
# this should just be the original tmpfile and we
# shouldn't mess with it further?
self._call_later(self._download_mismatch, snapshot, e.path)
self._call_later(self._download_mismatch, snapshot, e.path, participant)
return

# Note, if we crash here (after moving the file into place but
Expand Down Expand Up @@ -776,7 +776,7 @@ def _status_download_finished(self):
def _cancel_queued_work(self):
for d in self._queue_local:
d.cancel()
for d in self._queue_remote:
for d, _, _ in self._queue_remote:
d.cancel()

@_machine.output()
Expand Down Expand Up @@ -853,13 +853,13 @@ def got_remote(remote):
return d

@_machine.output()
def _mark_download_conflict(self, snapshot, staged_path):
def _mark_download_conflict(self, snapshot, staged_path, participant):
"""
Mark a conflict for this remote snapshot
"""
conflict_path = "{}.conflict-{}".format(
self._relpath,
snapshot.author.name
participant.name,
)
self._factory._magic_fs.mark_conflict(self._relpath, conflict_path, staged_path)
self._factory._config.add_conflict(snapshot)
Expand Down Expand Up @@ -960,18 +960,18 @@ def got_snap(snap):
return ret_d

@_machine.output()
def _queue_remote_update(self, snapshot):
def _queue_remote_update(self, snapshot, participant):
"""
Save this remote snapshot for later processing (in _check_for_remote_work)
"""
# skip queueing this download if we already have this snapshot
# ahead in the queue
for _, queued_snap in self._queue_remote:
for _, queued_snap, _ in self._queue_remote:
if snapshot == queued_snap:
# same return-value as _begin_download: None
return succeed(None)
d = Deferred()
self._queue_remote.append((d, snapshot))
self._queue_remote.append((d, snapshot, participant))
return d

@_machine.output()
Expand All @@ -995,12 +995,12 @@ def _check_for_remote_work(self):
Inject any saved remote updates.
"""
if self._queue_remote:
d, snapshot = self._queue_remote.pop(0)
d, snapshot, participant = self._queue_remote.pop(0)

def do_remote_update(done_d, snap):
update_d = self._queued_download(snap)
def do_remote_update(done_d, snap, part):
update_d = self._queued_download(snap, part)
update_d.addBoth(done_d.callback)
self._call_later(do_remote_update, d, snapshot)
self._call_later(do_remote_update, d, snapshot, participant)
return
self._call_later(self._no_download_work, None)

Expand Down
22 changes: 17 additions & 5 deletions src/magic_folder/participants.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,7 @@ class _StaticParticipant(object):
"""
An in-memory IParticipant provider
"""
name = attr.ib()
my_files = attr.ib() # dict: str -> SnapshotEntry
_is_self = attr.ib(default=False)

Expand Down Expand Up @@ -163,9 +164,9 @@ class _StaticParticipants(object):
def __attrs_post_init__(self):
if self.participants is None:
self.participants = [
_StaticParticipant([], True),
_StaticParticipant("default", {}, True),
]
assert len([p.is_self() for p in self.participants]) == 1, "Must have exactly one 'self' participant"
assert len([p for p in self.participants if p.is_self()]) == 1, "Must have exactly one 'self' participant"

def list(self):
return self.participants
Expand All @@ -179,15 +180,26 @@ def add(self, author, personal_dmd_cap):
)


def static_participants(my_files=None, other_files=None):
def static_participants(my_files=None, other_files=None, names=None):
"""
An ``IParticipants`` provider. Usually for testing.
"""
# XXX name, dircap, is_self for participants is public?
def name_generator():
number = 0
while True:
yield "name_{}".format(number)
number += 1
if names is None:
names = name_generator()
else:
# turn it into a generator too
names = (n for n in names)

writer = _StaticWriteableParticipant()
reader = _StaticParticipant(my_files or [], True)
reader = _StaticParticipant(next(names), my_files or [], True)
others = [
_StaticParticipant(files, False)
_StaticParticipant(next(names), files, False)
for files in (other_files or [])
]
return _StaticParticipants(writer, [reader] + others)
Expand Down
Loading

0 comments on commit f6e5a37

Please sign in to comment.