feat: new BACKUP2 transfer protocol
The new protocol streams the backup .tar archive into an iroh-net stream without traversing all the files first. Reception over the old backup protocol is still supported to allow transferring backups from old devices to new ones, but not vice versa.
link2xt authored Jul 19, 2024
1 parent eb669af commit d6d90db
Showing 12 changed files with 341 additions and 274 deletions.
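The core idea of BACKUP2 is that the sender writes a tar archive straight into the outgoing stream while the receiver unpacks entries as they arrive, so neither side needs a temporary file or an upfront walk over all files to compute sizes. The following minimal, self-contained sketch illustrates that technique with the tokio-tar and futures APIs also used in src/imex.rs below; an in-memory duplex pipe stands in for the iroh-net stream, and the file names are arbitrary placeholders. This is an illustration of the approach, not Delta Chat's actual transfer code.

```rust
use anyhow::Result;
use futures::TryStreamExt;
use tokio::io::AsyncWriteExt;

#[tokio::main]
async fn main() -> Result<()> {
    // In-memory pipe standing in for the network connection:
    // `tx` plays the sender's stream, `rx` the receiver's.
    let (tx, rx) = tokio::io::duplex(64 * 1024);

    // Sender: append entries one by one; the archive is produced
    // incrementally, without knowing the full file list or size up front.
    let sender = tokio::spawn(async move {
        let mut builder = tokio_tar::Builder::new(tx);
        builder
            .append_path_with_name("Cargo.toml", "backup/Cargo.toml") // any existing file
            .await?;
        let mut stream = builder.into_inner().await?; // writes the tar footer
        stream.shutdown().await?; // signal EOF to the receiver
        anyhow::Ok(())
    });

    // Receiver: unpack entries as they arrive on the stream.
    let receiver = tokio::spawn(async move {
        tokio::fs::create_dir_all("received").await?;
        let mut archive = tokio_tar::Archive::new(rx);
        let mut entries = archive.entries()?;
        while let Some(mut entry) = entries.try_next().await? {
            println!("received {}", entry.path()?.display());
            entry.unpack_in("received").await?;
        }
        anyhow::Ok(())
    });

    sender.await??;
    receiver.await??;
    Ok(())
}
```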
2 changes: 2 additions & 0 deletions deltachat-ffi/deltachat.h
@@ -2504,6 +2504,7 @@ void dc_stop_ongoing_process (dc_context_t* context);
#define DC_QR_FPR_WITHOUT_ADDR 230 // test1=formatted fingerprint
#define DC_QR_ACCOUNT 250 // text1=domain
#define DC_QR_BACKUP 251
#define DC_QR_BACKUP2 252
#define DC_QR_WEBRTC_INSTANCE 260 // text1=domain, text2=instance pattern
#define DC_QR_ADDR 320 // id=contact
#define DC_QR_TEXT 330 // text1=text
@@ -2550,6 +2551,7 @@ void dc_stop_ongoing_process (dc_context_t* context);
* if so, call dc_set_config_from_qr() and then dc_configure().
*
* - DC_QR_BACKUP:
* - DC_QR_BACKUP2:
* ask the user if they want to set up a new device.
* If so, pass the qr-code to dc_receive_backup().
*
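On the Rust side, the second-device flow described in the header docs above looks roughly like the sketch below: parse the scanned text with qr::check_qr(), and when it yields the new Backup2 variant (or the old Backup one), ask the user and hand it to the backup receiver. The receive entry point is dc_receive_backup() in the C API; its core counterpart is assumed here to be imex::get_backup, which does not appear in this excerpt, so treat that import path as an assumption.

```rust
use anyhow::{bail, Result};
use deltachat::context::Context;
// Assumed re-export; the receive function may live in `imex::transfer` instead.
use deltachat::imex::get_backup;
use deltachat::qr::{check_qr, Qr};

/// Sketch of the flow described in the header docs above.
async fn handle_scanned_qr(ctx: &Context, scanned: &str) -> Result<()> {
    match check_qr(ctx, scanned).await? {
        // New devices understand both the old and the new backup QR codes.
        qr @ (Qr::Backup { .. } | Qr::Backup2 { .. }) => {
            // In a real UI, ask the user whether to set up this device first.
            get_backup(ctx, qr).await?;
            Ok(())
        }
        _ => bail!("not a backup transfer QR code"),
    }
}
```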
2 changes: 1 addition & 1 deletion deltachat-ffi/src/lib.rs
@@ -4364,7 +4364,7 @@ pub unsafe extern "C" fn dc_backup_provider_wait(provider: *mut dc_backup_provid
let ctx = &*ffi_provider.context;
let provider = &mut ffi_provider.provider;
block_on(provider)
.context("Failed to await BackupProvider")
.context("Failed to await backup provider")
.log_err(ctx)
.set_last_error(ctx)
.ok();
5 changes: 5 additions & 0 deletions deltachat-ffi/src/lot.rs
@@ -50,6 +50,7 @@ impl Lot {
Qr::FprWithoutAddr { fingerprint, .. } => Some(fingerprint),
Qr::Account { domain } => Some(domain),
Qr::Backup { .. } => None,
Qr::Backup2 { .. } => None,
Qr::WebrtcInstance { domain, .. } => Some(domain),
Qr::Addr { draft, .. } => draft.as_deref(),
Qr::Url { url } => Some(url),
@@ -102,6 +103,7 @@ impl Lot {
Qr::FprWithoutAddr { .. } => LotState::QrFprWithoutAddr,
Qr::Account { .. } => LotState::QrAccount,
Qr::Backup { .. } => LotState::QrBackup,
Qr::Backup2 { .. } => LotState::QrBackup2,
Qr::WebrtcInstance { .. } => LotState::QrWebrtcInstance,
Qr::Addr { .. } => LotState::QrAddr,
Qr::Url { .. } => LotState::QrUrl,
@@ -127,6 +129,7 @@ impl Lot {
Qr::FprWithoutAddr { .. } => Default::default(),
Qr::Account { .. } => Default::default(),
Qr::Backup { .. } => Default::default(),
Qr::Backup2 { .. } => Default::default(),
Qr::WebrtcInstance { .. } => Default::default(),
Qr::Addr { contact_id, .. } => contact_id.to_u32(),
Qr::Url { .. } => Default::default(),
@@ -177,6 +180,8 @@ pub enum LotState {

QrBackup = 251,

QrBackup2 = 252,

/// text1=domain, text2=instance pattern
QrWebrtcInstance = 260,

13 changes: 13 additions & 0 deletions deltachat-jsonrpc/src/api/types/qr.rs
@@ -35,6 +35,11 @@ pub enum QrObject {
Backup {
ticket: String,
},
Backup2 {
auth_token: String,

node_addr: String,
},
WebrtcInstance {
domain: String,
instance_pattern: String,
@@ -132,6 +137,14 @@ impl From<Qr> for QrObject {
Qr::Backup { ticket } => QrObject::Backup {
ticket: ticket.to_string(),
},
Qr::Backup2 {
ref node_addr,
auth_token,
} => QrObject::Backup2 {
node_addr: serde_json::to_string(node_addr).unwrap_or_default(),

auth_token,
},
Qr::WebrtcInstance {
domain,
instance_pattern,
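Note that node_addr reaches JSON-RPC clients as a nested JSON string (the serde serialization of the iroh-net node address) rather than as a structured object. A small sketch of turning it back into a typed value on the Rust side, assuming iroh_net::NodeAddr also implements Deserialize (only Serialize is evidenced by the to_string() call above):

```rust
use anyhow::{Context as _, Result};
use iroh_net::NodeAddr; // assumed to derive serde Deserialize as well as Serialize

/// Parses the `node_addr` string carried in `QrObject::Backup2`.
fn parse_node_addr(node_addr: &str) -> Result<NodeAddr> {
    serde_json::from_str(node_addr).context("Failed to parse node_addr from backup QR payload")
}
```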
1 change: 1 addition & 0 deletions node/constants.js
@@ -128,6 +128,7 @@ module.exports = {
DC_QR_ASK_VERIFYCONTACT: 200,
DC_QR_ASK_VERIFYGROUP: 202,
DC_QR_BACKUP: 251,
DC_QR_BACKUP2: 252,
DC_QR_ERROR: 400,
DC_QR_FPR_MISMATCH: 220,
DC_QR_FPR_OK: 210,
1 change: 1 addition & 0 deletions node/lib/constants.ts
@@ -128,6 +128,7 @@ export enum C {
DC_QR_ASK_VERIFYCONTACT = 200,
DC_QR_ASK_VERIFYGROUP = 202,
DC_QR_BACKUP = 251,
DC_QR_BACKUP2 = 252,
DC_QR_ERROR = 400,
DC_QR_FPR_MISMATCH = 220,
DC_QR_FPR_OK = 210,
2 changes: 0 additions & 2 deletions python/tests/test_1_online.py
@@ -1562,8 +1562,6 @@ def assert_account_is_proper(ac):

# check progress events for import
assert imex_tracker.wait_progress(1, progress_upper_limit=249)
assert imex_tracker.wait_progress(500, progress_upper_limit=749)
assert imex_tracker.wait_progress(750, progress_upper_limit=999)
assert imex_tracker.wait_progress(1000)

assert_account_is_proper(ac1)
86 changes: 63 additions & 23 deletions src/imex.rs
@@ -6,7 +6,7 @@ use std::path::{Path, PathBuf};
use ::pgp::types::KeyTrait;
use anyhow::{bail, ensure, format_err, Context as _, Result};
use deltachat_contact_tools::EmailAddress;
use futures::StreamExt;
use futures::TryStreamExt;
use futures_lite::FutureExt;

use tokio::fs::{self, File};
@@ -269,24 +269,56 @@ async fn import_backup(
context.get_dbfile().display()
);

import_backup_stream(context, backup_file, file_size, passphrase).await?;
Ok(())
}

/// Imports backup by reading a tar file from a stream.
///
/// `file_size` is used to calculate the progress
/// and emit progress events.
/// Ideally it is the sum of the entry
/// sizes without the header overhead,
/// but can be estimated as tar file size
/// in which case the progress is underestimated
/// and may not reach 99.9% by the end of import.
/// Underestimating is better than
/// overestimating because the progress
/// jumps to 100% instead of getting stuck at 99.9%
/// for some time.
pub(crate) async fn import_backup_stream<R: tokio::io::AsyncRead + Unpin>(
context: &Context,
backup_file: R,
file_size: u64,
passphrase: String,
) -> Result<()> {
let mut archive = Archive::new(backup_file);

let mut entries = archive.entries()?;
let mut last_progress = 0;
while let Some(file) = entries.next().await {
let f = &mut file?;

let current_pos = f.raw_file_position();
let progress = 1000 * current_pos / file_size;
if progress != last_progress && progress > 10 && progress < 1000 {
// We already emitted ImexProgress(10) above

// We already emitted ImexProgress(10) above
let mut last_progress = 10;
let mut total_size = 0;
while let Some(mut f) = entries
.try_next()
.await
.context("Failed to get next entry")?
{
total_size += f.header().entry_size()?;
let progress = std::cmp::min(
1000 * total_size.checked_div(file_size).unwrap_or_default(),
999,
);
if progress > last_progress {
context.emit_event(EventType::ImexProgress(progress as usize));
last_progress = progress;
}

if f.path()?.file_name() == Some(OsStr::new(DBFILE_BACKUP_NAME)) {
// async_tar can't unpack to a specified file name, so we just unpack to the blobdir and then move the unpacked file.
f.unpack_in(context.get_blobdir()).await?;
f.unpack_in(context.get_blobdir())
.await
.context("Failed to unpack database")?;
let unpacked_database = context.get_blobdir().join(DBFILE_BACKUP_NAME);
context
.sql
@@ -298,7 +330,9 @@ async fn import_backup(
.context("cannot remove unpacked database")?;
} else {
// async_tar will unpack to blobdir/BLOBS_BACKUP_NAME, so we move the file afterwards.
f.unpack_in(context.get_blobdir()).await?;
f.unpack_in(context.get_blobdir())
.await
.context("Failed to unpack blob")?;
let from_path = context.get_blobdir().join(f.path()?);
if from_path.is_file() {
if let Some(name) = from_path.file_name() {
@@ -375,34 +409,40 @@ async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Res
dest_path.display(),
);

export_backup_inner(context, &temp_db_path, &temp_path).await?;
let file = File::create(&temp_path).await?;
let blobdir = BlobDirContents::new(context).await?;
export_backup_stream(context, &temp_db_path, blobdir, file)
.await
.context("Exporting backup to file failed")?;
fs::rename(temp_path, &dest_path).await?;
context.emit_event(EventType::ImexFileWritten(dest_path));
Ok(())
}

async fn export_backup_inner(
context: &Context,
/// Exports the database and blobs into a stream.
pub(crate) async fn export_backup_stream<'a, W>(
context: &'a Context,
temp_db_path: &Path,
temp_path: &Path,
) -> Result<()> {
let file = File::create(temp_path).await?;

let mut builder = tokio_tar::Builder::new(file);
blobdir: BlobDirContents<'a>,
writer: W,
) -> Result<()>
where
W: tokio::io::AsyncWrite + tokio::io::AsyncWriteExt + Unpin + Send + 'static,
{
let mut builder = tokio_tar::Builder::new(writer);

builder
.append_path_with_name(temp_db_path, DBFILE_BACKUP_NAME)
.await?;

let blobdir = BlobDirContents::new(context).await?;
let mut last_progress = 0;
let mut last_progress = 10;

for (i, blob) in blobdir.iter().enumerate() {
let mut file = File::open(blob.to_abs_path()).await?;
let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(blob.as_name());
builder.append_file(path_in_archive, &mut file).await?;
let progress = 1000 * i / blobdir.len();
if progress != last_progress && progress > 10 && progress < 1000 {
let progress = std::cmp::min(1000 * i / blobdir.len(), 999);
if progress > last_progress {
context.emit_event(EventType::ImexProgress(progress));
last_progress = progress;
}
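The doc comment on import_backup_stream() above describes how progress is estimated: bytes of entries processed so far, divided by a total that may be a slight overestimate (the raw tar size including header overhead), scaled to the 0..=1000 IMEX range and clamped below 1000 so the bar only reaches 100% when the import actually completes. A small stand-alone helper illustrating that strategy; it is an illustration of what the comment describes, not the exact expression used in the diff:

```rust
/// Scales `done` bytes against an estimated `total` to the 1..=999 range used
/// for ImexProgress events; 0 and 1000 are reserved for failure and success.
fn progress_estimate(done: u64, total: u64) -> u64 {
    if total == 0 {
        return 999;
    }
    (1000 * done / total).min(999)
}

fn main() {
    // With `total` taken as the raw tar file size, the estimate is slightly
    // low: 900 KiB of entry data out of a 1 MiB archive reports 87.8%, so the
    // progress never overshoots before the import is done.
    assert_eq!(progress_estimate(900 * 1024, 1024 * 1024), 878);
    // Even if the estimate overshoots, it is clamped below 1000.
    assert_eq!(progress_estimate(2_000_000, 1_000_000), 999);
}
```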