Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add verity digests for exported block device #1176

Merged
merged 2 commits into from
Mar 31, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 68 additions & 7 deletions service/src/block_device.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ use std::cmp::{max, min};
use std::fs::OpenOptions;
use std::io::Result;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use std::thread;
use std::thread::JoinHandle;

Expand All @@ -25,6 +25,9 @@ use nydus_rafs::metadata::layout::v6::{
EROFS_BLOCK_BITS_12, EROFS_BLOCK_BITS_9, EROFS_BLOCK_SIZE_4096, EROFS_BLOCK_SIZE_512,
};
use nydus_storage::utils::alloc_buf;
use nydus_utils::digest::{self, RafsDigest};
use nydus_utils::round_up;
use nydus_utils::verity::VerityGenerator;
use tokio_uring::buf::IoBufMut;

use crate::blob_cache::{generate_blob_key, BlobCacheMgr, BlobConfig, DataBlob, MetaBlob};
Expand Down Expand Up @@ -287,6 +290,7 @@ impl BlockDevice {
output: Option<String>,
localfs_dir: Option<String>,
threads: u32,
verity: bool,
) -> Result<()> {
let cache_mgr = Arc::new(BlobCacheMgr::new());
cache_mgr.add_blob_entry(&blob_entry).map_err(|e| {
Expand All @@ -303,6 +307,7 @@ impl BlockDevice {
))
})?;
let block_device = Arc::new(block_device);
let blocks = block_device.blocks();

let path = match output {
Some(v) => PathBuf::from(v),
Expand Down Expand Up @@ -353,7 +358,27 @@ impl BlockDevice {
})?;
let output_file = Arc::new(tokio_uring::fs::File::from_std(output_file));

let blocks = block_device.blocks();
let mut verity_offset = 0;
let generator = if verity {
let file = OpenOptions::new()
.read(true)
.write(true)
.open(&path)
.map_err(|e| {
eother!(format!(
"block_device: failed to create output file {}, {}",
path.display(),
e
))
})?;
verity_offset = round_up(block_device.blocks_to_size(blocks), 4096);
let mut generator = VerityGenerator::new(file, verity_offset, blocks)?;
generator.initialize()?;
Some(Arc::new(Mutex::new(generator)))
} else {
None
};

let batch_size = BLOCK_DEVICE_EXPORT_BATCH_SIZE as u32 / block_device.block_size() as u32;
assert_eq!(batch_size.count_ones(), 1);
let threads = max(threads, 1);
Expand All @@ -363,8 +388,10 @@ impl BlockDevice {
}

if threads == 1 {
let generator = generator.clone();
let block_device = block_device.clone();
tokio_uring::start(async move {
Self::do_export(block_device.clone(), output_file, 0, block_device.blocks()).await
Self::do_export(block_device, output_file, 0, blocks, generator).await
})?;
} else {
let mut thread_handlers: Vec<JoinHandle<Result<()>>> =
Expand All @@ -377,6 +404,7 @@ impl BlockDevice {
let mgr = cache_mgr.clone();
let id = blob_id.clone();
let path = path.to_path_buf();
let generator = generator.clone();

let handler = thread::spawn(move || {
let output_file = OpenOptions::new()
Expand All @@ -399,9 +427,9 @@ impl BlockDevice {
})?;
let device = Arc::new(block_device);

tokio_uring::start(
async move { Self::do_export(device, file, pos, count).await },
)?;
tokio_uring::start(async move {
Self::do_export(device, file, pos, count, generator).await
})?;
Ok(())
});
pos += count;
Expand All @@ -424,6 +452,21 @@ impl BlockDevice {
})?;
}
}

if let Some(generator) = generator.as_ref() {
let mut guard = generator.lock().unwrap();
let root_digest = guard.generate_all_digests()?;
let root_digest: String = root_digest
.data
.iter()
.map(|v| format!("{:02x}", v))
.collect();
println!(
"dm-verity options: --no-superblock --format=1 -s \"\" --hash=sha256 --data-block-size={} --hash-block-size=4096 --data-blocks {} --hash-offset {} {}",
block_device.block_size(), blocks, verity_offset, root_digest
);
}

Ok(())
}

Expand All @@ -432,16 +475,18 @@ impl BlockDevice {
output_file: Arc<tokio_uring::fs::File>,
start: u32,
mut blocks: u32,
generator: Option<Arc<Mutex<VerityGenerator>>>,
) -> Result<()> {
let batch_size = BLOCK_DEVICE_EXPORT_BATCH_SIZE as u32 / block_device.block_size() as u32;
let block_size = block_device.block_size() as usize;
let mut pos = start;
let mut buf = alloc_buf(BLOCK_DEVICE_EXPORT_BATCH_SIZE);

while blocks > 0 {
let count = min(batch_size, blocks);
let (res, buf1) = block_device.async_read(pos, count, buf).await;
let sz = res?;
if sz != count as usize * block_device.block_size() as usize {
if sz != count as usize * block_size {
return Err(eio!(
"block_device: failed to read data, got less data than requested"
));
Expand All @@ -462,6 +507,22 @@ impl BlockDevice {
}
buf = buf2;

// Generate Merkle tree leaf nodes.
if let Some(generator) = generator.as_ref() {
let mut page_idx = (block_device.blocks_to_size(pos) / block_size as u64) as u32;
let mut offset = 0;
while offset < buf.len() {
let digest = RafsDigest::from_buf(
&buf[offset..offset + block_size],
digest::Algorithm::Sha256,
);
let mut guard = generator.lock().unwrap();
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we hoist `generator.lock().unwrap()` out of the while loop, so the lock is not re-acquired on every block?

guard.set_digest(1, page_idx, &digest.data)?;
offset += block_size;
page_idx += 1;
}
}

pos += count;
blocks -= count;
}
Expand Down
11 changes: 10 additions & 1 deletion src/bin/nydus-image/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -479,6 +479,14 @@ fn prepare_cmd_args(bti_string: &'static str) -> App {
.help("File path for saving the exported content")
.required_unless_present("localfs-dir")
)
.arg(
Arg::new("verity")
.long("verity")
.help("Generate dm-verity data for block device")
.action(ArgAction::SetTrue)
.required(false)
.requires("block")
)
);

let app = app.subcommand(
Expand Down Expand Up @@ -1558,8 +1566,9 @@ impl Command {
.map(|n| n.parse().unwrap_or(1))
.unwrap_or(1);
let output = subargs.value_of("output").map(|v| v.to_string());
let verity = subargs.is_present("verity");

BlockDevice::export(entry, output, localfs_dir, threads)
BlockDevice::export(entry, output, localfs_dir, threads, verity)
.context("failed to export RAFS filesystem as raw block device image")
}
}
Expand Down
22 changes: 22 additions & 0 deletions utils/src/filemap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,28 @@ impl FileMapState {
Ok(unsafe { std::slice::from_raw_parts(start as *const T, count) })
}

/// Get a mutable slice of `T` at `offset` with `count` entries.
///
/// Validates that the byte range `[offset, offset + count * size_of::<T>())`
/// lies entirely within the mapped file region before handing out the slice.
///
/// # Errors
///
/// Returns `EINVAL` when `count * size_of::<T>()` overflows or when the
/// requested range falls outside of the mapping.
pub fn get_slice_mut<T>(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
    let start = self.base.wrapping_add(offset);
    // Reject `count * size_of::<T>()` overflow before computing the byte size.
    if count.checked_mul(size_of::<T>()).is_none() {
        bail_einval!("count 0x{count:x} to get_slice_mut() is too big");
    }
    let size = count * size_of::<T>();
    // Reject `start + size` address-space overflow.
    if size.checked_add(start as usize).is_none() {
        bail_einval!(
            "invalid parameter to get_slice_mut(), offset 0x{offset:x}, count 0x{count:x}"
        );
    }
    let end = start.wrapping_add(size);
    // The whole range must be contained in [self.base, self.end].
    if start > end || start < self.base || end < self.base || end > self.end {
        bail_einval!(
            "invalid range in get_slice_mut, base 0x{:p}, start 0x{start:p}, end 0x{end:p}",
            self.base
        );
    }
    // SAFETY: the range [start, start + size) was validated above to lie
    // inside the live mapping, so it is valid for `count` entries of `T`.
    Ok(unsafe { std::slice::from_raw_parts_mut(start as *mut T, count) })
}

/// Check whether the range [offset, offset + size) is valid and return the start address.
pub fn validate_range(&self, offset: usize, size: usize) -> Result<*const u8> {
let start = self.base.wrapping_add(offset);
Expand Down
8 changes: 8 additions & 0 deletions utils/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ pub mod mpmc;
pub mod reader;
pub mod trace;
pub mod types;
pub mod verity;

/// Round up and divide the value `n` by `d`.
pub fn div_round_up(n: u64, d: u64) -> u64 {
Expand All @@ -48,6 +49,13 @@ pub fn round_up(n: u64, d: u64) -> u64 {
(n + d - 1) / d * d
}

/// Round up the value `n` to the next multiple of `d`.
///
/// `d` must be non-zero and a power of two (checked in debug builds only).
pub fn round_up_usize(n: usize, d: usize) -> usize {
    debug_assert!(d != 0);
    debug_assert!(d.is_power_of_two());
    (n + d - 1) / d * d
}

/// Overflow can fail this rounder if the base value is large enough with 4095 added.
pub fn try_round_up_4k<U: TryFrom<u64>, T: Into<u64>>(x: T) -> Option<U> {
let t = 4095u64;
Expand Down
Loading