diff --git a/Cargo.toml b/Cargo.toml index 3b1224d..5a9dc27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "camera-intrinsic-calibration" -version = "0.1.4" +version = "0.1.5" edition = "2021" authors = ["Powei Lin "] readme = "README.md" @@ -28,6 +28,8 @@ rand = "0.8.5" rand_chacha = "0.3.1" rayon = "1.10.0" rerun = "0.17.0" +serde = { version = "1.0.215", features = ["derive"] } +serde_json = "1.0.133" sqpnp_simple = "0.1.3" tiny-solver = "0.10.0" diff --git a/README.md b/README.md index e7a1bff..3142760 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,14 @@ ccrs dataset-calib-cam1_1024_16 --model eucm    │   └── {time_stamp}.png    └── data.csv ``` +* General `--dataset-format general` + ``` + dataset_root + └── cam0 +    ├── any_file_name.png +    ├── any_file_name.png +    └── any_file_name.png + ``` ### Camera models * Extended Unified (EUCM) * Extended Unified with Tangential (EUCMT) diff --git a/data/board_config.json b/data/board_config.json new file mode 100644 index 0000000..1c8e333 --- /dev/null +++ b/data/board_config.json @@ -0,0 +1,6 @@ +{ + "tag_size_meter": 0.088, + "tag_spacing": 0.3, + "tag_rows": 5, + "tag_cols": 9 +} \ No newline at end of file diff --git a/examples/convert_model.rs b/examples/convert_model.rs index 04a01ed..348c9c2 100644 --- a/examples/convert_model.rs +++ b/examples/convert_model.rs @@ -21,7 +21,7 @@ fn main() { source_model.width().round() as u32, source_model.height().round() as u32, )); - convert_model(&source_model, &mut target_model); + convert_model(&source_model, &mut target_model, 0); model_to_json("ucm.json", &target_model); let new_w_h = 1024; let p = target_model.estimate_new_camera_matrix_for_undistort(1.0, Some((new_w_h, new_w_h))); diff --git a/src/bin/camera_calibration.rs b/src/bin/camera_calibration.rs index 9692ee7..7cb3ae5 100644 --- a/src/bin/camera_calibration.rs +++ b/src/bin/camera_calibration.rs @@ -1,16 +1,25 @@ use aprilgrid::detector::TagDetector; use 
aprilgrid::TagFamily; -use camera_intrinsic_calibration::board::create_default_6x6_board; -use camera_intrinsic_calibration::data_loader::load_euroc; +use camera_intrinsic_calibration::board::Board; +use camera_intrinsic_calibration::board::{ + board_config_from_json, board_config_to_json, BoardConfig, +}; +use camera_intrinsic_calibration::data_loader::{load_euroc, load_general}; use camera_intrinsic_calibration::optimization::*; use camera_intrinsic_calibration::types::RvecTvec; use camera_intrinsic_calibration::util::*; use camera_intrinsic_calibration::visualization::*; use camera_intrinsic_model::*; -use clap::Parser; +use clap::{Parser, ValueEnum}; use log::trace; use std::time::Instant; +#[derive(Debug, Clone, Copy, ValueEnum)] +enum DatasetFormat { + Euroc, + General, +} + #[derive(Parser)] #[command(version, about, author)] struct CCRSCli { @@ -34,49 +43,84 @@ struct CCRSCli { #[arg(long, default_value_t = 600)] max_images: usize, + #[arg(long, default_value_t = 1)] + cam_num: usize, + + #[arg(long)] + board_config: Option, + #[arg(short, long, default_value = "output.json")] output_json: String, + + #[arg(long, value_enum, default_value = "euroc")] + dataset_format: DatasetFormat, + + #[arg(long, action)] + one_focal: bool, + + #[arg(long, default_value_t = 0)] + disabled_distortion_num: usize, } fn main() { env_logger::init(); let cli = CCRSCli::parse(); let detector = TagDetector::new(&cli.tag_family, None); - let board = create_default_6x6_board(); + let board = if let Some(board_config_path) = cli.board_config { + Board::from_config(&board_config_from_json(&board_config_path)) + } else { + let config = BoardConfig::default(); + board_config_to_json("default_board_config.json", &config); + Board::from_config(&config) + }; let dataset_root = &cli.path; let now = Instant::now(); let recording = rerun::RecordingStreamBuilder::new("calibration") .save("output.rrd") .unwrap(); trace!("Start loading data"); - let mut detected_feature_frames = load_euroc( - 
dataset_root, - &detector, - &board, - cli.start_idx, - cli.step, - Some(&recording), - ); - detected_feature_frames.truncate(cli.max_images); + let mut cams_detected_feature_frames = match cli.dataset_format { + DatasetFormat::Euroc => load_euroc( + dataset_root, + &detector, + &board, + cli.start_idx, + cli.step, + cli.cam_num, + Some(&recording), + ), + DatasetFormat::General => load_general( + dataset_root, + &detector, + &board, + cli.start_idx, + cli.step, + cli.cam_num, + Some(&recording), + ), + }; let duration_sec = now.elapsed().as_secs_f64(); println!("detecting feature took {:.6} sec", duration_sec); + println!("total: {} images", cams_detected_feature_frames[0].len()); + cams_detected_feature_frames[0].truncate(cli.max_images); println!( "avg: {} sec", - duration_sec / detected_feature_frames.len() as f64 + duration_sec / cams_detected_feature_frames[0].len() as f64 ); - log_frames(&recording, &detected_feature_frames); - let (frame0, frame1) = find_best_two_frames(&detected_feature_frames); - let key_frames = vec![ - detected_feature_frames[frame0].clone(), - detected_feature_frames[frame1].clone(), - ]; - log_frames(&recording, &key_frames); + for (cam_idx, feature_frames) in cams_detected_feature_frames.iter().enumerate() { + let topic = format!("/cam{}", cam_idx); + log_feature_frames(&recording, &topic, feature_frames); + } + let (frame0, frame1) = find_best_two_frames(&cams_detected_feature_frames[0]); + + let frame_feature0 = &cams_detected_feature_frames[0][frame0].clone().unwrap(); + let frame_feature1 = &cams_detected_feature_frames[0][frame1].clone().unwrap(); + + let key_frames = vec![Some(frame_feature0.clone()), Some(frame_feature1.clone())]; + log_feature_frames(&recording, "/cam0/key", &key_frames); // initialize focal length and undistorted p2d for init poses - let (lambda, h_mat) = radial_distortion_homography( - &detected_feature_frames[frame0], - &detected_feature_frames[frame1], - ); + let (lambda, h_mat) = 
radial_distortion_homography(frame_feature0, frame_feature1); // focal let f_option = homography_to_focal(&h_mat); if f_option.is_none() { @@ -86,8 +130,6 @@ fn main() { println!("focal {}", focal); // poses - let frame_feature0 = &detected_feature_frames[frame0]; - let frame_feature1 = &detected_feature_frames[frame1]; let (rvec0, tvec0) = rtvec_to_na_dvec(init_pose(frame_feature0, lambda)); let (rvec1, tvec1) = rtvec_to_na_dvec(init_pose(frame_feature1, lambda)); let rtvec0 = RvecTvec::new(rvec0, tvec0); @@ -107,16 +149,25 @@ fn main() { init_f, init_alpha, ); + println!("Initialized {:?}", initial_camera); let mut final_model = cli.model; final_model.set_w_h( initial_camera.width().round() as u32, initial_camera.height().round() as u32, ); - println!("{:?}", final_model); - convert_model(&initial_camera, &mut final_model); - println!("{:?}", final_model); + convert_model( + &initial_camera, + &mut final_model, + cli.disabled_distortion_num, + ); + println!("Converted {:?}", final_model); - let (final_result, _rtvec_list) = calib_camera(&detected_feature_frames, &final_model); - println!("{:?}", final_result); + let (final_result, _rtvec_list) = calib_camera( + &cams_detected_feature_frames[0], + &final_model, + cli.one_focal, + cli.disabled_distortion_num, + ); + println!("Final {:?}", final_result); model_to_json(&cli.output_json, &final_result); } diff --git a/src/board.rs b/src/board.rs index 631d859..346fecf 100644 --- a/src/board.rs +++ b/src/board.rs @@ -1,11 +1,51 @@ use glam; -use std::collections::HashMap; +use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, io::Write}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct BoardConfig { + tag_size_meter: f32, + tag_spacing: f32, + tag_rows: usize, + tag_cols: usize, +} + +pub fn board_config_to_json(output_path: &str, board_config: &BoardConfig) { + let j = serde_json::to_string_pretty(board_config).unwrap(); + let mut file = std::fs::File::create(output_path).unwrap(); + 
file.write_all(j.as_bytes()).unwrap(); +} + +pub fn board_config_from_json(file_path: &str) -> BoardConfig { + let contents = + std::fs::read_to_string(file_path).expect("Should have been able to read the file"); + serde_json::from_str(&contents).unwrap() +} + +impl Default for BoardConfig { + fn default() -> Self { + Self { + tag_size_meter: 0.088, + tag_spacing: 0.3, + tag_rows: 6, + tag_cols: 6, + } + } +} pub struct Board { pub id_to_3d: HashMap, } impl Board { + pub fn from_config(board_config: &BoardConfig) -> Board { + Self::init_aprilgrid( + board_config.tag_size_meter, + board_config.tag_spacing, + board_config.tag_rows, + board_config.tag_cols, + ) + } pub fn init_aprilgrid( tag_size_meter: f32, tag_spacing: f32, diff --git a/src/data_loader.rs b/src/data_loader.rs index e7e54a0..824f520 100644 --- a/src/data_loader.rs +++ b/src/data_loader.rs @@ -1,13 +1,13 @@ use std::collections::HashMap; use std::path::Path; -use crate::board; +use crate::board::{self, Board}; use crate::detected_points::{FeaturePoint, FrameFeature}; use crate::visualization::log_image_as_compressed; use aprilgrid::detector::TagDetector; use glam::Vec2; use glob::glob; -use image::ImageReader; +use image::{DynamicImage, ImageReader}; use rayon::prelude::*; fn path_to_timestamp(path: &Path) -> i64 { @@ -21,54 +21,108 @@ fn path_to_timestamp(path: &Path) -> i64 { time_ns } +fn image_to_option_feature_frame( + tag_detector: &TagDetector, + img: &DynamicImage, + board: &Board, + min_corners: usize, + time_ns: i64, +) -> Option { + let detected_tag = tag_detector.detect(img); + let tags_expand_ids: HashMap = detected_tag + .iter() + .flat_map(|(k, v)| { + v.iter() + .enumerate() + .filter_map(|(i, p)| { + let id = k * 4 + i as u32; + if let Some(p3d) = board.id_to_3d.get(&id) { + let p2d = Vec2::new(p.0, p.1); + Some((id, FeaturePoint { p2d, p3d: *p3d })) + } else { + None + } + }) + .collect::>() + }) + .collect(); + if tags_expand_ids.len() < min_corners { + None + } else { + 
Some(FrameFeature { + time_ns, + img_w_h: (img.width(), img.height()), + features: tags_expand_ids, + }) + } +} + pub fn load_euroc( root_folder: &str, tag_detector: &TagDetector, board: &board::Board, start_idx: usize, step: usize, + cam_num: usize, recording_option: Option<&rerun::RecordingStream>, -) -> Vec { - let img_paths = glob(format!("{}/mav0/cam0/data/*.png", root_folder).as_str()).expect("failed"); - img_paths - .skip(start_idx) - .step_by(step) - .par_bridge() - .filter_map(|path| { - let path = path.unwrap(); - let time_ns = path_to_timestamp(&path); - let img = ImageReader::open(&path).unwrap().decode().unwrap(); - if let Some(recording) = recording_option { - recording.set_time_nanos("stable", time_ns); - log_image_as_compressed(recording, "/cam0", &img, image::ImageFormat::Jpeg); - }; - let detected_tag = tag_detector.detect(&img); - let tags_expand_ids: HashMap = detected_tag - .iter() - .flat_map(|(k, v)| { - v.iter() - .enumerate() - .filter_map(|(i, p)| { - let id = k * 4 + i as u32; - if let Some(p3d) = board.id_to_3d.get(&id) { - let p2d = Vec2::new(p.0, p.1); - Some((id, FeaturePoint { p2d, p3d: *p3d })) - } else { - None - } - }) - .collect::>() +) -> Vec>> { + const MIN_CORNERS: usize = 24; + (0..cam_num) + .map(|cam_idx| { + let img_paths = + glob(format!("{}/mav0/cam{}/data/*.png", root_folder, cam_idx).as_str()) + .expect("failed"); + img_paths + .skip(start_idx) + .step_by(step) + .par_bridge() + .map(|path| { + let path = path.unwrap(); + let time_ns = path_to_timestamp(&path); + let img = ImageReader::open(&path).unwrap().decode().unwrap(); + if let Some(recording) = recording_option { + recording.set_time_nanos("stable", time_ns); + let topic = format!("/cam{}", cam_idx); + log_image_as_compressed(recording, &topic, &img, image::ImageFormat::Jpeg); + }; + image_to_option_feature_frame(tag_detector, &img, board, MIN_CORNERS, time_ns) }) - .collect(); - if tags_expand_ids.len() < 24 { - None - } else { - Some(FrameFeature { - time_ns, 
- img_w_h: (img.width(), img.height()), - features: tags_expand_ids, + .collect() + }) + .collect() +} + +pub fn load_general( + root_folder: &str, + tag_detector: &TagDetector, + board: &board::Board, + start_idx: usize, + step: usize, + cam_num: usize, + recording_option: Option<&rerun::RecordingStream>, +) -> Vec>> { + const MIN_CORNERS: usize = 24; + (0..cam_num) + .map(|cam_idx| { + let img_paths = + glob(format!("{}/cam{}/**/*.png", root_folder, cam_idx).as_str()).expect("failed"); + img_paths + .skip(start_idx) + .step_by(step) + .enumerate() + .par_bridge() + .map(|(idx, path)| { + let path = path.unwrap(); + let time_ns = idx as i64 * 100000000; + let img = ImageReader::open(&path).unwrap().decode().unwrap(); + if let Some(recording) = recording_option { + recording.set_time_nanos("stable", time_ns); + let topic = format!("/cam{}", cam_idx); + log_image_as_compressed(recording, &topic, &img, image::ImageFormat::Jpeg); + }; + image_to_option_feature_frame(tag_detector, &img, board, MIN_CORNERS, time_ns) }) - } + .collect() }) .collect() } diff --git a/src/optimization/factors.rs b/src/optimization/factors.rs index 85d808c..a06dc70 100644 --- a/src/optimization/factors.rs +++ b/src/optimization/factors.rs @@ -127,6 +127,7 @@ pub struct ReprojectionFactor { pub target: GenericModel, pub p3d: na::Point3, pub p2d: na::Vector2, + pub xy_same_focal: bool, } impl ReprojectionFactor { @@ -134,11 +135,17 @@ impl ReprojectionFactor { target: &GenericModel, p3d: &glam::Vec3, p2d: &glam::Vec2, + xy_same_focal: bool, ) -> ReprojectionFactor { let target = target.cast(); let p3d = na::Point3::new(p3d.x, p3d.y, p3d.z).cast(); let p2d = na::Vector2::new(p2d.x, p2d.y).cast(); - ReprojectionFactor { target, p3d, p2d } + ReprojectionFactor { + target, + p3d, + p2d, + xy_same_focal, + } } } impl Factor for ReprojectionFactor { @@ -147,7 +154,11 @@ impl Factor for ReprojectionFactor { params: &[nalgebra::DVector], ) -> nalgebra::DVector { // params[params, rvec, tvec] - let 
model = self.target.new_from_params(¶ms[0]); + let mut params0 = params[0].clone(); + if self.xy_same_focal { + params0 = params0.clone().insert_row(1, params0[0].clone()); + } + let model = self.target.new_from_params(¶ms0); let rvec = na::Vector3::new( params[1][0].clone(), params[1][1].clone(), diff --git a/src/util.rs b/src/util.rs index 59c75b0..9606889 100644 --- a/src/util.rs +++ b/src/util.rs @@ -23,13 +23,29 @@ pub fn rtvec_to_na_dvec( fn set_problem_parameter_bound( problem: &mut tiny_solver::Problem, generic_camera: &GenericModel, + xy_same_focal: bool, ) { + let shift = if xy_same_focal { 1 } else { 0 }; problem.set_variable_bounds("params", 0, 0.0, 10000.0); - problem.set_variable_bounds("params", 1, 0.0, 10000.0); - problem.set_variable_bounds("params", 2, 0.0, generic_camera.width()); - problem.set_variable_bounds("params", 3, 0.0, generic_camera.height()); + problem.set_variable_bounds("params", 1 - shift, 0.0, 10000.0); + problem.set_variable_bounds("params", 2 - shift, 0.0, generic_camera.width()); + problem.set_variable_bounds("params", 3 - shift, 0.0, generic_camera.height()); for (distortion_idx, (lower, upper)) in generic_camera.distortion_params_bound() { - problem.set_variable_bounds("params", distortion_idx, lower, upper); + problem.set_variable_bounds("params", distortion_idx - shift, lower, upper); + } +} +fn set_problem_parameter_disabled( + problem: &mut tiny_solver::Problem, + init_values: &mut HashMap>, + generic_camera: &GenericModel, + xy_same_focal: bool, + disabled_distortions: usize, +) { + let shift = if xy_same_focal { 1 } else { 0 }; + for i in 0..disabled_distortions { + let distortion_idx = generic_camera.params().len() - 1 - shift - i; + problem.fix_variable("params", distortion_idx); + init_values.get_mut("params").unwrap()[distortion_idx] = 0.0; } } @@ -60,28 +76,27 @@ fn vec2_distance2(v0: &glam::Vec2, v1: &glam::Vec2) -> f32 { v.x * v.x + v.y * v.y } -pub fn find_best_two_frames(detected_feature_frames: &[FrameFeature]) 
-> (usize, usize) { +pub fn find_best_two_frames(detected_feature_frames: &[Option]) -> (usize, usize) { let mut max_detection = 0; let mut max_detection_idxs = Vec::new(); for (i, f) in detected_feature_frames.iter().enumerate() { - match f.features.len().cmp(&max_detection) { - Ordering::Greater => { - max_detection = f.features.len(); - max_detection_idxs = vec![i]; - } - Ordering::Less => {} - Ordering::Equal => { - max_detection_idxs.push(i); + if let Some(f) = f { + match f.features.len().cmp(&max_detection) { + Ordering::Greater => { + max_detection = f.features.len(); + max_detection_idxs = vec![i]; + } + Ordering::Less => {} + Ordering::Equal => { + max_detection_idxs.push(i); + } } } - // if f.features.len() > max_detection { - // } else if f.features.len() == max_detection { - // } } let mut v0: Vec<_> = max_detection_idxs .iter() - .map(|i| { - let p_avg = features_avg_center(&detected_feature_frames[*i].features); + .map(|&i| { + let p_avg = features_avg_center(&detected_feature_frames[i].clone().unwrap().features); (i, p_avg) }) .collect(); @@ -96,23 +111,27 @@ pub fn find_best_two_frames(detected_feature_frames: &[FrameFeature]) -> (usize, let mut v1: Vec<_> = max_detection_idxs .iter() .map(|&i| { - let area = features_covered_area(&detected_feature_frames[i].features); + let area = features_covered_area(&detected_feature_frames[i].clone().unwrap().features); (i, area) }) .collect(); v1.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap()); // (*v0[0].0, *v0.last().unwrap().0) - (v1.last().unwrap().0, *v0.last().unwrap().0) + (v1.last().unwrap().0, v0.last().unwrap().0) } -pub fn convert_model(source_model: &GenericModel, target_model: &mut GenericModel) { +pub fn convert_model( + source_model: &GenericModel, + target_model: &mut GenericModel, + disabled_distortions: usize, +) { let mut problem = tiny_solver::Problem::new(); let edge_pixels = source_model.width().max(source_model.height()) as u32 / 100; let cost = ModelConvertFactor::new(source_model, 
target_model, edge_pixels, 3); problem.add_residual_block( cost.residaul_num(), - vec![("x".to_string(), target_model.params().len())], + vec![("params".to_string(), target_model.params().len())], Box::new(cost), Some(Box::new(HuberLoss::new(1.0))), ); @@ -121,20 +140,26 @@ pub fn convert_model(source_model: &GenericModel, target_model: &mut Generi let mut target_params_init = target_model.params(); target_params_init.rows_mut(0, 4).copy_from(&camera_params); - let initial_values = - HashMap::>::from([("x".to_string(), target_params_init)]); + let mut initial_values = + HashMap::>::from([("params".to_string(), target_params_init)]); // initialize optimizer let optimizer = tiny_solver::GaussNewtonOptimizer {}; // distortion parameter bound - set_problem_parameter_bound(&mut problem, target_model); - + set_problem_parameter_bound(&mut problem, target_model, false); + set_problem_parameter_disabled( + &mut problem, + &mut initial_values, + &target_model, + false, + disabled_distortions, + ); // optimize let result = optimizer.optimize(&problem, &initial_values, None); // save result - let result_params = result.get("x").unwrap(); + let result_params = result.get("params").unwrap(); target_model.set_params(result_params); } @@ -198,10 +223,10 @@ pub fn init_ucm( let optimizer = tiny_solver::GaussNewtonOptimizer {}; println!("init ucm init f {}", initial_values.get("params").unwrap()); - println!("init rvec0{}", initial_values.get("rvec0").unwrap()); - println!("init tvec0{}", initial_values.get("tvec0").unwrap()); - println!("init rvec1{}", initial_values.get("rvec1").unwrap()); - println!("init tvec1{}", initial_values.get("tvec1").unwrap()); + // println!("init rvec0{}", initial_values.get("rvec0").unwrap()); + // println!("init tvec0{}", initial_values.get("tvec0").unwrap()); + // println!("init rvec1{}", initial_values.get("rvec1").unwrap()); + // println!("init tvec1{}", initial_values.get("tvec1").unwrap()); // optimize 
init_focal_alpha_problem.set_variable_bounds("params", 0, init_f / 3.0, init_f * 3.0); @@ -213,10 +238,10 @@ pub fn init_ucm( "params after {:?}\n", second_round_values.get("params").unwrap() ); - println!("after rvec0{}", second_round_values.get("rvec0").unwrap()); - println!("after tvec0{}", second_round_values.get("tvec0").unwrap()); - println!("after rvec1{}", second_round_values.get("rvec1").unwrap()); - println!("after tvec1{}", second_round_values.get("tvec1").unwrap()); + // println!("after rvec0{}", second_round_values.get("rvec0").unwrap()); + // println!("after tvec0{}", second_round_values.get("tvec0").unwrap()); + // println!("after rvec1{}", second_round_values.get("rvec1").unwrap()); + // println!("after tvec1{}", second_round_values.get("tvec1").unwrap()); // panic!("stop"); let focal = second_round_values["params"][0]; @@ -229,74 +254,95 @@ pub fn init_ucm( )); second_round_values.remove("params"); calib_camera( - &[frame_feature0.clone(), frame_feature1.clone()], + &[Some(frame_feature0.clone()), Some(frame_feature1.clone())], &ucm_camera, + true, + 0, ) .0 } pub fn calib_camera( - frame_feature_list: &[FrameFeature], + frame_feature_list: &[Option], generic_camera: &GenericModel, + xy_same_focal: bool, + disabled_distortions: usize, ) -> (GenericModel, Vec) { - let params = generic_camera.params(); + let mut params = generic_camera.params(); + if xy_same_focal { + // remove fy + params = params.remove_row(1); + }; let params_len = params.len(); - let mut problem = tiny_solver::Problem::new(); let mut initial_values = HashMap::>::from([("params".to_string(), params)]); debug!("init {:?}", initial_values); + let mut problem = tiny_solver::Problem::new(); let mut valid_indexes = Vec::new(); for (i, frame_feature) in frame_feature_list.iter().enumerate() { - let mut p3ds = Vec::new(); - let mut p2ds = Vec::new(); - let rvec_name = format!("rvec{}", i); - let tvec_name = format!("tvec{}", i); - for fp in frame_feature.features.values() { - let cost = 
ReprojectionFactor::new(generic_camera, &fp.p3d, &fp.p2d); - problem.add_residual_block( - 2, - vec![ - ("params".to_string(), params_len), - (rvec_name.clone(), 3), - (tvec_name.clone(), 3), - ], - Box::new(cost), - Some(Box::new(HuberLoss::new(1.0))), - ); - p3ds.push(fp.p3d); - p2ds.push(na::Vector2::new(fp.p2d.x as f64, fp.p2d.y as f64)); + if let Some(frame_feature) = frame_feature { + let mut p3ds = Vec::new(); + let mut p2ds = Vec::new(); + let rvec_name = format!("rvec{}", i); + let tvec_name = format!("tvec{}", i); + for fp in frame_feature.features.values() { + let cost = ReprojectionFactor::new(generic_camera, &fp.p3d, &fp.p2d, xy_same_focal); + problem.add_residual_block( + 2, + vec![ + ("params".to_string(), params_len), + (rvec_name.clone(), 3), + (tvec_name.clone(), 3), + ], + Box::new(cost), + Some(Box::new(HuberLoss::new(1.0))), + ); + p3ds.push(fp.p3d); + p2ds.push(na::Vector2::new(fp.p2d.x as f64, fp.p2d.y as f64)); + } + let undistorted = generic_camera.unproject(&p2ds); + let (p3ds, p2ds_z): (Vec<_>, Vec<_>) = undistorted + .iter() + .zip(p3ds) + .filter_map(|(p2, p3)| { + p2.as_ref() + .map(|p2| (p3, glam::Vec2::new(p2.x as f32, p2.y as f32))) + }) + .unzip(); + // if p3ds.len() < 6 { + // println!("skip frame {}", i); + // continue; + // } + valid_indexes.push(i); + let (rvec, tvec) = + rtvec_to_na_dvec(sqpnp_simple::sqpnp_solve_glam(&p3ds, &p2ds_z).unwrap()); + + initial_values.entry(rvec_name).or_insert(rvec); + initial_values.entry(tvec_name).or_insert(tvec); } - let undistorted = generic_camera.unproject(&p2ds); - let (p3ds, p2ds_z): (Vec<_>, Vec<_>) = undistorted - .iter() - .zip(p3ds) - .filter_map(|(p2, p3)| { - p2.as_ref() - .map(|p2| (p3, glam::Vec2::new(p2.x as f32, p2.y as f32))) - }) - .unzip(); - // if p3ds.len() < 6 { - // println!("skip frame {}", i); - // continue; - // } - valid_indexes.push(i); - let (rvec, tvec) = - rtvec_to_na_dvec(sqpnp_simple::sqpnp_solve_glam(&p3ds, &p2ds_z).unwrap()); - - 
initial_values.entry(rvec_name).or_insert(rvec); + initial_values.entry(tvec_name).or_insert(tvec); + } } let optimizer = tiny_solver::GaussNewtonOptimizer {}; - let initial_values = optimizer.optimize(&problem, &initial_values, None); - - set_problem_parameter_bound(&mut problem, generic_camera); + // let initial_values = optimizer.optimize(&problem, &initial_values, None); + + set_problem_parameter_bound(&mut problem, generic_camera, xy_same_focal); + set_problem_parameter_disabled( + &mut problem, + &mut initial_values, + &generic_camera, + xy_same_focal, + disabled_distortions, + ); let mut result = optimizer.optimize(&problem, &initial_values, None); - let new_params = result.get("params").unwrap(); + let mut new_params = result.get("params").unwrap().clone(); + if xy_same_focal { + // remove fy + new_params = new_params.clone().insert_row(1, new_params[0]); + }; println!("params {}", new_params); let mut calibrated_camera = *generic_camera; - calibrated_camera.set_params(new_params); + calibrated_camera.set_params(&new_params); let rtvec_vec: Vec<_> = valid_indexes .iter() .map(|&i| { diff --git a/src/visualization.rs b/src/visualization.rs index fa70428..a498385 100644 --- a/src/visualization.rs +++ b/src/visualization.rs @@ -42,24 +42,33 @@ pub fn rerun_shift(p2ds: &[(f32, f32)]) -> Vec<(f32, f32)> { p2ds.iter().map(|(x, y)| (*x + 0.5, *y + 0.5)).collect() } -pub fn log_frames(recording: &RecordingStream, detected_feature_frames: &[FrameFeature]) { +pub fn log_feature_frames( + recording: &RecordingStream, + topic: &str, + detected_feature_frames: &[Option], +) { for f in detected_feature_frames { - let (pts, colors_labels): (Vec<_>, Vec<_>) = f - .features - .iter() - .map(|(id, p)| { - let color = 
id_to_color(*id as usize); + ( + (p.p2d.x, p.p2d.y), + (color, format!("{:?}", p.p3d).to_string()), + ) + }) + .unzip(), + f.time_ns, + ) + } else { + continue; + }; let (colors, labels): (Vec<_>, Vec<_>) = colors_labels.iter().cloned().unzip(); let pts = rerun_shift(&pts); - let topic = "/cam0"; - recording.set_time_nanos("stable", f.time_ns); + recording.set_time_nanos("stable", time_ns); recording .log( format!("{}/pts", topic),