Merge pull request #22 from eZioPan/discontinuous-field
Support discontinuous field
Dirbaio authored Feb 15, 2024
2 parents 87298cb + e71c1c5 commit 689341a
Showing 7 changed files with 243 additions and 52 deletions.
130 changes: 97 additions & 33 deletions src/generate/fieldset.rs
@@ -20,10 +20,10 @@ pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Res
_ => panic!("Invalid bit_size {}", fs.bit_size),
};

for f in sorted(&fs.fields, |f| (f.bit_offset, f.name.clone())) {
for f in sorted(&fs.fields, |f| (f.bit_offset.clone(), f.name.clone())) {
let name = Ident::new(&f.name, span);
let name_set = Ident::new(&format!("set_{}", f.name), span);
let bit_offset = f.bit_offset as usize;
let off_in_reg = f.bit_offset.clone();
let _bit_size = f.bit_size as usize;
let mask = util::hex(1u64.wrapping_shl(f.bit_size).wrapping_sub(1));
let doc = util::doc(&f.description);
@@ -64,40 +64,104 @@ pub fn render(_opts: &super::Options, ir: &IR, fs: &FieldSet, path: &str) -> Res
}
}

if let Some(array) = &f.array {
let (len, offs_expr) = super::process_array(array);
items.extend(quote!(
#doc
#[inline(always)]
pub const fn #name(&self, n: usize) -> #field_ty{
assert!(n < #len);
let offs = #bit_offset + #offs_expr;
let val = (self.0 >> offs) & #mask;
#from_bits
match off_in_reg {
BitOffset::Regular(off_in_reg) => {
let off_in_reg = off_in_reg as usize;
if let Some(array) = &f.array {
let (len, offs_expr) = super::process_array(array);
items.extend(quote!(
#doc
#[inline(always)]
pub const fn #name(&self, n: usize) -> #field_ty{
assert!(n < #len);
let offs = #off_in_reg + #offs_expr;
let val = (self.0 >> offs) & #mask;
#from_bits
}
#doc
#[inline(always)]
pub fn #name_set(&mut self, n: usize, val: #field_ty) {
assert!(n < #len);
let offs = #off_in_reg + #offs_expr;
self.0 = (self.0 & !(#mask << offs)) | (((#to_bits) & #mask) << offs);
}
));
} else {
items.extend(quote!(
#doc
#[inline(always)]
pub const fn #name(&self) -> #field_ty{
let val = (self.0 >> #off_in_reg) & #mask;
#from_bits
}
#doc
#[inline(always)]
pub fn #name_set(&mut self, val: #field_ty) {
self.0 = (self.0 & !(#mask << #off_in_reg)) | (((#to_bits) & #mask) << #off_in_reg);
}
));
}
#doc
#[inline(always)]
pub fn #name_set(&mut self, n: usize, val: #field_ty) {
assert!(n < #len);
let offs = #bit_offset + #offs_expr;
self.0 = (self.0 & !(#mask << offs)) | (((#to_bits) & #mask) << offs);
}
));
} else {
items.extend(quote!(
#doc
#[inline(always)]
pub const fn #name(&self) -> #field_ty{
let val = (self.0 >> #bit_offset) & #mask;
#from_bits
}
BitOffset::Cursed(ranges) => {
// offsets of the ranges inside the register
let mut off_in_reg: Vec<usize> = Vec::new();
let mut mask: Vec<TokenStream> = Vec::new();
// offsets used to shift each range's value into place in the final field value.
// Preload the first offset as 0: since the ranges are ordered from lowest to highest,
// the first offset-in-value is always 0.
let mut off_in_val: Vec<usize> = vec![0];
for (index, range) in ranges.iter().enumerate() {
off_in_reg.push(*range.start() as usize);
mask.push(util::hex(
1u64.wrapping_shl(range.end() - range.start() + 1)
.wrapping_sub(1),
));

// prepare the offset-in-value for the next range
if index < ranges.len() - 1 {
off_in_val
.push(off_in_val[index] + ((range.end() - range.start()) as usize + 1))
}
}
#doc
#[inline(always)]
pub fn #name_set(&mut self, val: #field_ty) {
self.0 = (self.0 & !(#mask << #bit_offset)) | (((#to_bits) & #mask) << #bit_offset);

if let Some(array) = &f.array {
let (len, offs_expr) = super::process_array(array);
items.extend(quote!(
#doc
#[inline(always)]
pub const fn #name(&self, n: usize) -> #field_ty{
assert!(n < #len);
let mut val = 0;
#( let offs = #off_in_reg + #offs_expr;
val += (((self.0 >> offs) & #mask) << #off_in_val); )*;
#from_bits
}
#doc
#[inline(always)]
pub fn #name_set(&mut self, n: usize, val: #field_ty) {
assert!(n < #len);
#( let offs = #off_in_reg + #offs_expr;
self.0 = (self.0 & !(#mask << offs)) | (((#to_bits >> #off_in_val) & #mask) << offs); )*;
}
));
} else {
items.extend(quote!(
#doc
#[inline(always)]
pub const fn #name(&self) -> #field_ty{
let mut val = 0;
#( val += (((self.0 >> #off_in_reg) & #mask) << #off_in_val); )*;
#from_bits
}
#doc
#[inline(always)]
pub fn #name_set(&mut self, val: #field_ty) {
#( self.0 = (self.0 & !(#mask << #off_in_reg)) | (((#to_bits >> #off_in_val) & #mask) << #off_in_reg); )*;
}
))
}
));
}
}
};
}

let (_, name) = super::split_path(path);
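
To make the new code path concrete: for a hypothetical 8-bit field foo stored in bits 0..=3 and 8..=11 of a 32-bit register, the Cursed branch above expands to accessors roughly like the sketch below. The register type Cr, the field name, and the u8 value type are invented for illustration; the actual output comes from the quote! templates shown in the diff.

    #[derive(Copy, Clone)]
    pub struct Cr(pub u32);

    impl Cr {
        #[inline(always)]
        pub const fn foo(&self) -> u8 {
            let mut val = 0;
            // reassemble the value range by range, lowest range first:
            // register bits 0..=3 become value bits 0..=3,
            // register bits 8..=11 become value bits 4..=7
            val += ((self.0 >> 0) & 0x0f) << 0;
            val += ((self.0 >> 8) & 0x0f) << 4;
            val as u8
        }

        #[inline(always)]
        pub fn set_foo(&mut self, val: u8) {
            // write each range back independently, masking out its old bits first
            self.0 = (self.0 & !(0x0f << 0)) | ((((val as u32) >> 0) & 0x0f) << 0);
            self.0 = (self.0 & !(0x0f << 8)) | ((((val as u32) >> 4) & 0x0f) << 8);
        }
    }

    fn main() {
        let r = Cr(0x0000_0B0A);
        assert_eq!(r.foo(), 0xBA);
    }

Writing 0xBA lands 0xA in bits 0..=3 and 0xB in bits 8..=11, and reading reassembles the same 0xBA. Regular fields keep the single shift-and-mask accessors they had before this change.
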
57 changes: 55 additions & 2 deletions src/ir.rs
@@ -2,6 +2,7 @@ use de::MapAccess;
use serde::{de, de::Visitor, ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
use std::collections::{BTreeMap, HashMap};
use std::fmt;
use std::ops::RangeInclusive;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct IR {
@@ -142,13 +143,65 @@ pub struct FieldSet {
pub fields: Vec<Field>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Eq)]
#[serde(untagged)]
pub enum BitOffset {
Regular(u32),
// This vector assumes all RangeInclusive values are non-overlapping and sorted.
// This should be checked when parsing source files.
Cursed(Vec<RangeInclusive<u32>>),
}

impl BitOffset {
pub(crate) fn min_offset(&self) -> u32 {
match self {
BitOffset::Regular(offset) => *offset,
BitOffset::Cursed(ranges) => *ranges[0].start(),
}
}

pub(crate) fn max_offset(&self) -> u32 {
match self {
BitOffset::Regular(offset) => *offset,
BitOffset::Cursed(ranges) => *ranges[ranges.len() - 1].end(),
}
}

pub(crate) fn into_ranges(self, bit_size: u32) -> Vec<RangeInclusive<u32>> {
match self {
BitOffset::Regular(offset) => vec![offset..=offset + bit_size - 1],
BitOffset::Cursed(ranges) => ranges,
}
}
}

// Custom bit offset ordering:
// 1. Compare min offsets: the smaller one orders first. If the min offsets are equal,
// 2. compare max offsets: the smaller one orders first; equal max offsets compare equal.
impl Ord for BitOffset {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
use std::cmp::Ordering;

let min_order = self.min_offset().cmp(&other.min_offset());
match min_order {
Ordering::Equal => self.max_offset().cmp(&other.max_offset()),
min_order => min_order,
}
}
}

impl PartialOrd for BitOffset {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Field {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,

pub bit_offset: u32,
pub bit_offset: BitOffset,
pub bit_size: u32,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub array: Option<Array>,
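
A minimal, self-contained sketch of how the new helpers behave. It re-declares a trimmed-down BitOffset instead of importing the one above, and the concrete offsets (a 3-bit field at bit 4, and a field split across bits 0..=3 and 8..=11) are invented for illustration.

    use std::ops::RangeInclusive;

    // trimmed-down copy of the enum above, without the serde derives
    #[derive(Clone, Debug, PartialEq, Eq)]
    enum BitOffset {
        Regular(u32),
        Cursed(Vec<RangeInclusive<u32>>),
    }

    impl BitOffset {
        fn min_offset(&self) -> u32 {
            match self {
                BitOffset::Regular(offset) => *offset,
                BitOffset::Cursed(ranges) => *ranges[0].start(),
            }
        }
        fn max_offset(&self) -> u32 {
            match self {
                BitOffset::Regular(offset) => *offset,
                BitOffset::Cursed(ranges) => *ranges[ranges.len() - 1].end(),
            }
        }
        fn into_ranges(self, bit_size: u32) -> Vec<RangeInclusive<u32>> {
            match self {
                BitOffset::Regular(offset) => vec![offset..=offset + bit_size - 1],
                BitOffset::Cursed(ranges) => ranges,
            }
        }
    }

    fn main() {
        let regular = BitOffset::Regular(4);
        let cursed = BitOffset::Cursed(vec![0..=3, 8..=11]);

        assert_eq!(regular.min_offset(), 4);
        assert_eq!(cursed.min_offset(), 0);  // start of the first range
        assert_eq!(cursed.max_offset(), 11); // end of the last range

        // a regular offset becomes a single range covering the whole field
        assert_eq!(regular.into_ranges(3), vec![4..=6]);

        // under the Ord impl above, `cursed` sorts before `regular`
        // because its min offset (0) is smaller
    }

The Cursed helpers rely on the invariant noted in the comment above: the ranges are sorted and non-overlapping, so the first range holds the lowest bit and the last range holds the highest.
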
2 changes: 1 addition & 1 deletion src/svd2ir.rs
@@ -271,7 +271,7 @@ pub fn convert_peripheral(ir: &mut IR, p: &svd::Peripheral) -> anyhow::Result<()
let mut field = Field {
name: f.name.clone(),
description: f.description.clone(),
bit_offset: f.bit_range.offset,
bit_offset: BitOffset::Regular(f.bit_range.offset),
bit_size: f.bit_range.width,
array: None,
enumm: None,
5 changes: 4 additions & 1 deletion src/transform/delete_fieldsets.rs
@@ -38,10 +38,13 @@ impl DeleteFieldsets {
}
}

// A fieldset is useless when
// 1. it has no fields, or
// 2. it has a single field that occupies the entire fieldset and has no enum.
fn is_useless(fs: &FieldSet) -> bool {
match &fs.fields[..] {
[] => true,
[f] => fs.bit_size == f.bit_size && f.bit_offset == 0 && f.enumm.is_none(),
[f] => fs.bit_size == f.bit_size && f.bit_offset.min_offset() == 0 && f.enumm.is_none(),
_ => false,
}
}
23 changes: 20 additions & 3 deletions src/transform/make_field_array.rs
@@ -30,15 +30,32 @@ impl MakeFieldArray {
}

// todo check they're mergeable

// one array shouldn't contain both regular and cursed bit_offset types
{
let has_regular_bit_offset = items
.iter()
.any(|i| matches!(i.bit_offset, BitOffset::Regular(_)));

let has_cursed_bit_offset = items
.iter()
.any(|i| matches!(i.bit_offset, BitOffset::Cursed(_)));

if has_regular_bit_offset && has_cursed_bit_offset {
panic!("arrayize: items cannot mix bit_offset type")
}
}

// todo check they're not arrays (arrays of arrays not supported)

// Sort by offs
items.sort_by_key(|i| i.bit_offset);
items.sort_by_key(|i| i.bit_offset.clone());
for i in &items {
info!(" {}", i.name);
}

let (offset, array) = calc_array(items.iter().map(|x| x.bit_offset).collect());
let (offset, array) =
calc_array(items.iter().map(|x| x.bit_offset.min_offset()).collect());
if let Array::Cursed(_) = &array {
if !self.allow_cursed {
panic!("arrayize: items are not evenly spaced. Set `allow_cursed: true` to allow this.")
@@ -53,7 +70,7 @@
// Create the new array item
item.name = to;
item.array = Some(array);
item.bit_offset = offset;
item.bit_offset = BitOffset::Regular(offset);
b.fields.push(item);
}
}
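
A small sketch of the guard added above, outside the transform: the enum copy and the helper name assert_uniform_offsets are illustrative, not part of make_field_array.rs.

    use std::ops::RangeInclusive;

    enum BitOffset {
        Regular(u32),
        Cursed(Vec<RangeInclusive<u32>>),
    }

    // an array of fields must use a single bit_offset flavour; mixing them panics,
    // mirroring the check added in the diff above
    fn assert_uniform_offsets(offsets: &[BitOffset]) {
        let has_regular = offsets.iter().any(|o| matches!(o, BitOffset::Regular(_)));
        let has_cursed = offsets.iter().any(|o| matches!(o, BitOffset::Cursed(_)));
        if has_regular && has_cursed {
            panic!("arrayize: items cannot mix bit_offset type")
        }
    }

    fn main() {
        // fine: all items in a candidate array share one flavour
        assert_uniform_offsets(&[BitOffset::Regular(0), BitOffset::Regular(8)]);
        assert_uniform_offsets(&[BitOffset::Cursed(vec![0..=3, 8..=11])]);
        // mixing a Regular item with a Cursed item in one array would panic
    }
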
3 changes: 2 additions & 1 deletion src/transform/sort.rs
@@ -11,7 +11,8 @@ impl Sort {
z.items.sort_by_key(|i| (i.byte_offset, i.name.clone()))
}
for z in ir.fieldsets.values_mut() {
z.fields.sort_by_key(|i| (i.bit_offset, i.name.clone()))
z.fields
.sort_by_key(|i| (i.bit_offset.clone(), i.name.clone()))
}
for z in ir.enums.values_mut() {
z.variants.sort_by_key(|i| (i.value, i.name.clone()))