
correction: support erasures
apoelstra committed Sep 30, 2024
1 parent 2e1b7be commit 383f788
Showing 2 changed files with 149 additions and 35 deletions.
151 changes: 121 additions & 30 deletions src/primitives/correction.rs
@@ -76,7 +76,11 @@ pub trait CorrectableError {
return None;
}

self.residue_error().map(|e| Corrector { residue: e.residue(), phantom: PhantomData })
self.residue_error().map(|e| Corrector {
erasures: FieldVec::new(),
residue: e.residue(),
phantom: PhantomData,
})
}
}

@@ -127,12 +131,40 @@ impl CorrectableError for DecodeError {
}

/// An error-correction context.
pub struct Corrector<Ck> {
pub struct Corrector<Ck: Checksum> {
erasures: FieldVec<usize>,
residue: Polynomial<Fe32>,
phantom: PhantomData<Ck>,
}

impl<Ck: Checksum> Corrector<Ck> {
/// A bound on the number of errors and erasures (errors with known location)
/// that can be corrected by this corrector.
///
/// Returns N such that, given E errors and X erasures, correction is possible
/// iff 2E + X <= N.
pub fn singleton_bound(&self) -> usize {
// d - 1, where d = [number of consecutive roots] + 2
Ck::ROOT_EXPONENTS.end() - Ck::ROOT_EXPONENTS.start() + 1
}
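
The arithmetic behind this bound can be checked against the tests added at the bottom of this file, which behave as if `singleton_bound()` is 3 for `Bech32`. The sketch below is illustrative only; it uses plain integers rather than the crate's types.

```rust
/// Illustrative only: `n` plays the role of `singleton_bound()`.
fn correctable(n: usize, errors: usize, erasures: usize) -> bool {
    2 * errors + erasures <= n
}

fn main() {
    let n = 3; // the value the Bech32 tests below behave as
    assert!(correctable(n, 1, 0)); // one unknown error
    assert!(correctable(n, 1, 1)); // one error plus one erasure
    assert!(!correctable(n, 2, 0)); // two unknown errors exceed the bound
    assert!(correctable(n, 0, 3)); // three erasures with known locations
}
```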

/// Adds the locations of known errors ("erasures") to the context.
///
/// Locations use the same indexing as the error positions returned by
/// [`Self::bch_errors`].
pub fn add_erasures(&mut self, locs: &[usize]) {
for loc in locs {
// If the user tries to add too many erasures, just ignore them. In
// this case error correction is guaranteed to fail anyway, because
// they will have exceeded the singleton bound. (Otherwise, the
// singleton bound, which is always <= the checksum length, must be
// greater than NO_ALLOC_MAX_LENGTH. So the checksum length must be
// greater than NO_ALLOC_MAX_LENGTH. Then correction will still fail.)
#[cfg(not(feature = "alloc"))]
if self.erasures.len() == NO_ALLOC_MAX_LENGTH {
break;
}
self.erasures.push(*loc);
}
}
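
The early `break` above can be illustrated in isolation. This standalone sketch mimics the capped push with a hypothetical fixed capacity standing in for `NO_ALLOC_MAX_LENGTH`; it is not the crate's code.

```rust
/// Illustrative only: push erasure locations into a buffer with a fixed
/// capacity `cap`, silently dropping the excess exactly as `add_erasures`
/// does in a no-alloc build.
fn add_erasures_capped(buf: &mut Vec<usize>, cap: usize, locs: &[usize]) {
    for loc in locs {
        if buf.len() == cap {
            break; // too many erasures: correction is doomed to fail anyway
        }
        buf.push(*loc);
    }
}

fn main() {
    let mut buf = Vec::new();
    add_erasures_capped(&mut buf, 2, &[3, 7, 11]);
    assert_eq!(buf, vec![3, 7]); // the third location was ignored
}
```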

/// Returns an iterator over the errors in the string.
///
/// Returns `None` if it can be determined that there are too many errors to be
@@ -145,29 +177,44 @@ impl<Ck: Checksum> Corrector<Ck> {
/// string may not actually be the intended string.
pub fn bch_errors(&self) -> Option<ErrorIterator<Ck>> {
// 1. Compute all syndromes by evaluating the residue at each power of the generator.
let syndromes: FieldVec<_> = Ck::ROOT_GENERATOR
let syndromes: Polynomial<_> = Ck::ROOT_GENERATOR
.powers_range(Ck::ROOT_EXPONENTS)
.map(|rt| self.residue.evaluate(&rt))
.collect();

// 1a. Compute the "Forney syndrome polynomial" which is the product of the syndrome
// polynomial and the erasure locator. This "erases the erasures" so that B-M
// can find only the errors.
let mut erasure_locator = Polynomial::with_monic_leading_term(&[]); // 1
for loc in &self.erasures {
let factor: Polynomial<_> =
[Ck::CorrectionField::ONE, -Ck::ROOT_GENERATOR.powi(*loc as i64)]
.iter()
.cloned()
.collect(); // alpha^-ix - 1
erasure_locator = erasure_locator.mul_mod_x_d(&factor, usize::MAX);
}
let forney_syndromes = erasure_locator.convolution(&syndromes);

// 2. Use the Berlekamp-Massey algorithm to find the connection polynomial of the
// LFSR that generates these syndromes. For magical reasons this will be equal
// to the error locator polynomial for the syndrome.
let lfsr = LfsrIter::berlekamp_massey(&syndromes[..]);
let lfsr = LfsrIter::berlekamp_massey(&forney_syndromes.as_inner()[..]);
let conn = lfsr.coefficient_polynomial();

// 3. The connection polynomial is the error locator polynomial. Use this to get
// the errors.
let max_correctable_errors =
(Ck::ROOT_EXPONENTS.end() - Ck::ROOT_EXPONENTS.start() + 1) / 2;
if conn.degree() <= max_correctable_errors {
if erasure_locator.degree() + 2 * conn.degree() <= self.singleton_bound() {
// 3a. Compute the "errata locator" which is the product of the error locator
// and the erasure locator. Note that while we used the Forney syndromes
// when calling the BM algorithm, in all other cases we use the ordinary
// unmodified syndromes.
let errata_locator = conn.mul_mod_x_d(&erasure_locator, usize::MAX);
Some(ErrorIterator {
evaluator: conn.mul_mod_x_d(
&Polynomial::from(syndromes),
Ck::ROOT_EXPONENTS.end() - Ck::ROOT_EXPONENTS.start() + 1,
),
locator_derivative: conn.formal_derivative(),
inner: conn.find_nonzero_distinct_roots(Ck::ROOT_GENERATOR),
evaluator: errata_locator.mul_mod_x_d(&syndromes, self.singleton_bound()),
locator_derivative: errata_locator.formal_derivative(),
erasures: &self.erasures[..],
errors: conn.find_nonzero_distinct_roots(Ck::ROOT_GENERATOR),
a: Ck::ROOT_GENERATOR,
c: *Ck::ROOT_EXPONENTS.start(),
})
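
In the notation of the Wikipedia article cited further down in this file, the steps above amount to the following (a sketch; signs and indexing follow the code rather than a fixed textbook convention, with β = `Ck::ROOT_GENERATOR`):

```latex
% Syndromes of the residue R at each consecutive root.
S_j = R(\beta^{\,j}), \qquad j \in \mathrm{ROOT\_EXPONENTS}

% Erasure locator: one linear factor per known error location l.
\Gamma(x) = \prod_{l \in \mathrm{erasures}} \bigl(1 - \beta^{\,l} x\bigr)

% Forney syndromes: the full-overlap coefficients of \Gamma(x) S(x)
% (computed by `convolution` in polynomial.rs). Berlekamp--Massey on these
% gives the error locator \Lambda(x); the errata locator is \Lambda(x)\Gamma(x).
% Correction is attempted iff
\deg \Gamma + 2\,\deg \Lambda \le \mathrm{singleton\_bound}()
```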
@@ -206,32 +253,39 @@ impl<Ck: Checksum> Corrector<Ck> {
/// caller should fix this before attempting error correction. If it is unknown,
/// the caller cannot assume anything about the intended checksum, and should not
/// attempt error correction.
pub struct ErrorIterator<Ck: Checksum> {
pub struct ErrorIterator<'c, Ck: Checksum> {
evaluator: Polynomial<Ck::CorrectionField>,
locator_derivative: Polynomial<Ck::CorrectionField>,
inner: super::polynomial::RootIter<Ck::CorrectionField>,
erasures: &'c [usize],
errors: super::polynomial::RootIter<Ck::CorrectionField>,
a: Ck::CorrectionField,
c: usize,
}

impl<Ck: Checksum> Iterator for ErrorIterator<Ck> {
impl<'c, Ck: Checksum> Iterator for ErrorIterator<'c, Ck> {
type Item = (usize, Fe32);

fn next(&mut self) -> Option<Self::Item> {
// Compute -i, which is the location we will return to the user.
let neg_i = match self.inner.next() {
None => return None,
Some(0) => 0,
Some(x) => Ck::ROOT_GENERATOR.multiplicative_order() - x,
let neg_i = if self.erasures.is_empty() {
match self.errors.next() {
None => return None,
Some(0) => 0,
Some(x) => Ck::ROOT_GENERATOR.multiplicative_order() - x,
}
} else {
let pop = self.erasures[0];
self.erasures = &self.erasures[1..];
pop
};

// Forney's equation, as described in https://en.wikipedia.org/wiki/BCH_code#Forney_algorithm
//
// It is rendered as
//
// a^i evaluator(a^-i)
// e_k = - ---------------------------------
// a^(ci) locator_derivative(a^-i)
//                      evaluator(a^-i)
// e_k = - -----------------------------------------
//         (a^i)^(c - 1) locator_derivative(a^-i)
//
// where here a is `Ck::ROOT_GENERATOR`, c is the first element of the range
// `Ck::ROOT_EXPONENTS`, and both evaluator and locator_derivative are polynomials
@@ -240,8 +294,8 @@ impl<Ck: Checksum> Iterator for ErrorIterator<Ck> {
let a_i = self.a.powi(neg_i as i64);
let a_neg_i = a_i.clone().multiplicative_inverse();

let num = self.evaluator.evaluate(&a_neg_i) * &a_i;
let den = a_i.powi(self.c as i64) * self.locator_derivative.evaluate(&a_neg_i);
let num = self.evaluator.evaluate(&a_neg_i);
let den = a_i.powi(self.c as i64 - 1) * self.locator_derivative.evaluate(&a_neg_i);
let ret = -num / den;
match ret.try_into() {
Ok(ret) => Some((neg_i, ret)),
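
Restating the ASCII fraction from the comment above: with Ω the evaluator, Λ the errata locator, α = `Ck::ROOT_GENERATOR`, and c the first element of `Ck::ROOT_EXPONENTS`, the value computed for position i is

```latex
e_k = -\,\frac{\Omega\!\left(\alpha^{-i}\right)}
              {\left(\alpha^{i}\right)^{c-1}\,\Lambda'\!\left(\alpha^{-i}\right)}
```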
@@ -263,9 +317,13 @@ mod tests {
match SegwitHrpstring::new(s) {
Ok(_) => panic!("{} successfully, and wrongly, parsed", s),
Err(e) => {
let ctx = e.correction_context::<Bech32>().unwrap();
let mut ctx = e.correction_context::<Bech32>().unwrap();
let mut iter = ctx.bch_errors().unwrap();
assert_eq!(iter.next(), Some((0, Fe32::X)));
assert_eq!(iter.next(), None);

ctx.add_erasures(&[0]);
let mut iter = ctx.bch_errors().unwrap();
assert_eq!(iter.next(), Some((0, Fe32::X)));
assert_eq!(iter.next(), None);
}
@@ -276,9 +334,13 @@
match SegwitHrpstring::new(s) {
Ok(_) => panic!("{} successfully, and wrongly, parsed", s),
Err(e) => {
let ctx = e.correction_context::<Bech32>().unwrap();
let mut ctx = e.correction_context::<Bech32>().unwrap();
let mut iter = ctx.bch_errors().unwrap();
assert_eq!(iter.next(), Some((6, Fe32::T)));
assert_eq!(iter.next(), None);

ctx.add_erasures(&[6]);
let mut iter = ctx.bch_errors().unwrap();
assert_eq!(iter.next(), Some((6, Fe32::T)));
assert_eq!(iter.next(), None);
}
@@ -297,13 +359,42 @@
}
}

// Two errors.
let s = "bc1qar0srrr7xfkvy5l643lydnw9re59gtzzwf5mxx";
// Two errors; cannot correct.
let s = "bc1qar0srrr7xfkvy5l64qlydnw9re59gtzzwf5mdx";
match SegwitHrpstring::new(s) {
Ok(_) => panic!("{} successfully, and wrongly, parsed", s),
Err(e) => {
let ctx = e.correction_context::<Bech32>().unwrap();
let mut ctx = e.correction_context::<Bech32>().unwrap();
assert!(ctx.bch_errors().is_none());

// But we can correct it if we inform where an error is.
ctx.add_erasures(&[0]);
let mut iter = ctx.bch_errors().unwrap();
assert_eq!(iter.next(), Some((0, Fe32::X)));
assert_eq!(iter.next(), Some((20, Fe32::_3)));
assert_eq!(iter.next(), None);

ctx.add_erasures(&[20]);
let mut iter = ctx.bch_errors().unwrap();
assert_eq!(iter.next(), Some((0, Fe32::X)));
assert_eq!(iter.next(), Some((20, Fe32::_3)));
assert_eq!(iter.next(), None);
}
}

// In fact, if we know the locations, we can correct up to 3 errors.
let s = "bc1q9r0srrr7xfkvy5l64qlydnw9re59gtzzwf5mdx";
match SegwitHrpstring::new(s) {
Ok(_) => panic!("{} successfully, and wrongly, parsed", s),
Err(e) => {
let mut ctx = e.correction_context::<Bech32>().unwrap();
ctx.add_erasures(&[37, 0, 20]);
let mut iter = ctx.bch_errors().unwrap();

assert_eq!(iter.next(), Some((37, Fe32::C)));
assert_eq!(iter.next(), Some((0, Fe32::X)));
assert_eq!(iter.next(), Some((20, Fe32::_3)));
assert_eq!(iter.next(), None);
}
}
}
33 changes: 28 additions & 5 deletions src/primitives/polynomial.rs
@@ -17,9 +17,7 @@ pub struct Polynomial<F> {
}

impl<F: Field> PartialEq for Polynomial<F> {
fn eq(&self, other: &Self) -> bool {
self.inner[..self.degree()] == other.inner[..other.degree()]
}
fn eq(&self, other: &Self) -> bool { self.coefficients() == other.coefficients() }
}

impl<F: Field> Eq for Polynomial<F> {}
@@ -58,9 +56,16 @@ impl<F: Field> Polynomial<F> {
debug_assert_ne!(self.inner.len(), 0, "polynomials never have no terms");
let degree_without_leading_zeros = self.inner.len() - 1;
let leading_zeros = self.inner.iter().rev().take_while(|el| **el == F::ZERO).count();
degree_without_leading_zeros - leading_zeros
degree_without_leading_zeros.saturating_sub(leading_zeros)
}

/// Accessor for the coefficients of the polynomial, in "little endian" order.
///
/// # Panics
///
/// Panics if [`Self::has_data`] is false.
pub fn coefficients(&self) -> &[F] { &self.inner[..self.degree() + 1] }

/// An iterator over the coefficients of the polynomial.
///
/// Yields values in "little endian" order; that is, the constant term is returned first.
@@ -70,7 +75,7 @@
/// Panics if [`Self::has_data`] is false.
pub fn iter(&self) -> slice::Iter<F> {
self.assert_has_data();
self.inner[..self.degree() + 1].iter()
self.coefficients().iter()
}

/// The leading term of the polynomial.
@@ -143,6 +148,24 @@ impl<F: Field> Polynomial<F> {
res
}

/// Computes the convolution of this polynomial with a syndrome polynomial,
/// keeping only the coefficients where the two fully overlap.
///
/// Used to compute the "Forney syndromes" when correcting erasures.
pub fn convolution(&self, syndromes: &Self) -> Self {
let mut ret = FieldVec::new();
let terms = (1 + syndromes.inner.len()).saturating_sub(1 + self.degree());
if terms == 0 {
ret.push(F::ZERO);
return Self::from(ret);
}

let n = 1 + self.degree();
for idx in 0..terms {
ret.push(
(0..n).map(|i| self.inner[n - i - 1].clone() * &syndromes.inner[idx + i]).sum(),
);
}
Self::from(ret)
}
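
The indexing is easier to see on a toy input. The sketch below re-implements the same sliding-window sum over plain integers rather than field elements; it is illustrative only and assumes the locator has no trailing zero coefficients, so its degree is `len() - 1`.

```rust
/// Illustrative re-implementation of `convolution` over plain integers.
/// `locator` plays the role of `self`, `syndromes` the syndrome polynomial,
/// both with the constant coefficient first.
fn convolution(locator: &[i64], syndromes: &[i64]) -> Vec<i64> {
    let n = locator.len(); // 1 + degree, assuming no trailing zeros
    let terms = (1 + syndromes.len()).saturating_sub(1 + (n - 1));
    if terms == 0 {
        return vec![0];
    }
    (0..terms)
        .map(|idx| (0..n).map(|i| locator[n - i - 1] * syndromes[idx + i]).sum::<i64>())
        .collect()
}

fn main() {
    // locator = 1 + 2x, syndromes = s0..s3 = 10, 20, 30, 40.
    let out = convolution(&[1, 2], &[10, 20, 30, 40]);
    // Only the coefficients of locator(x) * S(x) where the two fully overlap
    // survive: the first and last deg(locator) terms of the product are gone.
    assert_eq!(out, vec![2 * 10 + 20, 2 * 20 + 30, 2 * 30 + 40]);
}
```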

/// Multiplies two polynomials modulo x^d, for some given `d`.
///
/// Can be used to simply multiply two polynomials, by passing `usize::MAX` or
