From a3246d82e59d016067089249b9827e9d318c4194 Mon Sep 17 00:00:00 2001
From: Jack Grigg
Date: Sat, 16 Mar 2019 04:43:08 +0000
Subject: [PATCH 1/3] Tests showing that scalar addition and subtraction don't reduce mod l

---
 src/scalar.rs | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)

diff --git a/src/scalar.rs b/src/scalar.rs
index fe794db1c..c87dfbf8f 100644
--- a/src/scalar.rs
+++ b/src/scalar.rs
@@ -1233,6 +1233,33 @@ mod test {
         0,0,0,11,0,0,0,0,0,15,0,0,0,0,0,-9,0,0,0,0,0,0,0,-1,0,0,0,0,0,0,0,7,
         0,0,0,0,0,-15,0,0,0,0,0,15,0,0,0,0,15,0,0,0,0,15,0,0,0,0,0,1,0,0,0,0];
 
+    static LARGEST_ED25519_S: Scalar = Scalar {
+        bytes: [
+            0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
+        ],
+    };
+
+    static CANONICAL_LARGEST_ED25519_S_PLUS_ONE: Scalar = Scalar {
+        bytes: [
+            0x7e, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97,
+            0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, 0xe7, 0x6d,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
+        ],
+    };
+
+    static CANONICAL_LARGEST_ED25519_S_MINUS_ONE: Scalar = Scalar {
+        bytes: [
+            0x7c, 0x34, 0x47, 0x75, 0x47, 0x4a, 0x7f, 0x97,
+            0x23, 0xb6, 0x3a, 0x8b, 0xe9, 0x2a, 0xe7, 0x6d,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
+        ],
+    };
+
     #[test]
     fn fuzzer_testcase_reduction() {
         // LE bytes of 24519928653854221733733552434404946937899825954937634815
@@ -1322,6 +1349,34 @@ mod test {
         }
     }
 
+    #[test]
+    fn add_reduces() {
+        // Check that the addition works
+        assert_eq!(
+            (LARGEST_ED25519_S + Scalar::one()).reduce(),
+            CANONICAL_LARGEST_ED25519_S_PLUS_ONE
+        );
+        // Check that the addition reduces
+        assert_eq!(
+            LARGEST_ED25519_S + Scalar::one(),
+            CANONICAL_LARGEST_ED25519_S_PLUS_ONE
+        );
+    }
+
+    #[test]
+    fn sub_reduces() {
+        // Check that the subtraction works
+        assert_eq!(
+            (LARGEST_ED25519_S - Scalar::one()).reduce(),
+            CANONICAL_LARGEST_ED25519_S_MINUS_ONE
+        );
+        // Check that the subtraction reduces
+        assert_eq!(
+            LARGEST_ED25519_S - Scalar::one(),
+            CANONICAL_LARGEST_ED25519_S_MINUS_ONE
+        );
+    }
+
     #[test]
     fn impl_add() {
         let two = Scalar::from(2u64);

From 90baabe50b56f19b3684b298d725817f8801d4ec Mon Sep 17 00:00:00 2001
From: Henry de Valence
Date: Tue, 6 Aug 2019 15:12:49 -0700
Subject: [PATCH 2/3] Ensure Scalar Add and Sub produce canonical results.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes #238.

This issue was discovered independently by both Jack "str4d" Grigg
(issue #238), who noted that reduction was not performed on addition,
and Laurent Grémy & Nicolas Surbayrole of Quarkslab, who noted that it
was possible to cause an overflow and compute incorrect results.
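A minimal sketch of the behaviour this change guarantees, exercised through the
public Scalar API only (the snippet is illustrative and not part of the diff
below; main() and the variable names are placeholders, and the byte string is
the LARGEST_ED25519_S value, 2^255 - 8, used by the tests added in the previous
patch):

    use curve25519_dalek::scalar::Scalar;

    fn main() {
        // 2^255 - 8 in little-endian bytes: larger than the group order l,
        // so this is not a reduced scalar.
        let large_bytes = [
            0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
        ];
        // from_bits keeps the unreduced value; from_bytes_mod_order reduces it.
        let unreduced = Scalar::from_bits(large_bytes);
        let reduced = Scalar::from_bytes_mod_order(large_bytes);

        // With this change both sums are canonical (reduced mod l) and equal;
        // previously the left-hand sum was left unreduced, so the byte
        // representations differed.
        assert_eq!(unreduced + Scalar::one(), reduced + Scalar::one());
    }

The explicit reduction in the diff reuses the Montgomery machinery:
montgomery_reduce computes (a * R^-1) mod l, so multiplying the raw sum by R
with mul_internal and then Montgomery-reducing yields the sum mod l.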
---
 src/scalar.rs | 82 +++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 76 insertions(+), 6 deletions(-)

diff --git a/src/scalar.rs b/src/scalar.rs
index c87dfbf8f..b5253acf0 100644
--- a/src/scalar.rs
+++ b/src/scalar.rs
@@ -297,7 +297,7 @@ define_mul_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
 
 impl<'b> AddAssign<&'b Scalar> for Scalar {
     fn add_assign(&mut self, _rhs: &'b Scalar) {
-        *self = UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack();
+        *self = *self + _rhs;
     }
 }
 
@@ -305,8 +305,17 @@ define_add_assign_variants!(LHS = Scalar, RHS = Scalar);
 
 impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
     type Output = Scalar;
+    #[allow(non_snake_case)]
     fn add(self, _rhs: &'b Scalar) -> Scalar {
-        UnpackedScalar::add(&self.unpack(), &_rhs.unpack()).pack()
+        // The UnpackedScalar::add function produces reduced outputs
+        // if the inputs are reduced. However, these inputs may not
+        // be reduced -- they might come from Scalar::from_bits. So
+        // after computing the sum, we explicitly reduce it mod l
+        // before repacking.
+        let sum = UnpackedScalar::add(&self.unpack(), &_rhs.unpack());
+        let sum_R = UnpackedScalar::mul_internal(&sum, &constants::R);
+        let sum_mod_l = UnpackedScalar::montgomery_reduce(&sum_R);
+        sum_mod_l.pack()
     }
 }
 
@@ -314,7 +323,7 @@ define_add_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
 
 impl<'b> SubAssign<&'b Scalar> for Scalar {
     fn sub_assign(&mut self, _rhs: &'b Scalar) {
-        *self = UnpackedScalar::sub(&self.unpack(), &_rhs.unpack()).pack();
+        *self = *self - _rhs;
     }
 }
 
@@ -322,8 +331,18 @@ define_sub_assign_variants!(LHS = Scalar, RHS = Scalar);
 
 impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar {
     type Output = Scalar;
-    fn sub(self, _rhs: &'b Scalar) -> Scalar {
-        UnpackedScalar::sub(&self.unpack(), &_rhs.unpack()).pack()
+    #[allow(non_snake_case)]
+    fn sub(self, rhs: &'b Scalar) -> Scalar {
+        // The UnpackedScalar::sub function requires reduced inputs
+        // and produces reduced output. However, these inputs may not
+        // be reduced -- they might come from Scalar::from_bits. So
+        // we explicitly reduce the inputs.
+        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
+        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
+        let rhs_R = UnpackedScalar::mul_internal(&rhs.unpack(), &constants::R);
+        let rhs_mod_l = UnpackedScalar::montgomery_reduce(&rhs_R);
+
+        UnpackedScalar::sub(&self_mod_l, &rhs_mod_l).pack()
     }
 }
 
@@ -331,8 +350,11 @@ define_sub_variants!(LHS = Scalar, RHS = Scalar, Output = Scalar);
 
 impl<'a> Neg for &'a Scalar {
     type Output = Scalar;
+    #[allow(non_snake_case)]
     fn neg(self) -> Scalar {
-        &Scalar::zero() - self
+        let self_R = UnpackedScalar::mul_internal(&self.unpack(), &constants::R);
+        let self_mod_l = UnpackedScalar::montgomery_reduce(&self_R);
+        UnpackedScalar::sub(&UnpackedScalar::zero(), &self_mod_l).pack()
     }
 }
 
@@ -1377,6 +1399,54 @@ mod test {
         );
     }
 
+    #[test]
+    fn quarkslab_scalar_overflow_does_not_occur() {
+        // Check that manually-constructing large Scalars with
+        // from_bits cannot produce incorrect results.
+        //
+        // The from_bits function is required to implement X/Ed25519,
+        // while all other methods of constructing a Scalar produce
+        // reduced Scalars. However, this "invariant loophole" allows
+        // constructing large scalars which are not reduced mod l.
+        //
+        // This issue was discovered independently by both Jack
+        // "str4d" Grigg (issue #238), who noted that reduction was
+        // not performed on addition, and Laurent Grémy & Nicolas
+        // Surbayrole of Quarkslab, who noted that it was possible to
+        // cause an overflow and compute incorrect results.
+        //
+        // This test is adapted from the one suggested by Quarkslab.
+
+        let large_bytes = [
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
+        ];
+
+        let a = Scalar::from_bytes_mod_order(large_bytes);
+        let b = Scalar::from_bits(large_bytes);
+
+        assert_eq!(a, b.reduce());
+
+        let a_3 = a + a + a;
+        let b_3 = b + b + b;
+
+        assert_eq!(a_3, b_3);
+
+        let neg_a = -a;
+        let neg_b = -b;
+
+        assert_eq!(neg_a, neg_b);
+
+        let minus_a_3 = Scalar::zero() - a - a - a;
+        let minus_b_3 = Scalar::zero() - b - b - b;
+
+        assert_eq!(minus_a_3, minus_b_3);
+        assert_eq!(minus_a_3, -a_3);
+        assert_eq!(minus_b_3, -b_3);
+    }
+
     #[test]
     fn impl_add() {
         let two = Scalar::from(2u64);

From a4808449923c060d2672f97b08bd56a796b743d0 Mon Sep 17 00:00:00 2001
From: Henry de Valence
Date: Tue, 6 Aug 2019 15:17:34 -0700
Subject: [PATCH 3/3] Tighten a too-permissive debug_assert in NafLookupTable8.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This issue was found by Laurent Grémy & Nicolas Surbayrole of Quarkslab.
---
 src/window.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/window.rs b/src/window.rs
index 08f2d7843..603760e80 100644
--- a/src/window.rs
+++ b/src/window.rs
@@ -171,7 +171,7 @@ pub(crate) struct NafLookupTable8<T>(pub(crate) [T; 64]);
 impl<T: Copy> NafLookupTable8<T> {
     pub fn select(&self, x: usize) -> T {
         debug_assert_eq!(x & 1, 1);
-        debug_assert!(x < 256);
+        debug_assert!(x < 128);
 
         self.0[x / 2]
     }
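
As a note on the window.rs change: the table holds 64 entries and select()
reads self.0[x / 2] for odd x, so the largest admissible x is 127 and the
bound must be x < 128 rather than x < 256. The sketch below restates that
index arithmetic; naf_table_index is a hypothetical stand-in for the real
select(), not code from the patch.

    // Odd x in 1..=127 maps to indices 0..=63 of a 64-entry table;
    // an x of 255 would pass the old `x < 256` check but index out of bounds.
    fn naf_table_index(x: usize) -> usize {
        debug_assert_eq!(x & 1, 1); // only odd multiples are stored
        debug_assert!(x < 128); // 64 entries => x / 2 must be < 64
        x / 2
    }

    fn main() {
        assert_eq!(naf_table_index(1), 0);
        assert_eq!(naf_table_index(127), 63);
    }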