diff --git a/mldsa/debug.c b/mldsa/debug.c
index 17e6c56e..200d33fc 100644
--- a/mldsa/debug.c
+++ b/mldsa/debug.c
@@ -10,6 +10,7 @@
 
 #if defined(MLDSA_DEBUG)
 
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include "debug.h"
@@ -27,8 +28,8 @@ void mld_debug_check_assert(const char *file, int line, const int val)
 }
 
 void mld_debug_check_bounds(const char *file, int line, const int32_t *ptr,
-                            unsigned len, int lower_bound_exclusive,
-                            int upper_bound_exclusive)
+                            unsigned len, int64_t lower_bound_exclusive,
+                            int64_t upper_bound_exclusive)
 {
   int err = 0;
   unsigned i;
@@ -37,12 +38,12 @@ void mld_debug_check_bounds(const char *file, int line, const int32_t *ptr,
     int32_t val = ptr[i];
     if (!(val > lower_bound_exclusive && val < upper_bound_exclusive))
     {
-      fprintf(
-          stderr,
-          MLD_DEBUG_ERROR_HEADER
-          "Bounds assertion failed: Index %u, value %d out of bounds (%d,%d)\n",
-          file, line, i, (int)val, lower_bound_exclusive,
-          upper_bound_exclusive);
+      fprintf(stderr,
+              MLD_DEBUG_ERROR_HEADER
+              "Bounds assertion failed: Index %u, value %d out of bounds "
+              "(%" PRId64 ",%" PRId64 ")\n",
+              file, line, i, (int)val, lower_bound_exclusive,
+              upper_bound_exclusive);
       err = 1;
     }
   }
diff --git a/mldsa/debug.h b/mldsa/debug.h
index 98ccd93c..af187bb9 100644
--- a/mldsa/debug.h
+++ b/mldsa/debug.h
@@ -43,8 +43,8 @@ void mld_debug_check_assert(const char *file, int line, const int val);
 **************************************************/
 #define mld_debug_check_bounds MLD_NAMESPACE(mldsa_debug_check_bounds)
 void mld_debug_check_bounds(const char *file, int line, const int32_t *ptr,
-                            unsigned len, int lower_bound_exclusive,
-                            int upper_bound_exclusive);
+                            unsigned len, int64_t lower_bound_exclusive,
+                            int64_t upper_bound_exclusive);
 
 /* Check assertion, calling exit() upon failure
  *
@@ -60,14 +60,15 @@ void mld_debug_check_bounds(const char *file, int line, const int32_t *ptr,
  * value_ub: Exclusive upper value bound */
 #define mld_assert_bound(ptr, len, value_lb, value_ub)                      \
   mld_debug_check_bounds(__FILE__, __LINE__, (const int32_t *)(ptr), (len), \
-                         (value_lb) - 1, (value_ub))
+                         ((int64_t)(value_lb)) - 1, (value_ub))
 
 /* Check absolute bounds in array of int32_t's
  * ptr: Base of array, expression of type int32_t*
  * len: Number of int32_t in array
  * value_abs_bd: Exclusive absolute upper bound */
-#define mld_assert_abs_bound(ptr, len, value_abs_bd) \
-  mld_assert_bound((ptr), (len), (-(value_abs_bd) + 1), (value_abs_bd))
+#define mld_assert_abs_bound(ptr, len, value_abs_bd)               \
+  mld_assert_bound((ptr), (len), (-((int64_t)(value_abs_bd)) + 1), \
+                   (value_abs_bd))
 
 /* Version of bounds assertions for 2-dimensional arrays */
 #define mld_assert_bound_2d(ptr, len0, len1, value_lb, value_ub) \
diff --git a/mldsa/ntt.h b/mldsa/ntt.h
index 3dc88385..bf176cf9 100644
--- a/mldsa/ntt.h
+++ b/mldsa/ntt.h
@@ -12,7 +12,7 @@
 /* Absolute exclusive upper bound for the output of the forward NTT */
 #define MLD_NTT_BOUND (9 * MLDSA_Q)
 /* Absolute exclusive upper bound for the output of the inverse NTT */
-#define MLD_INTT_BOUND 4211139
+#define MLD_INTT_BOUND (MLDSA_Q * 3 / 4)
 
 #define mld_ntt MLD_NAMESPACE(ntt)
 /*************************************************
diff --git a/mldsa/poly.c b/mldsa/poly.c
index 923e008c..6d32ddfc 100644
--- a/mldsa/poly.c
+++ b/mldsa/poly.c
@@ -18,9 +18,7 @@ void mld_poly_reduce(mld_poly *a)
 {
   unsigned int i;
 
-  /* TODO: Introduce the following after using inclusive lower bounds in
-   * the underlying debug function mld_debug_check_bounds(). */
-  /* mld_assert_bound(a->coeffs, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX); */
+  mld_assert_bound(a->coeffs, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX);
 
   for (i = 0; i < MLDSA_N; ++i)
   __loop__(
@@ -74,6 +72,8 @@ void mld_poly_add(mld_poly *r, const mld_poly *b)
 void mld_poly_sub(mld_poly *r, const mld_poly *b)
 {
   unsigned int i;
+  mld_assert_abs_bound(b->coeffs, MLDSA_N, MLDSA_Q);
+  mld_assert_abs_bound(r->coeffs, MLDSA_N, MLDSA_Q);
 
   for (i = 0; i < MLDSA_N; ++i)
   __loop__(
@@ -84,6 +84,8 @@ void mld_poly_sub(mld_poly *r, const mld_poly *b)
   {
     r->coeffs[i] = r->coeffs[i] - b->coeffs[i];
   }
+
+  mld_assert_bound(r->coeffs, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX);
 }
 
 void mld_poly_shiftl(mld_poly *a)
@@ -102,6 +104,7 @@ void mld_poly_shiftl(mld_poly *a)
      */
     a->coeffs[i] *= (1 << MLDSA_D);
   }
+  mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q);
 }
 
 #if !defined(MLD_USE_NATIVE_NTT)
@@ -132,7 +135,7 @@ void mld_poly_invntt_tomont(mld_poly *a)
 {
   mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);
   mld_intt_native(a->coeffs);
-  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q);
+  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_INTT_BOUND);
 }
 #endif /* MLD_USE_NATIVE_INTT */
 
@@ -141,6 +144,9 @@ void mld_poly_pointwise_montgomery(mld_poly *c, const mld_poly *a,
 {
   unsigned int i;
 
+  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND);
+  mld_assert_abs_bound(b->coeffs, MLDSA_N, MLD_NTT_BOUND);
+
   for (i = 0; i < MLDSA_N; ++i)
   __loop__(
     invariant(i <= MLDSA_N)
@@ -149,6 +155,8 @@ void mld_poly_pointwise_montgomery(mld_poly *c, const mld_poly *a,
   {
     c->coeffs[i] = mld_montgomery_reduce((int64_t)a->coeffs[i] * b->coeffs[i]);
   }
+
+  mld_assert_abs_bound(c->coeffs, MLDSA_N, MLDSA_Q);
 }
 
 void mld_poly_power2round(mld_poly *a1, mld_poly *a0, const mld_poly *a)
@@ -210,6 +218,7 @@ unsigned int mld_poly_make_hint(mld_poly *h, const mld_poly *a0,
   }
 
   mld_assert(s <= MLDSA_N);
+  mld_assert_bound(h->coeffs, MLDSA_N, 0, 2);
   return s;
 }
 
@@ -306,6 +315,7 @@ __contract__(
 {
   unsigned int ctr, pos;
   uint32_t t;
+  mld_assert_bound(a, offset, 0, MLDSA_Q);
 
 /* TODO: CBMC proof based on mld_rej_uniform_native */
 #if defined(MLD_USE_NATIVE_REJ_UNIFORM)
@@ -341,6 +351,8 @@ __contract__(
     }
   }
 
+  mld_assert_bound(a, ctr, 0, MLDSA_Q);
+
   return ctr;
 }
 
@@ -376,6 +388,7 @@ void mld_poly_uniform(mld_poly *a, const uint8_t seed[MLDSA_SEEDBYTES + 2])
     ctr = mld_rej_uniform(a->coeffs, MLDSA_N, ctr, buf, buflen);
   }
   mld_xof128_release(&state);
+  mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q);
 
   /* FIPS 204. Section 3.6.3 Destruction of intermediate values. */
   mld_zeroize(buf, sizeof(buf));
@@ -435,6 +448,11 @@ void mld_poly_uniform_4x(mld_poly *vec0, mld_poly *vec1, mld_poly *vec2,
   }
   mld_xof128_x4_release(&state);
 
+  mld_assert_bound(vec0->coeffs, MLDSA_N, 0, MLDSA_Q);
+  mld_assert_bound(vec1->coeffs, MLDSA_N, 0, MLDSA_Q);
+  mld_assert_bound(vec2->coeffs, MLDSA_N, 0, MLDSA_Q);
+  mld_assert_bound(vec3->coeffs, MLDSA_N, 0, MLDSA_Q);
+
   /* FIPS 204. Section 3.6.3 Destruction of intermediate values. */
   mld_zeroize(buf, sizeof(buf));
 }
@@ -496,6 +514,7 @@ __contract__(
   unsigned int ctr, pos;
   int t_valid;
   uint32_t t0, t1;
+  mld_assert_abs_bound(a, offset, MLDSA_ETA + 1);
 
 /* TODO: CBMC proof based on mld_rej_uniform_eta2_native */
 #if MLDSA_ETA == 2 && defined(MLD_USE_NATIVE_REJ_UNIFORM_ETA2)
@@ -573,6 +592,8 @@ __contract__(
 #endif /* MLDSA_ETA != 2 && MLDSA_ETA != 4 */
   }
 
+  mld_assert_abs_bound(a, ctr, MLDSA_ETA + 1);
+
   return ctr;
 }
 
@@ -649,6 +670,11 @@ void mld_poly_uniform_eta_4x(mld_poly *r0, mld_poly *r1, mld_poly *r2,
 
   mld_xof256_x4_release(&state);
 
+  mld_assert_abs_bound(r0->coeffs, MLDSA_N, MLDSA_ETA + 1);
+  mld_assert_abs_bound(r1->coeffs, MLDSA_N, MLDSA_ETA + 1);
+  mld_assert_abs_bound(r2->coeffs, MLDSA_N, MLDSA_ETA + 1);
+  mld_assert_abs_bound(r3->coeffs, MLDSA_N, MLDSA_ETA + 1);
+
   /* FIPS 204. Section 3.6.3 Destruction of intermediate values. */
   mld_zeroize(buf, sizeof(buf));
   mld_zeroize(extseed, sizeof(extseed));
@@ -676,6 +702,8 @@ void mld_poly_uniform_gamma1(mld_poly *a, const uint8_t seed[MLDSA_CRHBYTES],
 
   mld_xof256_release(&state);
 
+  mld_assert_bound(a->coeffs, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1);
+
   /* FIPS 204. Section 3.6.3 Destruction of intermediate values. */
   mld_zeroize(buf, sizeof(buf));
   mld_zeroize(extseed, sizeof(extseed));
@@ -719,6 +747,11 @@ void mld_poly_uniform_gamma1_4x(mld_poly *r0, mld_poly *r1, mld_poly *r2,
   mld_polyz_unpack(r3, buf[3]);
   mld_xof256_x4_release(&state);
 
+  mld_assert_bound(r0->coeffs, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1);
+  mld_assert_bound(r1->coeffs, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1);
+  mld_assert_bound(r2->coeffs, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1);
+  mld_assert_bound(r3->coeffs, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1);
+
   /* FIPS 204. Section 3.6.3 Destruction of intermediate values. */
   mld_zeroize(buf, sizeof(buf));
   mld_zeroize(extseed, sizeof(extseed));
@@ -796,11 +829,11 @@ void mld_poly_challenge(mld_poly *c, const uint8_t seed[MLDSA_CTILDEBYTES])
     signs >>= 1;
   }
 
+  mld_assert_bound(c->coeffs, MLDSA_N, -1, 2);
+
   /* FIPS 204. Section 3.6.3 Destruction of intermediate values. */
   mld_zeroize(buf, sizeof(buf));
   mld_zeroize(&signs, sizeof(signs));
-
-  mld_assert_bound(c->coeffs, MLDSA_N, -1, 2);
 }
 
 void mld_polyeta_pack(uint8_t *r, const mld_poly *a)
diff --git a/mldsa/polyvec.c b/mldsa/polyvec.c
index d4a22c8f..433cd828 100644
--- a/mldsa/polyvec.c
+++ b/mldsa/polyvec.c
@@ -6,6 +6,7 @@
 #include <stdint.h>
 
 #include "common.h"
+#include "debug.h"
 #include "poly.h"
 #include "polyvec.h"
@@ -120,6 +121,7 @@ void mld_polyvec_matrix_pointwise_montgomery(mld_polyveck *t,
                                              const mld_polyvecl *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_NTT_BOUND);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -131,6 +133,8 @@ void mld_polyvec_matrix_pointwise_montgomery(mld_polyveck *t,
   {
     mld_polyvecl_pointwise_acc_montgomery(&t->vec[i], &mat[i], v);
   }
+
+  mld_assert_abs_bound_2d(t->vec, MLDSA_K, MLDSA_N, MLDSA_Q);
 }
 
 /**************************************************************/
@@ -155,11 +159,15 @@ void mld_polyvecl_uniform_gamma1(mld_polyvecl *v,
   mld_poly_uniform_gamma1_4x(&v->vec[3], &v->vec[4], &v->vec[5], &v->vec[6],
                              seed, nonce + 3, nonce + 4, nonce + 5, nonce + 6);
 #endif /* MLDSA_L == 7 */
+
+  mld_assert_bound_2d(v->vec, MLDSA_L, MLDSA_N, -(MLDSA_GAMMA1 - 1),
+                      MLDSA_GAMMA1 + 1);
 }
 
 void mld_polyvecl_reduce(mld_polyvecl *v)
 {
   unsigned int i;
+  mld_assert_bound_2d(v->vec, MLDSA_L, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX);
 
   for (i = 0; i < MLDSA_L; ++i)
   __loop__(
@@ -171,6 +179,9 @@ void mld_polyvecl_reduce(mld_polyvecl *v)
   {
     mld_poly_reduce(&v->vec[i]);
   }
+
+  mld_assert_bound_2d(v->vec, MLDSA_L, MLDSA_N, -REDUCE32_RANGE_MAX,
+                      REDUCE32_RANGE_MAX);
 }
 
 /* Reference: We use destructive version (output=first input) to avoid
@@ -191,11 +202,13 @@ void mld_polyvecl_add(mld_polyvecl *u, const mld_polyvecl *v)
   {
     mld_poly_add(&u->vec[i], &v->vec[i]);
   }
+  mld_assert_bound_2d(u->vec, MLDSA_L, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX);
 }
 
 void mld_polyvecl_ntt(mld_polyvecl *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLDSA_Q);
 
   for (i = 0; i < MLDSA_L; ++i)
   __loop__(
@@ -206,11 +219,14 @@ void mld_polyvecl_ntt(mld_polyvecl *v)
   {
     mld_poly_ntt(&v->vec[i]);
   }
+
+  mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_NTT_BOUND);
 }
 
 void mld_polyvecl_invntt_tomont(mld_polyvecl *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLDSA_Q);
 
   for (i = 0; i < MLDSA_L; ++i)
   __loop__(
@@ -221,12 +237,16 @@ void mld_polyvecl_invntt_tomont(mld_polyvecl *v)
   {
     mld_poly_invntt_tomont(&v->vec[i]);
   }
+
+  mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_INTT_BOUND);
 }
 
 void mld_polyvecl_pointwise_poly_montgomery(mld_polyvecl *r, const mld_poly *a,
                                             const mld_polyvecl *v)
 {
   unsigned int i;
+  mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND);
+  mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_NTT_BOUND);
 
   for (i = 0; i < MLDSA_L; ++i)
   __loop__(
@@ -237,12 +257,16 @@ void mld_polyvecl_pointwise_poly_montgomery(mld_polyvecl *r, const mld_poly *a,
   {
     mld_poly_pointwise_montgomery(&r->vec[i], a, &v->vec[i]);
   }
+
+  mld_assert_abs_bound_2d(r->vec, MLDSA_L, MLDSA_N, MLDSA_Q);
 }
 
 void mld_polyvecl_pointwise_acc_montgomery(mld_poly *w, const mld_polyvecl *u,
                                            const mld_polyvecl *v)
 {
   unsigned int i, j;
+  mld_assert_bound_2d(u->vec, MLDSA_L, MLDSA_N, 0, MLDSA_Q);
+  mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_NTT_BOUND);
   /* The first input is bounded by [0, Q-1] inclusive
    * The second input is bounded by [-9Q+1, 9Q-1] inclusive. Hence, we can
    * safely accumulate in 64-bits without intermediate reductions as
@@ -289,6 +313,8 @@ void mld_polyvecl_pointwise_acc_montgomery(mld_poly *w, const mld_polyvecl *u,
     cassert(r < MLDSA_Q);
     w->coeffs[i] = r;
   }
+
+  mld_assert_abs_bound(w->coeffs, MLDSA_N, MLDSA_Q);
 }
 
 
@@ -296,6 +322,8 @@ uint32_t mld_polyvecl_chknorm(const mld_polyvecl *v, int32_t bound)
 {
   unsigned int i;
   uint32_t t = 0;
+  mld_assert_bound_2d(v->vec, MLDSA_L, MLDSA_N, -REDUCE32_RANGE_MAX,
+                      REDUCE32_RANGE_MAX);
 
   for (i = 0; i < MLDSA_L; ++i)
   __loop__(
@@ -310,7 +338,6 @@ uint32_t mld_polyvecl_chknorm(const mld_polyvecl *v, int32_t bound)
      */
     t |= mld_poly_chknorm(&v->vec[i], bound);
   }
-
   return t;
 }
 
@@ -321,6 +348,7 @@ uint32_t mld_polyvecl_chknorm(const mld_polyvecl *v, int32_t bound)
 void mld_polyveck_reduce(mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -333,11 +361,15 @@ void mld_polyveck_reduce(mld_polyveck *v)
   {
     mld_poly_reduce(&v->vec[i]);
   }
+
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, -REDUCE32_RANGE_MAX,
+                      REDUCE32_RANGE_MAX);
 }
 
 void mld_polyveck_caddq(mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -348,6 +380,8 @@ void mld_polyveck_caddq(mld_polyveck *v)
   {
     mld_poly_caddq(&v->vec[i]);
   }
+
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q);
 }
 
 /* Reference: We use destructive version (output=first input) to avoid
@@ -363,15 +397,19 @@ void mld_polyveck_add(mld_polyveck *u, const mld_polyveck *v)
     invariant(forall(k0, i, MLDSA_K, forall(k1, 0, MLDSA_N, u->vec[k0].coeffs[k1] == loop_entry(*u).vec[k0].coeffs[k1])))
     invariant(forall(k4, 0, i, forall(k5, 0, MLDSA_N,
       u->vec[k4].coeffs[k5] == loop_entry(*u).vec[k4].coeffs[k5] + v->vec[k4].coeffs[k5])))
+    invariant(forall(k6, 0, i, array_bound(u->vec[k6].coeffs, 0, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX)))
   )
   {
     mld_poly_add(&u->vec[i], &v->vec[i]);
   }
+  mld_assert_bound_2d(u->vec, MLDSA_K, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX);
 }
 
 void mld_polyveck_sub(mld_polyveck *u, const mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(u->vec, MLDSA_K, MLDSA_N, MLDSA_Q);
+  mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -384,11 +422,14 @@ void mld_polyveck_sub(mld_polyveck *u, const mld_polyveck *v)
   {
     mld_poly_sub(&u->vec[i], &v->vec[i]);
   }
+
+  mld_assert_bound_2d(u->vec, MLDSA_K, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX);
 }
 
 void mld_polyveck_shiftl(mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, 1 << 10);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -401,11 +442,14 @@ void mld_polyveck_shiftl(mld_polyveck *v)
   {
     mld_poly_shiftl(&v->vec[i]);
   }
+
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q);
 }
 
 void mld_polyveck_ntt(mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -416,11 +460,13 @@ void mld_polyveck_ntt(mld_polyveck *v)
   {
     mld_poly_ntt(&v->vec[i]);
   }
+  mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLD_NTT_BOUND);
 }
 
 void mld_polyveck_invntt_tomont(mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -431,12 +477,15 @@ void mld_polyveck_invntt_tomont(mld_polyveck *v)
   {
     mld_poly_invntt_tomont(&v->vec[i]);
   }
+
+  mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLD_INTT_BOUND);
 }
 
 void mld_polyveck_pointwise_poly_montgomery(mld_polyveck *r, const mld_poly *a,
                                             const mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLD_NTT_BOUND);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -447,6 +496,7 @@ void mld_polyveck_pointwise_poly_montgomery(mld_polyveck *r, const mld_poly *a,
   {
     mld_poly_pointwise_montgomery(&r->vec[i], a, &v->vec[i]);
   }
+  mld_assert_abs_bound_2d(r->vec, MLDSA_K, MLDSA_N, MLDSA_Q);
 }
 
 
@@ -454,6 +504,8 @@ uint32_t mld_polyveck_chknorm(const mld_polyveck *v, int32_t bound)
 {
   unsigned int i;
   uint32_t t = 0;
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, -REDUCE32_RANGE_MAX,
+                      REDUCE32_RANGE_MAX);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -476,6 +528,7 @@ void mld_polyveck_power2round(mld_polyveck *v1, mld_polyveck *v0,
                               const mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -487,12 +540,18 @@ void mld_polyveck_power2round(mld_polyveck *v1, mld_polyveck *v0,
   {
     mld_poly_power2round(&v1->vec[i], &v0->vec[i], &v->vec[i]);
   }
+
+  mld_assert_bound_2d(v0->vec, MLDSA_K, MLDSA_N, -(MLD_2_POW_D / 2) + 1,
+                      (MLD_2_POW_D / 2) + 1);
+  mld_assert_bound_2d(v1->vec, MLDSA_K, MLDSA_N, 0,
+                      ((MLDSA_Q - 1) / MLD_2_POW_D) + 1);
 }
 
 void mld_polyveck_decompose(mld_polyveck *v1, mld_polyveck *v0,
                             const mld_polyveck *v)
 {
   unsigned int i;
+  mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -506,6 +565,10 @@ void mld_polyveck_decompose(mld_polyveck *v1, mld_polyveck *v0,
   {
     mld_poly_decompose(&v1->vec[i], &v0->vec[i], &v->vec[i]);
   }
+
+  mld_assert_bound_2d(v1->vec, MLDSA_K, MLDSA_N, 0,
+                      (MLDSA_Q - 1) / (2 * MLDSA_GAMMA2));
+  mld_assert_abs_bound_2d(v0->vec, MLDSA_K, MLDSA_N, MLDSA_GAMMA2 + 1);
 }
 
 unsigned int mld_polyveck_make_hint(mld_polyveck *h, const mld_polyveck *v0,
@@ -524,6 +587,7 @@ unsigned int mld_polyveck_make_hint(mld_polyveck *h, const mld_polyveck *v0,
     s += mld_poly_make_hint(&h->vec[i], &v0->vec[i], &v1->vec[i]);
   }
 
+  mld_assert_bound_2d(h->vec, MLDSA_K, MLDSA_N, 0, 2);
   return s;
 }
 
@@ -531,6 +595,8 @@ void mld_polyveck_use_hint(mld_polyveck *w, const mld_polyveck *u,
                            const mld_polyveck *h)
 {
   unsigned int i;
+  mld_assert_bound_2d(u->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q);
+  mld_assert_bound_2d(h->vec, MLDSA_K, MLDSA_N, 0, 2);
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -543,12 +609,17 @@ void mld_polyveck_use_hint(mld_polyveck *w, const mld_polyveck *u,
   {
     mld_poly_use_hint(&w->vec[i], &u->vec[i], &h->vec[i]);
   }
+
+  mld_assert_bound_2d(w->vec, MLDSA_K, MLDSA_N, 0,
+                      (MLDSA_Q - 1) / (2 * MLDSA_GAMMA2));
 }
 
 void mld_polyveck_pack_w1(uint8_t r[MLDSA_K * MLDSA_POLYW1_PACKEDBYTES],
                           const mld_polyveck *w1)
 {
   unsigned int i;
+  mld_assert_bound_2d(w1->vec, MLDSA_K, MLDSA_N, 0,
+                      (MLDSA_Q - 1) / (2 * MLDSA_GAMMA2));
 
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
@@ -564,6 +635,7 @@ void mld_polyveck_pack_eta(uint8_t r[MLDSA_K * MLDSA_POLYETA_PACKEDBYTES],
                            const mld_polyveck *p)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(p->vec, MLDSA_K, MLDSA_N, MLDSA_ETA + 1);
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
     assigns(i, object_whole(r))
@@ -578,6 +650,7 @@ void mld_polyvecl_pack_eta(uint8_t r[MLDSA_L * MLDSA_POLYETA_PACKEDBYTES],
                            const mld_polyvecl *p)
 {
   unsigned int i;
+  mld_assert_abs_bound_2d(p->vec, MLDSA_L, MLDSA_N, MLDSA_ETA + 1);
   for (i = 0; i < MLDSA_L; ++i)
   __loop__(
     assigns(i, object_whole(r))
@@ -592,6 +665,8 @@ void mld_polyvecl_pack_z(uint8_t r[MLDSA_L * MLDSA_POLYZ_PACKEDBYTES],
                          const mld_polyvecl *p)
 {
   unsigned int i;
+  mld_assert_bound_2d(p->vec, MLDSA_L, MLDSA_N, -(MLDSA_GAMMA1 - 1),
+                      MLDSA_GAMMA1 + 1);
   for (i = 0; i < MLDSA_L; ++i)
   __loop__(
     assigns(i, object_whole(r))
@@ -607,6 +682,8 @@ void mld_polyveck_pack_t0(uint8_t r[MLDSA_K * MLDSA_POLYT0_PACKEDBYTES],
                           const mld_polyveck *p)
 {
   unsigned int i;
+  mld_assert_bound_2d(p->vec, MLDSA_K, MLDSA_N, -(1 << (MLDSA_D - 1)) + 1,
+                      (1 << (MLDSA_D - 1)) + 1);
   for (i = 0; i < MLDSA_K; ++i)
   __loop__(
     assigns(i, object_whole(r))
@@ -625,6 +702,9 @@ void mld_polyvecl_unpack_eta(
   {
     mld_polyeta_unpack(&p->vec[i], r + i * MLDSA_POLYETA_PACKEDBYTES);
   }
+
+  mld_assert_bound_2d(p->vec, MLDSA_L, MLDSA_N, MLD_POLYETA_UNPACK_LOWER_BOUND,
+                      MLDSA_ETA + 1);
 }
 
 void mld_polyvecl_unpack_z(mld_polyvecl *z,
@@ -635,6 +715,9 @@ void mld_polyvecl_unpack_z(mld_polyvecl *z,
   {
     mld_polyz_unpack(&z->vec[i], r + i * MLDSA_POLYZ_PACKEDBYTES);
   }
+
+  mld_assert_bound_2d(z->vec, MLDSA_L, MLDSA_N, -(MLDSA_GAMMA1 - 1),
+                      MLDSA_GAMMA1 + 1);
 }
 
 void mld_polyveck_unpack_eta(
@@ -645,6 +728,9 @@ void mld_polyveck_unpack_eta(
   {
     mld_polyeta_unpack(&p->vec[i], r + i * MLDSA_POLYETA_PACKEDBYTES);
   }
+
+  mld_assert_bound_2d(p->vec, MLDSA_K, MLDSA_N, MLD_POLYETA_UNPACK_LOWER_BOUND,
+                      MLDSA_ETA + 1);
 }
 
 void mld_polyveck_unpack_t0(mld_polyveck *p,
@@ -655,4 +741,7 @@ void mld_polyveck_unpack_t0(mld_polyveck *p,
   {
     mld_polyt0_unpack(&p->vec[i], r + i * MLDSA_POLYT0_PACKEDBYTES);
   }
+
+  mld_assert_bound_2d(p->vec, MLDSA_K, MLDSA_N, -(1 << (MLDSA_D - 1)) + 1,
+                      (1 << (MLDSA_D - 1)) + 1);
 }
diff --git a/mldsa/polyvec.h b/mldsa/polyvec.h
index 06068eb2..12211ec0 100644
--- a/mldsa/polyvec.h
+++ b/mldsa/polyvec.h
@@ -271,6 +271,8 @@ __contract__(
   requires(forall(k2, 0, MLDSA_K, forall(k3, 0, MLDSA_N, (int64_t) u->vec[k2].coeffs[k3] + v->vec[k2].coeffs[k3] >= INT32_MIN)))
   assigns(object_whole(u))
   ensures(forall(k4, 0, MLDSA_K, forall(k5, 0, MLDSA_N, u->vec[k4].coeffs[k5] == old(*u).vec[k4].coeffs[k5] + v->vec[k4].coeffs[k5])))
+  ensures(forall(k6, 0, MLDSA_K,
+    array_bound(u->vec[k6].coeffs, 0, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX)))
 );
 
 #define mld_polyveck_sub MLD_NAMESPACE(polyveck_sub)
diff --git a/proofs/cbmc/polyveck_decompose/Makefile b/proofs/cbmc/polyveck_decompose/Makefile
index 2014a65c..123f273d 100644
--- a/proofs/cbmc/polyveck_decompose/Makefile
+++ b/proofs/cbmc/polyveck_decompose/Makefile
@@ -38,7 +38,7 @@ FUNCTION_NAME = polyveck_decompose
 # EXPENSIVE = true
 
 # This function is large enough to need...
-CBMC_OBJECT_BITS = 8
+CBMC_OBJECT_BITS = 10
 
 # If you require access to a file-local ("static") function or object to conduct
 # your proof, set the following (and do not include the original source file
diff --git a/proofs/cbmc/polyveck_power2round/Makefile b/proofs/cbmc/polyveck_power2round/Makefile
index 56f78ada..d36267f5 100644
--- a/proofs/cbmc/polyveck_power2round/Makefile
+++ b/proofs/cbmc/polyveck_power2round/Makefile
@@ -37,7 +37,7 @@ FUNCTION_NAME = polyveck_power2round
 # EXPENSIVE = true
 
 # This function is large enough to need...
-CBMC_OBJECT_BITS = 9
+CBMC_OBJECT_BITS = 10
 
 # If you require access to a file-local ("static") function or object to conduct
 # your proof, set the following (and do not include the original source file
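
Note on the int64_t widening in mld_debug_check_bounds: mld_assert_bound treats value_lb as an inclusive lower bound and forwards (value_lb) - 1 as the exclusive one. With the previous int parameters, the assertion now enabled in mld_poly_reduce would instantiate value_lb = INT32_MIN, so (value_lb) - 1 would overflow signed int; evaluating the subtraction in int64_t makes it well-defined for every int32_t bound. The following is a minimal standalone sketch of this exclusive-bounds convention (hypothetical helper names, not part of the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same convention as mld_debug_check_bounds: both bounds are EXCLUSIVE,
 * so an element v passes iff lo < v && v < hi. Taking the bounds as
 * int64_t means lo = (int64_t)INT32_MIN - 1 is representable, i.e. the
 * vacuous lower bound "any int32_t" can be expressed. */
static int check_bounds(const int32_t *ptr, unsigned len, int64_t lo,
                        int64_t hi)
{
  unsigned i;
  for (i = 0; i < len; i++)
  {
    if (!(ptr[i] > lo && ptr[i] < hi)) /* int32_t promotes to int64_t */
    {
      fprintf(stderr, "index %u: %" PRId64 " outside (%" PRId64 ",%" PRId64 ")\n",
              i, (int64_t)ptr[i], lo, hi);
      return 0;
    }
  }
  return 1;
}

int main(void)
{
  int32_t a[3] = {INT32_MIN, 0, INT32_MAX};
  /* The inclusive range [INT32_MIN, INT32_MAX] rendered as exclusive
   * bounds; the lower bound only exists in 64-bit arithmetic. */
  return check_bounds(a, 3, (int64_t)INT32_MIN - 1, (int64_t)INT32_MAX + 1)
             ? 0
             : 1;
}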
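
Worked version of the accumulation bound asserted in mld_polyvecl_pointwise_acc_montgomery: with one input in [0, Q-1] and the other bounded in absolute value by 9Q-1 (MLD_NTT_BOUND is exclusive), each product is below (Q-1)(9Q-1) < 2^50 in magnitude, and summing at most L = 7 of them stays below 2^53, far inside int64_t. A compile-time sketch of that arithmetic (illustrative only; the macro names below are hypothetical, and the patch itself relies on the CBMC proofs rather than this check):

#include <stdint.h>

#define Q 8380417 /* MLDSA_Q */
#define L_MAX 7   /* largest MLDSA_L across the ML-DSA parameter sets */

/* Worst-case accumulator magnitude: L products of |u_i| <= Q-1 and
 * |v_i| <= 9Q-1, summed without intermediate reduction. */
#define ACC_MAX ((int64_t)L_MAX * (Q - 1) * (9 * (int64_t)Q - 1))

_Static_assert(ACC_MAX < INT64_MAX, "64-bit accumulation cannot overflow");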