45template <
typename Builder>
54 lookup[ColumnIdx::C2][0],
55 lookup[ColumnIdx::C2][1],
56 lookup[ColumnIdx::C2][2],
57 lookup[ColumnIdx::C2][3],
60 lookup[ColumnIdx::C3][0],
61 lookup[ColumnIdx::C3][1],
62 lookup[ColumnIdx::C3][2],
63 lookup[ColumnIdx::C3][3],
82template <
typename Builder>
87 for (
auto& col : lookup_data.columns) {
88 for (
auto& elem : col) {
106template <
typename Builder>
111 Builder* ctx = w_in[0].get_context();
116 for (
size_t i = 0; i < 16; ++i) {
119 if ((ctx ==
nullptr) && w_in[i].get_context()) {
120 ctx = w_in[i].get_context();
125 for (
size_t i = 16; i < 64; ++i) {
126 auto& w_left = w_sparse[i - 15];
127 auto& w_right = w_sparse[i - 2];
129 if (!w_left.has_sparse_limbs) {
130 w_left = convert_witness(w_left.normal);
132 if (!w_right.has_sparse_limbs) {
133 w_right = convert_witness(w_right.normal);
139 w_left.sparse_limbs[0] * left_multipliers[0],
140 w_left.sparse_limbs[1] * left_multipliers[1],
141 w_left.sparse_limbs[2] * left_multipliers[2],
142 w_left.sparse_limbs[3] * left_multipliers[3],
148 w_right.sparse_limbs[0] * right_multipliers[0],
149 w_right.sparse_limbs[1] * right_multipliers[1],
150 w_right.sparse_limbs[2] * right_multipliers[2],
151 w_right.sparse_limbs[3] * right_multipliers[3],
159 left[0].
add_two(left[1], left[2]).
add_two(left[3], w_left.rotated_limb_corrections[1]) *
fr(4);
162 const field_pt xor_result_sparse = right[0]
164 .
add_two(right[3], w_right.rotated_limb_corrections[2])
165 .
add_two(w_right.rotated_limb_corrections[3], left_xor_sparse);
171 field_pt w_out_raw = xor_result.
add_two(w_sparse[i - 16].normal, w_sparse[i - 7].normal);
184 field_pt w_out_raw_inv_pow_two = w_out_raw * inv_pow_two;
185 field_pt w_out_inv_pow_two = w_out * inv_pow_two;
186 field_pt divisor = w_out_raw_inv_pow_two - w_out_inv_pow_two;
195 for (
size_t i = 0; i < 64; ++i) {
196 w_extended[i] = w_sparse[i].normal;
211template <
typename Builder>
231template <
typename Builder>
258template <
typename Builder>
263 constexpr fr SPARSE_MULT =
fr(7);
268 field_pt rotation_result = lookup[ColumnIdx::C3][0];
269 e.
sparse = lookup[ColumnIdx::C2][0];
270 field_pt sparse_L2 = lookup[ColumnIdx::C2][2];
273 field_pt xor_result = (rotation_result * SPARSE_MULT)
274 .add_two(e.
sparse * (rotation_coefficients[0] * SPARSE_MULT +
fr(1)),
275 sparse_L2 * (rotation_coefficients[2] * SPARSE_MULT));
283 return choose_result;
303template <
typename Builder>
308 constexpr fr SPARSE_MULT =
fr(4);
314 field_pt rotation_result = lookup[ColumnIdx::C3][0];
315 a.sparse = lookup[ColumnIdx::C2][0];
316 field_pt sparse_L1_acc = lookup[ColumnIdx::C2][1];
319 field_pt xor_result = (rotation_result * SPARSE_MULT)
320 .add_two(
a.sparse * (rotation_coefficients[0] * SPARSE_MULT +
fr(1)),
321 sparse_L1_acc * (rotation_coefficients[1] * SPARSE_MULT));
329 return majority_result;
342template <
typename Builder>
345 size_t overflow_bits)
350 Builder* ctx =
a.get_context() ?
a.get_context() :
b.get_context();
353 uint256_t normalized_sum =
static_cast<uint32_t
>(
sum.data[0]);
355 if (
a.is_constant() &&
b.is_constant()) {
356 return field_pt(ctx, normalized_sum);
359 fr overflow_value =
fr((
sum - normalized_sum) >> 32);
382template <
typename Builder>
394 auto b = map_into_maj_sparse_form(h_init[1]);
395 auto c = map_into_maj_sparse_form(h_init[2]);
398 auto f = map_into_choose_sparse_form(h_init[5]);
399 auto g = map_into_choose_sparse_form(h_init[6]);
416 for (
size_t i = 0; i < 64; ++i) {
417 auto ch = choose_with_sigma1(e, f,
g);
418 auto maj = majority_with_sigma0(
a,
b, c);
421 auto T1 = ch.add_two(h.
normal, w[i] +
fr(round_constants[i]));
430 a.normal = add_normalize_unsafe(T1, maj, 3);
436 apply_32_bit_range_constraint_via_lookup(
a.normal);
437 apply_32_bit_range_constraint_via_lookup(e.
normal);
442 output[0] = add_normalize_unsafe(
a.normal, h_init[0], 1);
443 output[1] = add_normalize_unsafe(
b.normal, h_init[1], 1);
444 output[2] = add_normalize_unsafe(c.normal, h_init[2], 1);
445 output[3] = add_normalize_unsafe(d.
normal, h_init[3], 1);
446 output[4] = add_normalize_unsafe(e.
normal, h_init[4], 1);
447 output[5] = add_normalize_unsafe(f.normal, h_init[5], 1);
448 output[6] = add_normalize_unsafe(
g.normal, h_init[6], 1);
449 output[7] = add_normalize_unsafe(h.
normal, h_init[7], 1);
453 for (
const auto& val : output) {
454 val.create_range_constraint(32);
static sparse_value map_into_maj_sparse_form(const field_ct &input)
Convert a field element to sparse form for use in the Majority function.
static field_ct add_normalize_unsafe(const field_ct &a, const field_ct &b, size_t overflow_bits)
Compute (a + b) mod 2^32 with circuit constraints.
static std::array< field_ct, 64 > extend_witness(const std::array< field_ct, 16 > &w_in)
Extend the 16-word message block to 64 words per SHA-256 specification.
static field_ct choose_with_sigma1(sparse_value &e, const sparse_value &f, const sparse_value &g)
Compute Σ₁(e) + Ch(e,f,g) for SHA-256 compression rounds.
static sparse_witness_limbs convert_witness(const field_ct &input)
Convert a 32-bit value to sparse limbs form for message schedule extension.
static field_ct majority_with_sigma0(sparse_value &a, const sparse_value &b, const sparse_value &c)
Compute Σ₀(a) + Maj(a,b,c) for SHA-256 compression rounds.
static sparse_value map_into_choose_sparse_form(const field_ct &input)
Convert a field element to sparse form for use in the Choose function.
static std::array< field_ct, 8 > sha256_block(const std::array< field_ct, 8 > &h_init, const std::array< field_ct, 16 > &input)
Apply the SHA-256 compression function to a single 512-bit message block.
static void apply_32_bit_range_constraint_via_lookup(const field_ct &input)
Apply an implicit 32-bit range constraint by performing a lookup on the input.
void create_range_constraint(size_t num_bits, std::string const &msg="field_t::range_constraint") const
Let x = *this.normalize(); constrain x.v < 2^{num_bits}.
bb::fr get_value() const
Given a := *this, compute its value given by a.v * a.mul + a.add.
field_t add_two(const field_t &add_b, const field_t &add_c) const
Efficiently compute (*this + add_b + add_c) using a single big_mul gate.
static plookup::ReadData< field_pt > get_lookup_accumulators(const plookup::MultiTableId id, const field_pt &key_a, const field_pt &key_b=0, const bool is_2_to_1_lookup=false)
static field_pt read_from_1_to_2_table(const plookup::MultiTableId id, const field_pt &key_a)
stdlib::witness_t< bb::UltraCircuitBuilder > witness_pt
stdlib::field_t< UltraCircuitBuilder > field_pt
std::array< bb::fr, 3 > get_choose_rotation_multipliers()
Returns multipliers for computing Σ₁(e) rotations in choose_with_sigma1.
std::array< bb::fr, 3 > get_majority_rotation_multipliers()
Returns multipliers for computing Σ₀(a) rotations in majority_with_sigma0.
void g(field_t< Builder > state[BLAKE_STATE_SIZE], size_t a, size_t b, size_t c, size_t d, field_t< Builder > x, field_t< Builder > y)
void mark_witness_as_used(const field_t< Builder > &field)
Mark a field_t witness as used (for UltraBuilder only).
Entry point for Barretenberg command-line interface.
field< Bn254FrParams > fr
Inner sum(Cont< Inner, Args... > const &in)
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Plookup tables for SHA-256 using sparse form representation.
BB_INLINE constexpr field pow(const uint256_t &exponent) const noexcept
constexpr field invert() const noexcept
BB_INLINE constexpr field from_montgomery_form() const noexcept
std::array< field_ct, 4 > rotated_limb_corrections
std::array< field_ct, 4 > sparse_limbs