Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
sha256.cpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: Complete, auditors: [Luke], commit: }
3// external_1: { status: not started, auditors: [], commit: }
4// external_2: { status: not started, auditors: [], commit: }
5// =====================
6
24#include "sha256.hpp"
25
31
32using namespace bb;
33
34namespace bb::stdlib {
35using namespace bb::plookup;
36
// Convert a 32-bit field element into sparse-limb form for the message-schedule
// extension (per this file's index: "Convert a 32-bit value to sparse limbs form
// for message schedule extension"). One plookup read against the
// SHA256_WITNESS_INPUT multi-table produces both limb columns consumed below.
// NOTE(review): Doxygen listing — original line numbers are fused to each line
// and some lines are elided: line 46 (the function signature,
// SHA256<Builder>::convert_witness per the index), and lines 53/59 which
// presumably open `result.sparse_limbs = {` and `result.rotated_limb_corrections
// = {` (or similar) for the two brace-lists below — TODO confirm against the
// real source.
45template <typename Builder>
47{
49
50 sparse_witness_limbs result(input);
51 const auto lookup = plookup_read<Builder>::get_lookup_accumulators(MultiTableId::SHA256_WITNESS_INPUT, input);
// Four accumulator slices of column C2 become the four sparse limbs
// (assignment target is on elided line 53).
54 lookup[ColumnIdx::C2][0],
55 lookup[ColumnIdx::C2][1],
56 lookup[ColumnIdx::C2][2],
57 lookup[ColumnIdx::C2][3],
58 };
// Column C3 slices populate a second 4-element member (target on elided line 59;
// likely rotated_limb_corrections, declared in sha256.hpp per the index).
60 lookup[ColumnIdx::C3][0],
61 lookup[ColumnIdx::C3][1],
62 lookup[ColumnIdx::C3][2],
63 lookup[ColumnIdx::C3][3],
64 };
// Flag set so extend_witness() can skip re-conversion of this word.
65 result.has_sparse_limbs = true;
66
67 return result;
68}
69
// Apply an implicit 32-bit range constraint on `input` purely via the side
// effect of a SHA256_MAJ_INPUT lookup (per this file's index: "Apply an implicit
// 32-bit range constraint by performing a lookup on the input"). The returned
// accumulators are discarded.
// NOTE(review): Doxygen listing elides line 83 (the function signature) and
// line 89 (the inner-loop body). Given the cross-reference for
// mark_witness_as_used(const field_t<Builder>&), line 89 presumably calls
// mark_witness_as_used(elem) — TODO confirm against the real source.
82template <typename Builder>
84{
85 auto lookup_data = plookup_read<Builder>::get_lookup_accumulators(MultiTableId::SHA256_MAJ_INPUT, input);
86 // Mark all accumulator outputs as intentionally unused (they exist only for the range constraint side-effect)
87 for (auto& col : lookup_data.columns) {
88 for (auto& elem : col) {
90 }
91 }
92}
93
// Extend the 16-word message block to the 64-word schedule W[0..63] per the
// SHA-256 specification: W[i] = σ₁(W[i-2]) + W[i-7] + σ₀(W[i-15]) + W[i-16]
// (mod 2^32), with σ₀/σ₁ evaluated in sparse form via plookup tables.
// NOTE(review): Doxygen listing elides several lines: 107 (signature:
// std::array<field_ct, 64> extend_witness(const std::array<field_ct, 16>& w_in)
// per the index), 109/113 (presumably declarations of w_sparse[64] and the
// left_multipliers/right_multipliers rotation constants used below — TODO
// confirm), 138/147 (the openers of the `left`/`right` brace-lists), and 168
// (the normalization lookup producing `xor_result`). Comments below only state
// what the visible lines show.
106template <typename Builder>
108{
110
// Seed the builder context from the first input; refined in the loop below in
// case w_in[0] is a constant with no context.
111 Builder* ctx = w_in[0].get_context();
112
114
115 // Populate initial 16 words from input (sparse form computed lazily as needed)
116 for (size_t i = 0; i < 16; ++i) {
117 w_sparse[i] = SHA256<Builder>::sparse_witness_limbs(w_in[i]);
118 // Extract builder context from inputs
119 if ((ctx == nullptr) && w_in[i].get_context()) {
120 ctx = w_in[i].get_context();
121 }
122 }
123
124 // Compute extended words W[16..63]
125 for (size_t i = 16; i < 64; ++i) {
// References (not copies): lazily-computed sparse forms are cached back into
// w_sparse via these aliases.
126 auto& w_left = w_sparse[i - 15];
127 auto& w_right = w_sparse[i - 2];
128
// Convert to sparse-limb form on first use only (has_sparse_limbs is the
// cache flag set by convert_witness).
129 if (!w_left.has_sparse_limbs) {
130 w_left = convert_witness(w_left.normal);
131 }
132 if (!w_right.has_sparse_limbs) {
133 w_right = convert_witness(w_right.normal);
134 }
135
136 // Compute the (partially) rotated sparse limbs for σ₀
137 // Note: remaining contributions accounted for via w_left.rotated_limb_corrections
139 w_left.sparse_limbs[0] * left_multipliers[0],
140 w_left.sparse_limbs[1] * left_multipliers[1],
141 w_left.sparse_limbs[2] * left_multipliers[2],
142 w_left.sparse_limbs[3] * left_multipliers[3],
143 };
144
145 // Compute the (partially) rotated sparse limbs for σ₁
146 // Note: remaining contributions accounted for via w_right.rotated_limb_corrections
148 w_right.sparse_limbs[0] * right_multipliers[0],
149 w_right.sparse_limbs[1] * right_multipliers[1],
150 w_right.sparse_limbs[2] * right_multipliers[2],
151 w_right.sparse_limbs[3] * right_multipliers[3],
152 };
153
154 // Compute σ₀(w[i-15]) in sparse form where σ₀(x) = (x >>> 7) ⊕ (x >>> 18) ⊕ (x >> 3).
155 // Each sparse digit holds the sum of contributions from the three rotation/shift operations (digit value in
156 // {0,1,2,3}). The fr(4) scaling positions σ₀'s contribution in the upper 2 bits of each 4-bit digit slot: when
157 // combined with σ₁ (unscaled, in lower 2 bits), each digit becomes 4*σ₀_digit + σ₁_digit ∈ [0,15].
158 const field_pt left_xor_sparse =
159 left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limb_corrections[1]) * fr(4);
160
161 // Compute σ₀(w[i-15]) + σ₁(w[i-2]) in sparse form where σ₁(x) = (x >>> 17) ⊕ (x >>> 19) ⊕ (x >> 10).
162 const field_pt xor_result_sparse = right[0]
163 .add_two(right[1], right[2])
164 .add_two(right[3], w_right.rotated_limb_corrections[2])
165 .add_two(w_right.rotated_limb_corrections[3], left_xor_sparse);
166
167 // Normalize the sparse representation via a lookup to obtain the genuine result σ₀ + σ₁
// (elided line 168 presumably reads SHA256_WITNESS_OUTPUT, which the index
// lists, into `xor_result` — TODO confirm)
169
170 // Compute W[i] = σ₁(W[i-2]) + W[i-7] + σ₀(W[i-15]) + W[i-16]
171 field_pt w_out_raw = xor_result.add_two(w_sparse[i - 16].normal, w_sparse[i - 7].normal);
172
173 // Natively compute value reduced to 32 bits per SHA-256 spec
174 const uint64_t w_out_modded = w_out_raw.get_value().from_montgomery_form().data[0] & 0xffffffffULL;
175
176 field_pt w_out;
177 if (w_out_raw.is_constant()) {
// Constant path: no witness or constraint needed, just embed the reduced value.
178 w_out = field_pt(ctx, fr(w_out_modded));
179 } else {
180 // Establish w_out as the 32-bit reduction of w_out_raw via w_out_raw = w_out + divisor*2^32
181 w_out = witness_t<Builder>(ctx, fr(w_out_modded));
182 static constexpr fr inv_pow_two = fr(2).pow(32).invert();
183
// divisor = (w_out_raw - w_out) / 2^32, computed in-circuit by scaling both
// terms by 2^-32 and subtracting.
184 field_pt w_out_raw_inv_pow_two = w_out_raw * inv_pow_two;
185 field_pt w_out_inv_pow_two = w_out * inv_pow_two;
186 field_pt divisor = w_out_raw_inv_pow_two - w_out_inv_pow_two;
187 // Sum of four 32-bit values: divisor ≤ 3.
188 divisor.create_range_constraint(/*num_bits=*/2);
189 }
190
// Store W[i] in normal form; converted to sparse lazily if a later iteration
// needs it as a w_left/w_right operand.
191 w_sparse[i] = sparse_witness_limbs(w_out);
192 }
193
// Strip the sparse bookkeeping and return the 64 normal-form schedule words.
194 std::array<field_pt, 64> w_extended;
195 for (size_t i = 0; i < 64; ++i) {
196 w_extended[i] = w_sparse[i].normal;
197 }
198 return w_extended;
199}
200
211template <typename Builder>
220
231template <typename Builder>
240
// Compute Σ₁(e) + Ch(e,f,g) for a SHA-256 compression round (per this file's
// index). Works digit-wise in a base-28 sparse encoding: each digit packs a
// rotation contribution (0-3) scaled by 7 plus a Choose contribution (0-6).
// Also refreshes e.sparse from the lookup as a side effect.
// NOTE(review): Doxygen listing elides line 259 (signature: choose_with_sigma1
// (sparse_value& e, const sparse_value& f, const sparse_value& g) per the
// index) and line 265 (the `lookup` declaration — presumably
// get_lookup_accumulators(MultiTableId::SHA256_CH_INPUT, e.normal), given the
// SHA256_CH_INPUT entry in the index — TODO confirm).
258template <typename Builder>
260{
262 // Separates rotation contributions (0-3) from Choose encoding (0-6) in each base-28 digit
263 constexpr fr SPARSE_MULT = fr(7);
264
266 const auto rotation_coefficients = sha256_tables::get_choose_rotation_multipliers();
267
// C3 row 0 accumulates the "non-trivial wrap" rotation terms; C2 rows hold
// sparse accumulator slices of e.
268 field_pt rotation_result = lookup[ColumnIdx::C3][0];
269 e.sparse = lookup[ColumnIdx::C2][0];
270 field_pt sparse_L2 = lookup[ColumnIdx::C2][2];
271
272 // Compute e + 7*Σ₁(e) in sparse form
// (the "+ fr(1)" on e.sparse contributes the unscaled `e` term needed by Ch)
273 field_pt xor_result = (rotation_result * SPARSE_MULT)
274 .add_two(e.sparse * (rotation_coefficients[0] * SPARSE_MULT + fr(1)),
275 sparse_L2 * (rotation_coefficients[2] * SPARSE_MULT));
276
277 // Add 2f + 3g to get e + 7*Σ₁(e) + 2f + 3g (each digit in 0..27)
278 field_pt choose_result_sparse = xor_result.add_two(f.sparse + f.sparse, g.sparse + g.sparse + g.sparse);
279
280 // Normalize via lookup: each digit maps to Σ₁(e)_i + Ch(e,f,g)_i
281 field_pt choose_result = plookup_read<Builder>::read_from_1_to_2_table(SHA256_CH_OUTPUT, choose_result_sparse);
282
283 return choose_result;
284}
285
// Compute Σ₀(a) + Maj(a,b,c) for a SHA-256 compression round (per this file's
// index). Works digit-wise in a base-16 sparse encoding: each digit packs a
// rotation contribution (0-3) scaled by 4 plus a Majority contribution (0-3).
// Also refreshes a.sparse from the lookup as a side effect.
// NOTE(review): Doxygen listing elides line 304 (signature: majority_with_sigma0
// (sparse_value& a, const sparse_value& b, const sparse_value& c) per the
// index) and line 310 (the `lookup` declaration — presumably
// get_lookup_accumulators(MultiTableId::SHA256_MAJ_INPUT, a.normal) — TODO
// confirm).
303template <typename Builder>
305{
307 // Separates rotation contributions (0-3) from Majority encoding (0-3) in each base-16 digit
308 constexpr fr SPARSE_MULT = fr(4);
309
311 const auto rotation_coefficients = sha256_tables::get_majority_rotation_multipliers();
312
313 // first row of 3rd column gives accumulating sum of "non-trivial" wraps
314 field_pt rotation_result = lookup[ColumnIdx::C3][0];
315 a.sparse = lookup[ColumnIdx::C2][0];
316 field_pt sparse_L1_acc = lookup[ColumnIdx::C2][1];
317
318 // Compute a + 4*Σ₀(a) in sparse form
// (the "+ fr(1)" on a.sparse contributes the unscaled `a` term needed by Maj)
319 field_pt xor_result = (rotation_result * SPARSE_MULT)
320 .add_two(a.sparse * (rotation_coefficients[0] * SPARSE_MULT + fr(1)),
321 sparse_L1_acc * (rotation_coefficients[1] * SPARSE_MULT));
322
323 // Add b + c to get a + 4*Σ₀(a) + b + c (each digit in 0..15)
324 field_pt majority_result_sparse = xor_result.add_two(b.sparse, c.sparse);
325
326 // Normalize via lookup: each digit maps to Σ₀(a)_i + Maj(a,b,c)_i
327 field_pt majority_result = plookup_read<Builder>::read_from_1_to_2_table(SHA256_MAJ_OUTPUT, majority_result_sparse);
328
329 return majority_result;
330}
331
// Compute (a + b) mod 2^32 in-circuit (per this file's index: "Compute (a + b)
// mod 2^32 with circuit constraints"). The result is expressed as
// a + b - overflow * 2^32, with `overflow` a witness range-constrained to
// `overflow_bits` bits. "Unsafe" because the RESULT itself is not range-
// constrained here — callers (see sha256_block) constrain it separately or via
// a subsequent lookup.
// NOTE(review): Doxygen listing elides line 343 (the first line of the
// signature — the return type and `const field_t<Builder>& a` parameter,
// continued by the two visible lines below) and lines 347-348.
342template <typename Builder>
344 const field_t<Builder>& b,
345 size_t overflow_bits)
346{
349
// Take the builder context from whichever operand has one (constants may not).
350 Builder* ctx = a.get_context() ? a.get_context() : b.get_context();
351
// Native (out-of-circuit) computation of the sum and its low 32 bits.
352 uint256_t sum = a.get_value() + b.get_value();
353 uint256_t normalized_sum = static_cast<uint32_t>(sum.data[0]); // lower 32 bits
354
// Both constant: no constraints needed, return the reduced constant directly.
355 if (a.is_constant() && b.is_constant()) {
356 return field_pt(ctx, normalized_sum);
357 }
358
// Witness the multiple of 2^32 that was dropped by the reduction...
359 fr overflow_value = fr((sum - normalized_sum) >> 32);
360 field_pt overflow = witness_pt(ctx, overflow_value);
361
// ...and enforce result = a + b - overflow*2^32 with a single add_two gate,
// then bound the overflow witness so the reduction is sound.
362 field_pt result = a.add_two(b, overflow * field_pt(ctx, -fr(1ULL << 32ULL)));
363 overflow.create_range_constraint(overflow_bits);
364 return result;
365}
366
// Apply the SHA-256 compression function to a single 512-bit message block
// (per this file's index): given the 8-word chaining state h_init and the
// 16-word block, run the 64 rounds and return the updated 8-word state.
// NOTE(review): Doxygen listing elides line 383 (first line of the signature —
// continued by the visible `const std::array<...>& input)` line), lines
// 386-392, 405-415 (presumably declarations such as `round_constants` and a
// descriptive comment block — TODO confirm), and line 441 (presumably
// `std::array<field_pt, 8> output;`).
382template <typename Builder>
384 const std::array<field_t<Builder>, 16>& input)
385{
387
// Initialize working variables a..h from the chaining state. b,c,f,g are
// eagerly converted to the sparse forms consumed by the round functions;
// a,e are wrapped lazily (their sparse forms are refreshed inside
// majority_with_sigma0 / choose_with_sigma1 each round).
393 sparse_value a = sparse_value(h_init[0]); // delay conversion to maj sparse form
394 auto b = map_into_maj_sparse_form(h_init[1]);
395 auto c = map_into_maj_sparse_form(h_init[2]);
396 sparse_value d = sparse_value(h_init[3]);
397 sparse_value e = sparse_value(h_init[4]); // delay conversion to choose sparse form
398 auto f = map_into_choose_sparse_form(h_init[5]);
399 auto g = map_into_choose_sparse_form(h_init[6]);
400 sparse_value h = sparse_value(h_init[7]);
401
402 // Extend the 16-word message block to 64 words per SHA-256 specification
403 const std::array<field_t<Builder>, 64> w = extend_witness(input);
404
// 64 compression rounds. Reductions mod 2^32 are deferred: ch/maj/T1 are kept
// as small multi-word sums, reduced only when assigned to e and a.
416 for (size_t i = 0; i < 64; ++i) {
417 auto ch = choose_with_sigma1(e, f, g); // ch = Σ1(e) + Ch(e,f,g) ≤ 2*(2^32-1)
418 auto maj = majority_with_sigma0(a, b, c); // maj = Σ0(a) + Maj(a,b,c) ≤ 2*(2^32-1)
419
420 // T1 = ch + h + K[i] + W[i] ≤ 5*(2^32-1)
421 auto T1 = ch.add_two(h.normal, w[i] + fr(round_constants[i]));
422
// Rotate the working variables per the SHA-256 round schedule.
423 h = g;
424 g = f;
425 f = e;
426 e.normal = add_normalize_unsafe(d.normal, T1, /*overflow_bits=*/3); // d + T1: 6 × 32-bit, overflow ≤ 5
427 d = c;
428 c = b;
429 b = a;
430 a.normal = add_normalize_unsafe(T1, maj, /*overflow_bits=*/3); // T1 + Σ0+Maj: 7 × 32-bit, overflow ≤ 6
431 }
432
433 // Apply range constraints to `a` and `e` which are the only outputs of the previous loop not already
434 // lookup-constrained via sparse form conversion. Although not strictly necessary, this simplifies the analysis that
435 // the output of compression is fully constrained at minimal cost.
436 apply_32_bit_range_constraint_via_lookup(a.normal);
437 apply_32_bit_range_constraint_via_lookup(e.normal);
438
439 // Add round results into previous block output.
440 // Overflow bits = 1 since each summand is constrained to 32 bits.
442 output[0] = add_normalize_unsafe(a.normal, h_init[0], /*overflow_bits=*/1);
443 output[1] = add_normalize_unsafe(b.normal, h_init[1], /*overflow_bits=*/1);
444 output[2] = add_normalize_unsafe(c.normal, h_init[2], /*overflow_bits=*/1);
445 output[3] = add_normalize_unsafe(d.normal, h_init[3], /*overflow_bits=*/1);
446 output[4] = add_normalize_unsafe(e.normal, h_init[4], /*overflow_bits=*/1);
447 output[5] = add_normalize_unsafe(f.normal, h_init[5], /*overflow_bits=*/1);
448 output[6] = add_normalize_unsafe(g.normal, h_init[6], /*overflow_bits=*/1);
449 output[7] = add_normalize_unsafe(h.normal, h_init[7], /*overflow_bits=*/1);
450
451 // The final add_normalize outputs are not consumed by lookup tables, so they must be explicitly range-constrained.
452 // (Within the compression loop, lookup tables provide implicit 32-bit constraints on add_normalize outputs.)
453 for (const auto& val : output) {
454 val.create_range_constraint(32);
455 }
456
457 return output;
458}
459
461template class SHA256<bb::MegaCircuitBuilder>;
462
463} // namespace bb::stdlib
static sparse_value map_into_maj_sparse_form(const field_ct &input)
Convert a field element to sparse form for use in the Majority function.
Definition sha256.cpp:232
static field_ct add_normalize_unsafe(const field_ct &a, const field_ct &b, size_t overflow_bits)
Compute (a + b) mod 2^32 with circuit constraints.
Definition sha256.cpp:343
static std::array< field_ct, 64 > extend_witness(const std::array< field_ct, 16 > &w_in)
Extend the 16-word message block to 64 words per SHA-256 specification.
Definition sha256.cpp:107
static field_ct choose_with_sigma1(sparse_value &e, const sparse_value &f, const sparse_value &g)
Compute Σ₁(e) + Ch(e,f,g) for SHA-256 compression rounds.
Definition sha256.cpp:259
static sparse_witness_limbs convert_witness(const field_ct &input)
Convert a 32-bit value to sparse limbs form for message schedule extension.
Definition sha256.cpp:46
static field_ct majority_with_sigma0(sparse_value &a, const sparse_value &b, const sparse_value &c)
Compute Σ₀(a) + Maj(a,b,c) for SHA-256 compression rounds.
Definition sha256.cpp:304
static sparse_value map_into_choose_sparse_form(const field_ct &input)
Convert a field element to sparse form for use in the Choose function.
Definition sha256.cpp:212
static std::array< field_ct, 8 > sha256_block(const std::array< field_ct, 8 > &h_init, const std::array< field_ct, 16 > &input)
Apply the SHA-256 compression function to a single 512-bit message block.
Definition sha256.cpp:383
static void apply_32_bit_range_constraint_via_lookup(const field_ct &input)
Apply an implicit 32-bit range constraint by performing a lookup on the input.
Definition sha256.cpp:83
void create_range_constraint(size_t num_bits, std::string const &msg="field_t::range_constraint") const
Let x = *this.normalize(), constrain x.v < 2^{num_bits}.
Definition field.cpp:910
bb::fr get_value() const
Given a := *this, compute its value given by a.v * a.mul + a.add.
Definition field.cpp:829
bool is_constant() const
Definition field.hpp:430
field_t add_two(const field_t &add_b, const field_t &add_c) const
Efficiently compute (this + a + b) using big_mul gate.
Definition field.cpp:576
static plookup::ReadData< field_pt > get_lookup_accumulators(const plookup::MultiTableId id, const field_pt &key_a, const field_pt &key_b=0, const bool is_2_to_1_lookup=false)
Definition plookup.cpp:19
static field_pt read_from_1_to_2_table(const plookup::MultiTableId id, const field_pt &key_a)
Definition plookup.cpp:89
FF a
FF b
stdlib::witness_t< bb::UltraCircuitBuilder > witness_pt
stdlib::field_t< UltraCircuitBuilder > field_pt
std::array< bb::fr, 3 > get_choose_rotation_multipliers()
Returns multipliers for computing Σ₁(e) rotations in choose_with_sigma1.
Definition sha256.hpp:409
std::array< bb::fr, 3 > get_majority_rotation_multipliers()
Returns multipliers for computing Σ₀(a) rotations in majority_with_sigma0.
Definition sha256.hpp:392
@ SHA256_CH_INPUT
Definition types.hpp:92
@ SHA256_MAJ_OUTPUT
Definition types.hpp:95
@ SHA256_CH_OUTPUT
Definition types.hpp:93
@ SHA256_WITNESS_OUTPUT
Definition types.hpp:97
@ SHA256_MAJ_INPUT
Definition types.hpp:94
void g(field_t< Builder > state[BLAKE_STATE_SIZE], size_t a, size_t b, size_t c, size_t d, field_t< Builder > x, field_t< Builder > y)
void mark_witness_as_used(const field_t< Builder > &field)
Mark a field_t witness as used (for UltraBuilder only).
Entry point for Barretenberg command-line interface.
Definition api.hpp:5
field< Bn254FrParams > fr
Definition fr.hpp:174
Inner sum(Cont< Inner, Args... > const &in)
Definition container.hpp:70
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
Plookup tables for SHA-256 using sparse form representation.
BB_INLINE constexpr field pow(const uint256_t &exponent) const noexcept
constexpr field invert() const noexcept
BB_INLINE constexpr field from_montgomery_form() const noexcept
std::array< field_ct, 4 > rotated_limb_corrections
Definition sha256.hpp:115
std::array< field_ct, 4 > sparse_limbs
Definition sha256.hpp:113