Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
aes128.cpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: Planned, auditors: [], commit: }
3// external_1: { status: not started, auditors: [], commit: }
4// external_2: { status: not started, auditors: [], commit: }
5// =====================
6
7#include "./aes128.hpp"
8
12
15
16using namespace bb::crypto;
17
19template <typename Builder> using byte_pair = std::pair<field_t<Builder>, field_t<Builder>>;
20using namespace bb::plookup;
21
22constexpr uint32_t AES128_BASE = 9;
23constexpr size_t EXTENDED_KEY_LENGTH = 176;
24
26{
28 return result;
29}
30
35
// Splits a 128-bit block into 16 bytes, each mapped into sparse (base-AES128_BASE) form via a
// plookup table read.
// NOTE(review): the signature line is folded out of this extraction; per the file's index it is
// std::array<field_t<Builder>, 16> convert_into_sparse_bytes(Builder* ctx, const field_t<Builder>& block_data)
// — confirm against the full source.
template <typename Builder>
{
    std::array<field_t<Builder>, 16> sparse_bytes;
    auto block_data_copy = block_data;
    if (block_data.is_constant()) {
        // The algorithm expects that the sparse bytes are witnesses, so the block_data_copy must be a witness
        block_data_copy.convert_constant_to_fixed_witness(ctx);
    }
    // Convert block data into sparse bytes using the AES_INPUT lookup table
    auto lookup = plookup_read<Builder>::get_lookup_accumulators(AES_INPUT, block_data_copy);
    for (size_t i = 0; i < 16; ++i) {
        // Reverse the accumulator order so sparse_bytes[0] holds the opposite end of the lookup
        // output — presumably the most significant byte; confirm against the AES_INPUT table layout.
        sparse_bytes[15 - i] = lookup[ColumnIdx::C2][i];
    }
    return sparse_bytes;
}
52
// Recombines 16 sparse-form bytes into a single 128-bit field element and constrains the result.
// NOTE(review): the declaration of `lookup` (original line 67) is folded out of this extraction;
// given the AES_NORMALIZE entry in the file index it is presumably a plookup accumulator computed
// over `result`, consumed by the assert_equal loop below — confirm against the full source.
template <typename Builder> field_t<Builder> convert_from_sparse_bytes(Builder* ctx, field_t<Builder>* sparse_bytes)
{

    // Natively recombine the bytes (big-endian: sparse_bytes[0] becomes the most significant byte).
    uint256_t accumulator = 0;
    for (size_t i = 0; i < 16; ++i) {
        // Sparse values fit in 64 bits, so only the low limb is needed before un-mapping.
        uint64_t sparse_byte = uint256_t(sparse_bytes[i].get_value()).data[0];
        uint256_t byte = numeric::map_from_sparse_form<AES128_BASE>(sparse_byte);
        accumulator <<= 8;
        accumulator += (byte);
    }

    // The recombined value enters the circuit as a fresh witness; the loop below constrains it.
    field_t<Builder> result = witness_t(ctx, fr(accumulator));

    // Constrain each byte of the witness against the corresponding input sparse byte
    // (same 15 - i reversal as convert_into_sparse_bytes).
    for (size_t i = 0; i < 16; ++i) {
        sparse_bytes[15 - i].assert_equal(lookup[ColumnIdx::C2][i]);
    }

    return result;
}
75
// Expands a 128-bit AES key into the full key schedule (EXTENDED_KEY_LENGTH bytes / 11 round keys),
// keeping every byte in sparse form and normalizing lazily to bound additive growth.
// NOTE(review): this extraction folds out the signature (per the file index:
// std::array<field_t<Builder>, EXTENDED_KEY_LENGTH> expand_key(Builder* ctx, const field_t<Builder>& key))
// and the declarations of `round_key`, `temp` and `add_counts` — confirm against the full source.
template <typename Builder>
{
    // Round constants (Rcon) from FIPS 197. Index 0 is a placeholder (never used);
    // indices 1-10 are Rcon[1] through Rcon[10] = {0x01, 0x02, 0x04, ..., 0x36}.
    // These are powers of 2 in GF(2^8): Rcon[i] = 2^(i-1) mod P(x).
    constexpr std::array<uint8_t, 11> round_constants = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10,
                                                          0x20, 0x40, 0x80, 0x1b, 0x36 };
    // Pre-map each round constant into sparse form once, as circuit constants.
    const auto sparse_round_constants = [&]() {
        std::array<field_t<Builder>, 11> result;
        for (size_t i = 0; i < 11; ++i) {
            result[i] = field_t<Builder>(ctx, fr(numeric::map_into_sparse_form<AES128_BASE>(round_constants[i])));
        }
        return result;
    }();

    // Decompose the 128-bit key into 16 sparse-form bytes.
    const auto sparse_key = convert_into_sparse_bytes(ctx, key);

    std::array<uint64_t, 4> temp_add_counts{};
    // Track the number of additions in each byte to normalize to prevent overflow in the sparse representation
    for (size_t i = 0; i < EXTENDED_KEY_LENGTH; ++i) {
        add_counts[i] = 1;
    }

    // For the first round (first 16 bytes of the expanded key), the round key is the same as the original key
    for (size_t i = 0; i < 16; ++i) {
        round_key[i] = sparse_key[i];
    }

    // Iterate over the 40 words (4 words per round for 10 rounds)
    for (size_t i = 4; i < 44; ++i) {
        // Byte offset of the previous word (word i - 1).
        size_t k = (i - 1) * 4;
        // Each word is 4 bytes, hence all the operations are done on 4 bytes at a time
        temp_add_counts[0] = add_counts[k + 0];
        temp_add_counts[1] = add_counts[k + 1];
        temp_add_counts[2] = add_counts[k + 2];
        temp_add_counts[3] = add_counts[k + 3];

        temp[0] = round_key[k];
        temp[1] = round_key[k + 1];
        temp[2] = round_key[k + 2];
        temp[3] = round_key[k + 3];

        // If the word index is a multiple of 4, then we need to apply the RotWord and SubWord operations
        if ((i & 0x03) == 0) {
            // Apply the RotWord operation to the 4 bytes
            const auto t = temp[0];
            temp[0] = temp[1];
            temp[1] = temp[2];
            temp[2] = temp[3];
            temp[3] = t;

            // Apply the SubWord operation to the 4 bytes by looking up the S-box value in the AES_SBOX lookup table
            temp[0] = apply_aes_sbox_map(ctx, temp[0]).first;
            temp[1] = apply_aes_sbox_map(ctx, temp[1]).first;
            temp[2] = apply_aes_sbox_map(ctx, temp[2]).first;
            temp[3] = apply_aes_sbox_map(ctx, temp[3]).first;

            // Add the round constant to the word. Since the round constants are 1 byte long we can just add them to the
            // first byte of the word
            temp[0] = temp[0] + sparse_round_constants[i >> 2];
            ++temp_add_counts[0];
        }

        // The index of the expanded key bytes that need to be updated
        size_t j = i * 4;
        // The index of the key bytes corresponding to the word 4 positions back (word i - 4)
        k = (i - 4) * 4;
        round_key[j] = round_key[k] + temp[0];
        round_key[j + 1] = round_key[k + 1] + temp[1];
        round_key[j + 2] = round_key[k + 2] + temp[2];
        round_key[j + 3] = round_key[k + 3] + temp[3];

        add_counts[j] = add_counts[k] + temp_add_counts[0];
        add_counts[j + 1] = add_counts[k + 1] + temp_add_counts[1];
        add_counts[j + 2] = add_counts[k + 2] + temp_add_counts[2];
        add_counts[j + 3] = add_counts[k + 3] + temp_add_counts[3];

        // Number of additions before we need to normalize the sparse form
        constexpr uint64_t target = 3;
        // NOTE(review): this loop variable shadows the outer byte-offset `k` above — rename candidate.
        for (size_t k = 0; k < 4; ++k) {
            // If the number of additions exceeds the target or the byte corresponds to a word index that is a multiple
            // of 4 (i.e. the byte is used as input to the S-box) we normalize the sparse form
            size_t byte_index = j + k;
            if (add_counts[byte_index] > target || (add_counts[byte_index] > 1 && (byte_index & 12) == 12)) {
                round_key[byte_index] = normalize_sparse_form(ctx, round_key[byte_index]);
                // Reset the addition counter
                add_counts[byte_index] = 1;
            }
        }
    }

    return round_key;
}
203
211template <typename Builder> void shift_rows(byte_pair<Builder>* state)
212{
213 byte_pair<Builder> temp = state[1];
214 state[1] = state[5];
215 state[5] = state[9];
216 state[9] = state[13];
217 state[13] = temp;
218
219 temp = state[2];
220 state[2] = state[10];
221 state[10] = temp;
222 temp = state[6];
223 state[6] = state[14];
224 state[14] = temp;
225
226 temp = state[3];
227 state[3] = state[15];
228 state[15] = state[11];
229 state[11] = state[7];
230 state[7] = temp;
231}
232
262template <typename Builder>
263void mix_column_and_add_round_key(byte_pair<Builder>* column_pairs, field_t<Builder>* round_key, uint64_t round)
264{
265 // Intermediate values to reduce the number of additions (optimization)
266 // t0 = s0 + s3 + 3·s1
267 auto t0 = column_pairs[0].first.add_two(column_pairs[3].first, column_pairs[1].second);
268 // t1 = s1 + s2 + 3·s3
269 auto t1 = column_pairs[1].first.add_two(column_pairs[2].first, column_pairs[3].second);
270
271 // r0 = 2·s0 ⊕ 3·s1 ⊕ s2 ⊕ s3 = t0 + s2 + 3·s0 = (s0 + 3·s0) + 3·s1 + s2 + s3
272 auto r0 = t0.add_two(column_pairs[2].first, column_pairs[0].second);
273 // r1 = s0 ⊕ 2·s1 ⊕ 3·s2 ⊕ s3 = t0 + s1 + 3·s2 = s0 + (s1 + 3·s1) + 3·s2 + s3
274 auto r1 = t0.add_two(column_pairs[1].first, column_pairs[2].second);
275 // r2 = s0 ⊕ s1 ⊕ 2·s2 ⊕ 3·s3 = t1 + s0 + 3·s2 = s0 + s1 + (s2 + 3·s2) + 3·s3
276 auto r2 = t1.add_two(column_pairs[0].first, column_pairs[2].second);
277 // r3 = 3·s0 ⊕ s1 ⊕ s2 ⊕ 2·s3 = t1 + 3·s0 + s3 = 3·s0 + s1 + s2 + (s3 + 3·s3)
278 auto r3 = t1.add_two(column_pairs[0].second, column_pairs[3].first);
279
280 // Add round key and store result back (only .first is updated; .second will be recomputed by next SubBytes)
281 column_pairs[0].first = r0 + round_key[(round * 16U)];
282 column_pairs[1].first = r1 + round_key[(round * 16U) + 1];
283 column_pairs[2].first = r2 + round_key[(round * 16U) + 2];
284 column_pairs[3].first = r3 + round_key[(round * 16U) + 3];
285}
286
287template <typename Builder>
288void mix_columns_and_add_round_key(byte_pair<Builder>* state_pairs, field_t<Builder>* round_key, uint64_t round)
289{
290 mix_column_and_add_round_key(state_pairs, round_key, round);
291 mix_column_and_add_round_key(state_pairs + 4, round_key + 4, round);
292 mix_column_and_add_round_key(state_pairs + 8, round_key + 8, round);
293 mix_column_and_add_round_key(state_pairs + 12, round_key + 12, round);
294}
295
296template <typename Builder> void sub_bytes(Builder* ctx, byte_pair<Builder>* state_pairs)
297{
298 for (size_t i = 0; i < 16; ++i) {
299 state_pairs[i] = apply_aes_sbox_map(ctx, state_pairs[i].first);
300 }
301}
302
303template <typename Builder>
304void add_round_key(byte_pair<Builder>* sparse_state, field_t<Builder>* sparse_round_key, uint64_t round)
305{
306 for (size_t i = 0; i < 16; i += 4) {
307 for (size_t j = 0; j < 4; ++j) {
308 sparse_state[i + j].first += sparse_round_key[(round * 16U) + i + j];
309 }
310 }
311}
312
313template <typename Builder> void xor_with_iv(byte_pair<Builder>* state, field_t<Builder>* iv)
314{
315 for (size_t i = 0; i < 16; ++i) {
316 state[i].first += iv[i];
317 }
318}
319
320template <typename Builder>
321void aes128_cipher(Builder* ctx, byte_pair<Builder>* state, field_t<Builder>* sparse_round_key)
322{
323 add_round_key(state, sparse_round_key, 0);
324 for (size_t i = 0; i < 16; ++i) {
325 state[i].first = normalize_sparse_form(ctx, state[i].first);
326 }
327
328 for (size_t round = 1; round < 10; ++round) {
329 sub_bytes(ctx, state);
330 shift_rows(state);
331 mix_columns_and_add_round_key(state, sparse_round_key, round);
332 for (size_t i = 0; i < 16; ++i) {
333 state[i].first = normalize_sparse_form(ctx, state[i].first);
334 }
335 }
336
337 sub_bytes(ctx, state);
338 shift_rows(state);
339 add_round_key(state, sparse_round_key, 10);
340}
341
/**
 * @brief Circuit implementation of AES-128 CBC encryption over whole 16-byte blocks.
 *
 * NOTE(review): this Doxygen extraction folds out the first line of the signature (per the file
 * index: std::vector<field_t<Builder>> encrypt_buffer_cbc(const std::vector<field_t<Builder>>& input, ...))
 * and the declarations of the local `result` / `output` vectors — confirm against the full source.
 * When every input is a circuit constant the result is computed natively (no gates); otherwise the
 * full in-circuit cipher is run with CBC chaining.
 */
template <typename Builder>
                         const field_t<Builder>& iv,
                         const field_t<Builder>& key)
{
    // Check if all inputs are constants
    bool all_constants = key.is_constant() && iv.is_constant();
    for (const auto& input_block : input) {
        if (!input_block.is_constant()) {
            all_constants = false;
            break;
        }
    }

    if (all_constants) {
        // Compute result directly using native crypto implementation
        std::vector<uint8_t> key_bytes(16);
        std::vector<uint8_t> iv_bytes(16);
        std::vector<uint8_t> input_bytes(input.size() * 16);

        // Convert key to bytes (big-endian: byte 0 is the most significant byte of the field value)
        uint256_t key_value = key.get_value();
        for (size_t i = 0; i < 16; ++i) {
            key_bytes[15 - i] = static_cast<uint8_t>((key_value >> (i * 8)) & 0xFF);
        }

        // Convert IV to bytes (same big-endian layout)
        uint256_t iv_value = iv.get_value();
        for (size_t i = 0; i < 16; ++i) {
            iv_bytes[15 - i] = static_cast<uint8_t>((iv_value >> (i * 8)) & 0xFF);
        }

        // Convert input blocks to bytes
        for (size_t block_idx = 0; block_idx < input.size(); ++block_idx) {
            uint256_t block_value = input[block_idx].get_value();
            for (size_t i = 0; i < 16; ++i) {
                input_bytes[block_idx * 16 + 15 - i] = static_cast<uint8_t>((block_value >> (i * 8)) & 0xFF);
            }
        }

        // Run native AES encryption (encrypts input_bytes in place)
        crypto::aes128_encrypt_buffer_cbc(input_bytes.data(), iv_bytes.data(), key_bytes.data(), input_bytes.size());

        // Convert result back to field elements (constants — no context needed)
        for (size_t block_idx = 0; block_idx < input.size(); ++block_idx) {
            uint256_t result_value = 0;
            for (size_t i = 0; i < 16; ++i) {
                result_value <<= 8;
                result_value += input_bytes[block_idx * 16 + i];
            }
            result.push_back(field_t<Builder>(result_value));
        }

        return result;
    }

    // Find a valid context from any of the inputs (at least one is non-constant here,
    // since the all-constant case returned above)
    Builder* ctx = nullptr;
    if (!key.is_constant()) {
        ctx = key.get_context();
    } else if (!iv.is_constant()) {
        ctx = iv.get_context();
    } else {
        for (const auto& input_block : input) {
            if (!input_block.is_constant()) {
                ctx = input_block.get_context();
                break;
            }
        }
    }

    BB_ASSERT(ctx);

    // Build the full sparse-form key schedule in-circuit
    auto round_key = expand_key(ctx, key);

    const size_t num_blocks = input.size();

    // Decompose every input block into 16 sparse bytes; .second starts at 0 and is
    // populated by the first SubBytes step inside the cipher
    std::vector<byte_pair<Builder>> sparse_state;
    for (size_t i = 0; i < num_blocks; ++i) {
        auto bytes = convert_into_sparse_bytes(ctx, input[i]);
        for (const auto& byte : bytes) {
            sparse_state.push_back({ byte, field_t(ctx, fr(0)) });
        }
    }

    auto sparse_iv = convert_into_sparse_bytes(ctx, iv);

    // CBC chaining: each block is XORed with the previous ciphertext (the IV for block 0)
    // before encryption, and its ciphertext becomes the next block's IV
    for (size_t i = 0; i < num_blocks; ++i) {
        byte_pair<Builder>* round_state = &sparse_state[i * 16];
        xor_with_iv(round_state, &sparse_iv[0]);
        aes128_cipher(ctx, round_state, &round_key[0]);

        for (size_t j = 0; j < 16; ++j) {
            sparse_iv[j] = round_state[j].first;
        }
    }

    // Normalize every ciphertext byte before recombining into field elements
    std::vector<field_t<Builder>> sparse_output;
    for (auto& element : sparse_state) {
        sparse_output.push_back(normalize_sparse_form(ctx, element.first));
    }

    for (size_t i = 0; i < num_blocks; ++i) {
        output.push_back(convert_from_sparse_bytes(ctx, &sparse_output[i * 16]));
    }
    return output;
}
// Explicitly instantiates encrypt_buffer_cbc for the given circuit Builder type.
#define INSTANTIATE_ENCRYPT_BUFFER_CBC(Builder)                                                                        \
    template std::vector<field_t<Builder>> encrypt_buffer_cbc<Builder>(                                                \
        const std::vector<field_t<Builder>>&, const field_t<Builder>&, const field_t<Builder>&)
454
457} // namespace bb::stdlib::aes128
#define BB_ASSERT(expression,...)
Definition assert.hpp:70
void assert_equal(const field_t &rhs, std::string const &msg="field_t::assert_equal") const
Copy constraint: constrain that *this field is equal to rhs element.
Definition field.cpp:931
Builder * get_context() const
Definition field.hpp:420
bb::fr get_value() const
Given a := *this, compute its value given by a.v * a.mul + a.add.
Definition field.cpp:829
void convert_constant_to_fixed_witness(Builder *ctx)
Definition field.hpp:445
bool is_constant() const
Definition field.hpp:430
void aes128_encrypt_buffer_cbc(uint8_t *buffer, uint8_t *iv, const uint8_t *key, const size_t length)
Definition aes128.cpp:233
@ AES_NORMALIZE
Definition types.hpp:98
byte_pair< Builder > apply_aes_sbox_map(Builder *, field_t< Builder > &input)
Definition aes128.cpp:31
void mix_columns_and_add_round_key(byte_pair< Builder > *state_pairs, field_t< Builder > *round_key, uint64_t round)
Definition aes128.cpp:288
field_t< Builder > normalize_sparse_form(Builder *, field_t< Builder > &byte)
Definition aes128.cpp:25
void xor_with_iv(byte_pair< Builder > *state, field_t< Builder > *iv)
Definition aes128.cpp:313
constexpr uint32_t AES128_BASE
Definition aes128.cpp:22
std::array< field_t< Builder >, 16 > convert_into_sparse_bytes(Builder *ctx, const field_t< Builder > &block_data)
Definition aes128.cpp:37
void sub_bytes(Builder *ctx, byte_pair< Builder > *state_pairs)
Definition aes128.cpp:296
std::pair< field_t< Builder >, field_t< Builder > > byte_pair
Definition aes128.cpp:19
std::array< field_t< Builder >, EXTENDED_KEY_LENGTH > expand_key(Builder *ctx, const field_t< Builder > &key)
Expands a 128-bit AES key into the full key schedule (EXTENDED_KEY_LENGTH bytes / 11 round keys).
Definition aes128.cpp:107
constexpr size_t EXTENDED_KEY_LENGTH
Definition aes128.cpp:23
void add_round_key(byte_pair< Builder > *sparse_state, field_t< Builder > *sparse_round_key, uint64_t round)
Definition aes128.cpp:304
void aes128_cipher(Builder *ctx, byte_pair< Builder > *state, field_t< Builder > *sparse_round_key)
Definition aes128.cpp:321
void shift_rows(byte_pair< Builder > *state)
The SHIFTROW() operation as in FIPS 197, Section 5.1.2.
Definition aes128.cpp:211
field_t< Builder > convert_from_sparse_bytes(Builder *ctx, field_t< Builder > *sparse_bytes)
Definition aes128.cpp:53
std::vector< field_t< Builder > > encrypt_buffer_cbc(const std::vector< field_t< Builder > > &input, const field_t< Builder > &iv, const field_t< Builder > &key)
Definition aes128.cpp:343
void mix_column_and_add_round_key(byte_pair< Builder > *column_pairs, field_t< Builder > *round_key, uint64_t round)
Performs MixColumns on a single column and adds the round key (FIPS 197, Sections 5....
Definition aes128.cpp:263
std::conditional_t< IsGoblinBigGroup< C, Fq, Fr, G >, element_goblin::goblin_element< C, goblin_field< C >, Fr, G >, element_default::element< C, Fq, Fr, G > > element
element wraps either element_default::element or element_goblin::goblin_element depending on parametr...
Definition biggroup.hpp:995
field< Bn254FrParams > fr
Definition fr.hpp:174
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
#define INSTANTIATE_ENCRYPT_BUFFER_CBC(Builder)
Definition aes128.cpp:451