Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
execution_trace.cpp
Go to the documentation of this file.
2
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <numeric>
#include <ranges>
#include <stdexcept>
9
37
42
43namespace bb::avm2::tracegen {
44namespace {
45
// Per-operand column groups for the execution trace. Index i of each array is the
// column for operand i of the currently executing instruction, so entry order is
// load-bearing — do not reorder.

// Raw operand values as decoded from the wire format.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_COLUMNS = {
    C::execution_op_0_, C::execution_op_1_, C::execution_op_2_, C::execution_op_3_,
    C::execution_op_4_, C::execution_op_5_, C::execution_op_6_,
};
// Selector: operand i is a memory address (participates in addressing resolution).
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_ADDRESS_COLUMNS = {
    C::execution_sel_op_is_address_0_, C::execution_sel_op_is_address_1_, C::execution_sel_op_is_address_2_,
    C::execution_sel_op_is_address_3_, C::execution_sel_op_is_address_4_, C::execution_sel_op_is_address_5_,
    C::execution_sel_op_is_address_6_,
};
// Operand value after relative-addressing adjustment has been applied.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_AFTER_RELATIVE_COLUMNS = {
    C::execution_op_after_relative_0_, C::execution_op_after_relative_1_, C::execution_op_after_relative_2_,
    C::execution_op_after_relative_3_, C::execution_op_after_relative_4_, C::execution_op_after_relative_5_,
    C::execution_op_after_relative_6_,
};
// Fully resolved operands ("rop") after the whole addressing pipeline.
constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_COLUMNS = {
    C::execution_rop_0_, C::execution_rop_1_, C::execution_rop_2_, C::execution_rop_3_,
    C::execution_rop_4_, C::execution_rop_5_, C::execution_rop_6_,
};
// Tags of the resolved operands.
constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_TAG_COLUMNS = {
    C::execution_rop_tag_0_, C::execution_rop_tag_1_, C::execution_rop_tag_2_, C::execution_rop_tag_3_,
    C::execution_rop_tag_4_, C::execution_rop_tag_5_, C::execution_rop_tag_6_,
};
// Selector: indirection must be applied to operand i.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS = {
    C::execution_sel_should_apply_indirection_0_, C::execution_sel_should_apply_indirection_1_,
    C::execution_sel_should_apply_indirection_2_, C::execution_sel_should_apply_indirection_3_,
    C::execution_sel_should_apply_indirection_4_, C::execution_sel_should_apply_indirection_5_,
    C::execution_sel_should_apply_indirection_6_,
};
// Selector: relative-address computation overflowed for operand i.
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_RELATIVE_OVERFLOW_COLUMNS = {
    C::execution_sel_relative_overflow_0_, C::execution_sel_relative_overflow_1_, C::execution_sel_relative_overflow_2_,
    C::execution_sel_relative_overflow_3_, C::execution_sel_relative_overflow_4_, C::execution_sel_relative_overflow_5_,
    C::execution_sel_relative_overflow_6_,
};
// Selector: an overflow check must be performed for operand i (relative with a valid base).
constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS = {
    C::execution_sel_op_do_overflow_check_0_, C::execution_sel_op_do_overflow_check_1_,
    C::execution_sel_op_do_overflow_check_2_, C::execution_sel_op_do_overflow_check_3_,
    C::execution_sel_op_do_overflow_check_4_, C::execution_sel_op_do_overflow_check_5_,
    C::execution_sel_op_do_overflow_check_6_,
};
// The wire "indirect" field packs 2 bits (is_relative, is_indirect) per operand into 16 bits,
// which bounds AVM_MAX_OPERANDS to 8; the static_assert enforces this.
constexpr size_t TOTAL_INDIRECT_BITS = 16;
static_assert(static_cast<size_t>(AVM_MAX_OPERANDS) * 2 <= TOTAL_INDIRECT_BITS);
// Decoded per-operand "is relative" bits from the wire indirect field (all 8 slots).
constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_RELATIVE_WIRE_COLUMNS = {
    C::execution_sel_op_is_relative_wire_0_, C::execution_sel_op_is_relative_wire_1_,
    C::execution_sel_op_is_relative_wire_2_, C::execution_sel_op_is_relative_wire_3_,
    C::execution_sel_op_is_relative_wire_4_, C::execution_sel_op_is_relative_wire_5_,
    C::execution_sel_op_is_relative_wire_6_, C::execution_sel_op_is_relative_wire_7_,

};
// Decoded per-operand "is indirect" bits from the wire indirect field (all 8 slots).
constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_INDIRECT_WIRE_COLUMNS = {
    C::execution_sel_op_is_indirect_wire_0_, C::execution_sel_op_is_indirect_wire_1_,
    C::execution_sel_op_is_indirect_wire_2_, C::execution_sel_op_is_indirect_wire_3_,
    C::execution_sel_op_is_indirect_wire_4_, C::execution_sel_op_is_indirect_wire_5_,
    C::execution_sel_op_is_indirect_wire_6_, C::execution_sel_op_is_indirect_wire_7_,
};

// Per-register column groups. Index i of each array is the column for register i.

// Register values read/written by the instruction.
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_COLUMNS = {
    C::execution_register_0_, C::execution_register_1_, C::execution_register_2_,
    C::execution_register_3_, C::execution_register_4_, C::execution_register_5_,
};
// Memory tags of the register values.
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_TAG_COLUMNS = {
    C::execution_mem_tag_reg_0_, C::execution_mem_tag_reg_1_, C::execution_mem_tag_reg_2_,
    C::execution_mem_tag_reg_3_, C::execution_mem_tag_reg_4_, C::execution_mem_tag_reg_5_,
};
// Read/write flag per register slot.
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_IS_WRITE_COLUMNS = {
    C::execution_rw_reg_0_, C::execution_rw_reg_1_, C::execution_rw_reg_2_,
    C::execution_rw_reg_3_, C::execution_rw_reg_4_, C::execution_rw_reg_5_,
};
// Selector: register slot i performs a memory operation.
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_OP_COLUMNS = {
    C::execution_sel_mem_op_reg_0_, C::execution_sel_mem_op_reg_1_, C::execution_sel_mem_op_reg_2_,
    C::execution_sel_mem_op_reg_3_, C::execution_sel_mem_op_reg_4_, C::execution_sel_mem_op_reg_5_,
};
// Expected memory tag for register slot i (used by the tag check below).
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_EXPECTED_TAG_COLUMNS = {
    C::execution_expected_tag_reg_0_, C::execution_expected_tag_reg_1_, C::execution_expected_tag_reg_2_,
    C::execution_expected_tag_reg_3_, C::execution_expected_tag_reg_4_, C::execution_expected_tag_reg_5_,
};
// Selector: a tag check is performed on register slot i.
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_TAG_CHECK_COLUMNS = {
    C::execution_sel_tag_check_reg_0_, C::execution_sel_tag_check_reg_1_, C::execution_sel_tag_check_reg_2_,
    C::execution_sel_tag_check_reg_3_, C::execution_sel_tag_check_reg_4_, C::execution_sel_tag_check_reg_5_,
};
// Selector: register slot i is effectively active for this opcode.
constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_OP_REG_EFFECTIVE_COLUMNS = {
    C::execution_sel_op_reg_effective_0_, C::execution_sel_op_reg_effective_1_, C::execution_sel_op_reg_effective_2_,
    C::execution_sel_op_reg_effective_3_, C::execution_sel_op_reg_effective_4_, C::execution_sel_op_reg_effective_5_,
};
129
137C get_execution_opcode_selector(ExecutionOpCode exec_opcode)
138{
139 switch (exec_opcode) {
141 return C::execution_sel_execute_get_env_var;
143 return C::execution_sel_execute_mov;
145 return C::execution_sel_execute_jump;
147 return C::execution_sel_execute_jumpi;
149 return C::execution_sel_execute_call;
151 return C::execution_sel_execute_static_call;
153 return C::execution_sel_execute_internal_call;
155 return C::execution_sel_execute_internal_return;
157 return C::execution_sel_execute_return;
159 return C::execution_sel_execute_revert;
161 return C::execution_sel_execute_success_copy;
163 return C::execution_sel_execute_returndata_size;
165 return C::execution_sel_execute_debug_log;
167 return C::execution_sel_execute_sload;
169 return C::execution_sel_execute_sstore;
171 return C::execution_sel_execute_notehash_exists;
173 return C::execution_sel_execute_emit_notehash;
175 return C::execution_sel_execute_l1_to_l2_message_exists;
177 return C::execution_sel_execute_nullifier_exists;
179 return C::execution_sel_execute_emit_nullifier;
181 return C::execution_sel_execute_send_l2_to_l1_msg;
182 default:
183 throw std::runtime_error("Execution opcode does not have a corresponding selector");
184 }
185}
186
190struct FailingContexts {
191 bool app_logic_failure = false;
192 bool teardown_failure = false;
195 unordered_flat_set<uint32_t> does_context_fail;
196};
197
/**
 * @brief Preprocesses all execution events to determine which contexts/phases fail.
 *
 * Two passes over `ex_events`:
 *  1. find the top-level exit event of the APP_LOGIC and TEARDOWN phases and record
 *     whether each exit is a failure (and which context id performed it);
 *  2. record the id of every context whose event is a failure.
 *
 * NOTE(review): the parameter list appears truncated in this rendering; the body reads a
 * container of execution events named `ex_events` — confirm the exact signature upstream.
 */
FailingContexts preprocess_for_discard(
{
    FailingContexts dying_info;

    // We use `after_context_event` to retrieve parent_id, context_id, and phase to be consistent with
    // how these values are populated in the trace (see ExecutionTraceBuilder::process()). These values
    // should not change during the life-cycle of an execution event though and before_context_event
    // would lead to the same results.

    // Preprocessing pass 1: find the events that exit the app logic and teardown phases
    for (const auto& ex_event : ex_events) {
        bool is_exit = ex_event.is_exit();
        // parent_id == 0 means the event belongs to a top-level (enqueued-call) context.
        bool is_top_level = ex_event.after_context_event.parent_id == 0;

        if (is_exit && is_top_level) {
            if (ex_event.after_context_event.phase == TransactionPhase::APP_LOGIC) {
                dying_info.app_logic_failure = ex_event.is_failure();
                dying_info.app_logic_exit_context_id = ex_event.after_context_event.id;
            } else if (ex_event.after_context_event.phase == TransactionPhase::TEARDOWN) {
                dying_info.teardown_failure = ex_event.is_failure();
                dying_info.teardown_exit_context_id = ex_event.after_context_event.id;
                break; // Teardown is the last phase we care about
            }
        }
    }

    // Preprocessing pass 2: find all contexts that fail and mark them
    for (const auto& ex_event : ex_events) {
        if (ex_event.is_failure()) {
            dying_info.does_context_fail.insert(ex_event.after_context_event.id);
        }
    }

    return dying_info;
}
245
253bool is_phase_discarded(TransactionPhase phase, const FailingContexts& failures)
254{
255 // Note that app logic also gets discarded if teardown failures
256 return (phase == TransactionPhase::APP_LOGIC && (failures.app_logic_failure || failures.teardown_failure)) ||
257 (phase == TransactionPhase::TEARDOWN && failures.teardown_failure);
258}
259
267uint32_t dying_context_for_phase(TransactionPhase phase, const FailingContexts& failures)
268{
270 "Execution events must have app logic or teardown phase");
271
272 switch (phase) {
274 if (failures.app_logic_failure) {
275 return failures.app_logic_exit_context_id;
276 }
277
278 // Note that app logic also gets discarded if teardown failures
279 if (failures.teardown_failure) {
280 return failures.teardown_exit_context_id;
281 }
282
283 return 0;
284 }
286 return failures.teardown_failure ? failures.teardown_exit_context_id : 0;
287 default:
288 __builtin_unreachable(); // tell the compiler "we never reach here"
289 }
290}
291
292} // namespace
293
296{
297 uint32_t row = 1; // We start from row 1 because this trace contains shifted columns.
298
299 // Preprocess events to determine which contexts will fail
300 const FailingContexts failures = preprocess_for_discard(ex_events);
301
302 // Some variables updated per loop iteration to track
303 // whether or not the upcoming row should "discard" [side effects].
304 uint32_t dying_context_id = 0;
305 // dying_context_id captures whether we discard or not. Namely, discard == 1 <=> dying_context_id != 0
306 // is a circuit invariant. For this reason, we use a lambda to preserve the invariant.
307 auto is_discarding = [&dying_context_id]() { return dying_context_id != 0; };
308 bool is_first_event_in_enqueued_call = true;
309 bool prev_row_was_enter_call = false;
310
311 for (const auto& ex_event : ex_events) {
312 // Check if this is the first event in an enqueued call and whether
313 // the phase should be discarded
314 if (!is_discarding() && is_first_event_in_enqueued_call &&
315 is_phase_discarded(ex_event.after_context_event.phase, failures)) {
316 dying_context_id = dying_context_for_phase(ex_event.after_context_event.phase, failures);
317 }
318
319 const bool has_parent = ex_event.after_context_event.parent_id != 0;
320
321 /**************************************************************************************************
322 * Setup.
323 **************************************************************************************************/
324
325 trace.set(
326 row,
327 { {
328 { C::execution_sel, 1 },
329 // Selectors that indicate "dispatch" from tx trace
330 // Note: Enqueued Call End is determined during the opcode execution temporality group
331 { C::execution_enqueued_call_start, is_first_event_in_enqueued_call ? 1 : 0 },
332 // Context
333 { C::execution_context_id, ex_event.after_context_event.id },
334 { C::execution_parent_id, ex_event.after_context_event.parent_id },
335 // Warning: pc in after_context_event is the pc of the next instruction, not the current instruction.
336 { C::execution_pc, ex_event.before_context_event.pc },
337 { C::execution_msg_sender, ex_event.after_context_event.msg_sender },
338 { C::execution_contract_address, ex_event.after_context_event.contract_addr },
339 { C::execution_transaction_fee, ex_event.after_context_event.transaction_fee },
340 { C::execution_is_static, ex_event.after_context_event.is_static },
341 { C::execution_parent_calldata_addr, ex_event.after_context_event.parent_cd_addr },
342 { C::execution_parent_calldata_size, ex_event.after_context_event.parent_cd_size },
343 { C::execution_last_child_returndata_addr, ex_event.after_context_event.last_child_rd_addr },
344 { C::execution_last_child_returndata_size, ex_event.after_context_event.last_child_rd_size },
345 { C::execution_last_child_success, ex_event.after_context_event.last_child_success },
346 { C::execution_last_child_id, ex_event.after_context_event.last_child_id },
347 { C::execution_l2_gas_limit, ex_event.after_context_event.gas_limit.l2_gas },
348 { C::execution_da_gas_limit, ex_event.after_context_event.gas_limit.da_gas },
349 { C::execution_l2_gas_used, ex_event.after_context_event.gas_used.l2_gas },
350 { C::execution_da_gas_used, ex_event.after_context_event.gas_used.da_gas },
351 { C::execution_parent_l2_gas_limit, ex_event.after_context_event.parent_gas_limit.l2_gas },
352 { C::execution_parent_da_gas_limit, ex_event.after_context_event.parent_gas_limit.da_gas },
353 { C::execution_parent_l2_gas_used, ex_event.after_context_event.parent_gas_used.l2_gas },
354 { C::execution_parent_da_gas_used, ex_event.after_context_event.parent_gas_used.da_gas },
355 { C::execution_next_context_id, ex_event.next_context_id },
356 // Context - gas.
357 { C::execution_prev_l2_gas_used, ex_event.before_context_event.gas_used.l2_gas },
358 { C::execution_prev_da_gas_used, ex_event.before_context_event.gas_used.da_gas },
359 // Context - tree states
360 // Context - tree states - Written public data slots tree
361 { C::execution_prev_written_public_data_slots_tree_root,
362 ex_event.before_context_event.written_public_data_slots_tree_snapshot.root },
363 { C::execution_prev_written_public_data_slots_tree_size,
364 ex_event.before_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
365 { C::execution_written_public_data_slots_tree_root,
366 ex_event.after_context_event.written_public_data_slots_tree_snapshot.root },
367 { C::execution_written_public_data_slots_tree_size,
368 ex_event.after_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
369 // Context - tree states - Nullifier tree
370 { C::execution_prev_nullifier_tree_root,
371 ex_event.before_context_event.tree_states.nullifier_tree.tree.root },
372 { C::execution_prev_nullifier_tree_size,
373 ex_event.before_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
374 { C::execution_prev_num_nullifiers_emitted,
375 ex_event.before_context_event.tree_states.nullifier_tree.counter },
376 { C::execution_nullifier_tree_root, ex_event.after_context_event.tree_states.nullifier_tree.tree.root },
377 { C::execution_nullifier_tree_size,
378 ex_event.after_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
379 { C::execution_num_nullifiers_emitted,
380 ex_event.after_context_event.tree_states.nullifier_tree.counter },
381 // Context - tree states - Public data tree
382 { C::execution_prev_public_data_tree_root,
383 ex_event.before_context_event.tree_states.public_data_tree.tree.root },
384 { C::execution_prev_public_data_tree_size,
385 ex_event.before_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
386 { C::execution_public_data_tree_root,
387 ex_event.after_context_event.tree_states.public_data_tree.tree.root },
388 { C::execution_public_data_tree_size,
389 ex_event.after_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
390 // Context - tree states - Note hash tree
391 { C::execution_prev_note_hash_tree_root,
392 ex_event.before_context_event.tree_states.note_hash_tree.tree.root },
393 { C::execution_prev_note_hash_tree_size,
394 ex_event.before_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
395 { C::execution_prev_num_note_hashes_emitted,
396 ex_event.before_context_event.tree_states.note_hash_tree.counter },
397 { C::execution_note_hash_tree_root, ex_event.after_context_event.tree_states.note_hash_tree.tree.root },
398 { C::execution_note_hash_tree_size,
399 ex_event.after_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
400 { C::execution_num_note_hashes_emitted,
401 ex_event.after_context_event.tree_states.note_hash_tree.counter },
402 // Context - tree states - L1 to L2 message tree
403 { C::execution_l1_l2_tree_root,
404 ex_event.after_context_event.tree_states.l1_to_l2_message_tree.tree.root },
405 // Context - tree states - Retrieved bytecodes tree
406 { C::execution_prev_retrieved_bytecodes_tree_root,
407 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.root },
408 { C::execution_prev_retrieved_bytecodes_tree_size,
409 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
410 { C::execution_retrieved_bytecodes_tree_root,
411 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.root },
412 { C::execution_retrieved_bytecodes_tree_size,
413 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
414 // Context - side effects
415 { C::execution_prev_num_unencrypted_log_fields, ex_event.before_context_event.numUnencryptedLogFields },
416 { C::execution_num_unencrypted_log_fields, ex_event.after_context_event.numUnencryptedLogFields },
417 { C::execution_prev_num_l2_to_l1_messages, ex_event.before_context_event.numL2ToL1Messages },
418 { C::execution_num_l2_to_l1_messages, ex_event.after_context_event.numL2ToL1Messages },
419 // Helpers for identifying parent context
420 { C::execution_has_parent_ctx, has_parent ? 1 : 0 },
421 { C::execution_is_parent_id_inv, ex_event.after_context_event.parent_id }, // Will be inverted in batch.
422 } });
423
424 // Internal stack
425 // Important: It is crucial to use `before_context_event` to populate the internal call stack columns because
426 // these values are mutated by the internal call and return opcodes and therefore
427 // `after_context_event` would populate incorrect values.
428 const auto& internal_call_return_id = ex_event.before_context_event.internal_call_return_id;
429 trace.set(row,
430 { {
431 { C::execution_internal_call_id, ex_event.before_context_event.internal_call_id },
432 { C::execution_internal_call_return_id, internal_call_return_id },
433 { C::execution_next_internal_call_id, ex_event.before_context_event.next_internal_call_id },
434 } });
435
436 /**************************************************************************************************
437 * Temporality group 1: Bytecode retrieval.
438 **************************************************************************************************/
439
440 const bool bytecode_retrieval_failed = ex_event.error == ExecutionError::BYTECODE_RETRIEVAL;
441 const bool sel_first_row_in_context = prev_row_was_enter_call || is_first_event_in_enqueued_call;
442 trace.set(row,
443 { {
444 { C::execution_sel_first_row_in_context, sel_first_row_in_context ? 1 : 0 },
445 { C::execution_sel_bytecode_retrieval_failure, bytecode_retrieval_failed ? 1 : 0 },
446 { C::execution_sel_bytecode_retrieval_success, !bytecode_retrieval_failed ? 1 : 0 },
447 { C::execution_bytecode_id, ex_event.after_context_event.bytecode_id },
448 } });
449
450 /**************************************************************************************************
451 * Temporality group 2: Instruction fetching. Mapping from wire to execution and addressing.
452 **************************************************************************************************/
453
454 // This will only have a value if instruction fetching succeeded.
456 const bool error_in_instruction_fetching = ex_event.error == ExecutionError::INSTRUCTION_FETCHING;
457 const bool instruction_fetching_success = !bytecode_retrieval_failed && !error_in_instruction_fetching;
458 trace.set(C::execution_sel_instruction_fetching_failure, row, error_in_instruction_fetching ? 1 : 0);
459
460 if (instruction_fetching_success) {
461 exec_opcode = ex_event.wire_instruction.get_exec_opcode();
462 process_instr_fetching(ex_event.wire_instruction, trace, row);
463
464 // If we fetched an instruction successfully, we can set the next PC.
465 // In circuit, we enforce next_pc to be pc + instr_length, but in simulation,
466 // we set next_pc (as member of the context) to be the real pc of the next instruction
467 // which is different for JUMP, JUMPI, INTERNALCALL, and INTERNALRETURN.
468 // Therefore, we must not use after_context_event.pc (which is simulation next_pc) to set
469 // C::execution_next_pc.
470 trace.set(row,
471 { {
472 { C::execution_next_pc,
473 static_cast<uint32_t>(ex_event.before_context_event.pc +
474 ex_event.wire_instruction.size_in_bytes()) },
475 } });
476
477 // Along this function we need to set the info we get from the #[EXEC_SPEC_READ] lookup.
478 process_execution_spec(ex_event, trace, row);
479
480 process_addressing(ex_event.addressing_event, ex_event.wire_instruction, trace, row);
481 }
482
483 const bool addressing_failed = ex_event.error == ExecutionError::ADDRESSING;
484
485 /**************************************************************************************************
486 * Temporality group 3: Registers read.
487 **************************************************************************************************/
488
489 // Note that if addressing did not fail, register reading will not fail.
491 std::ranges::fill(registers, MemoryValue::from_tag(static_cast<MemoryTag>(0), 0));
492 const bool should_process_registers = instruction_fetching_success && !addressing_failed;
493 const bool register_processing_failed = ex_event.error == ExecutionError::REGISTER_READ;
494 if (should_process_registers) {
496 *exec_opcode, ex_event.inputs, ex_event.output, registers, register_processing_failed, trace, row);
497 }
498
499 /**************************************************************************************************
500 * Temporality group 4: Gas (both base and dynamic).
501 **************************************************************************************************/
502
503 const bool should_check_gas = should_process_registers && !register_processing_failed;
504 if (should_check_gas) {
505 process_gas(ex_event.gas_event, *exec_opcode, trace, row);
506
507 // To_Radix Dynamic Gas Factor related selectors.
508 // We need the register information to compute dynamic gas factor and process_gas() does not have
509 // access to it and nor should it.
510 if (*exec_opcode == ExecutionOpCode::TORADIXBE) {
511 uint32_t radix = ex_event.inputs[1].as<uint32_t>(); // Safe since already tag checked
512 uint32_t num_limbs = ex_event.inputs[2].as<uint32_t>(); // Safe since already tag checked
513 uint32_t num_p_limbs = radix > 256 ? 32 : static_cast<uint32_t>(get_p_limbs_per_radix_size(radix));
514 trace.set(row,
515 { {
516 // To Radix BE Dynamic Gas
517 { C::execution_two_five_six, 256 },
518 { C::execution_sel_radix_gt_256, radix > 256 ? 1 : 0 },
519 { C::execution_sel_lookup_num_p_limbs, radix <= 256 ? 1 : 0 },
520 { C::execution_num_p_limbs, num_p_limbs },
521 { C::execution_sel_use_num_limbs, num_limbs > num_p_limbs ? 1 : 0 },
522 // Don't set dyn gas factor here since already set in process_gas
523 } });
524 }
525 }
526
527 const bool oog = ex_event.error == ExecutionError::GAS;
528 /**************************************************************************************************
529 * Temporality group 5: Opcode execution.
530 **************************************************************************************************/
531
532 const bool should_execute_opcode = should_check_gas && !oog;
533
534 // These booleans are used after of the "opcode code execution" block but need
535 // to be set as part of the "opcode code execution" block.
536 bool sel_enter_call = false;
537 bool sel_exit_call = false;
538 bool should_execute_revert = false;
539
540 const bool opcode_execution_failed = ex_event.error == ExecutionError::OPCODE_EXECUTION;
541 if (should_execute_opcode) {
542 // At this point we can assume instruction fetching succeeded, so this should never fail.
543 const auto& dispatch_to_subtrace = get_subtrace_info_map().at(*exec_opcode);
544 trace.set(row,
545 { {
546 { C::execution_sel_should_execute_opcode, 1 },
547 { C::execution_sel_opcode_error, opcode_execution_failed ? 1 : 0 },
548 { get_subtrace_selector(dispatch_to_subtrace.subtrace_selector), 1 },
549 } });
550
551 // Execution Trace opcodes - separating for clarity
552 if (dispatch_to_subtrace.subtrace_selector == SubtraceSel::EXECUTION) {
553 trace.set(get_execution_opcode_selector(*exec_opcode), row, 1);
554 }
555
556 // Execution trace opcodes specific logic.
557 // Note that the opcode selectors were set above. (e.g., sel_execute_call, sel_execute_static_call, ..).
558 if (*exec_opcode == ExecutionOpCode::CALL || *exec_opcode == ExecutionOpCode::STATICCALL) {
559 sel_enter_call = true;
560
561 const Gas gas_left = ex_event.after_context_event.gas_limit - ex_event.after_context_event.gas_used;
562
563 uint32_t allocated_l2_gas = registers[0].as<uint32_t>();
564 bool is_l2_gas_left_gt_allocated = gas_left.l2_gas > allocated_l2_gas;
565
566 uint32_t allocated_da_gas = registers[1].as<uint32_t>();
567 bool is_da_gas_left_gt_allocated = gas_left.da_gas > allocated_da_gas;
568
569 trace.set(row,
570 { {
571 { C::execution_sel_enter_call, 1 },
572 { C::execution_l2_gas_left, gas_left.l2_gas },
573 { C::execution_da_gas_left, gas_left.da_gas },
574 { C::execution_is_l2_gas_left_gt_allocated, is_l2_gas_left_gt_allocated ? 1 : 0 },
575 { C::execution_is_da_gas_left_gt_allocated, is_da_gas_left_gt_allocated ? 1 : 0 },
576 } });
577 } else if (*exec_opcode == ExecutionOpCode::RETURN) {
578 sel_exit_call = true;
579 trace.set(row,
580 { {
581 { C::execution_nested_return, has_parent ? 1 : 0 },
582 } });
583 } else if (*exec_opcode == ExecutionOpCode::REVERT) {
584 sel_exit_call = true;
585 should_execute_revert = true;
586 } else if (exec_opcode == ExecutionOpCode::GETENVVAR) {
587 BB_ASSERT_EQ(ex_event.addressing_event.resolution_info.size(),
588 static_cast<size_t>(2),
589 "GETENVVAR should have exactly two resolved operands (envvar enum and output)");
590 // rop[1] is the envvar enum
591 Operand envvar_enum = ex_event.addressing_event.resolution_info[1].resolved_operand;
592 process_get_env_var_opcode(envvar_enum, ex_event.output, trace, row);
593 } else if (*exec_opcode == ExecutionOpCode::INTERNALRETURN) {
594 if (!opcode_execution_failed) {
595 // If we have an opcode error, we don't need to compute the inverse (see internal_call.pil)
596 trace.set(C::execution_internal_call_return_id_inv,
597 row,
598 internal_call_return_id); // Will be inverted in batch later.
599 trace.set(C::execution_sel_read_unwind_call_stack, row, 1);
600 }
601 } else if (*exec_opcode == ExecutionOpCode::SSTORE) {
602 // Equivalent to PIL's (MAX + INITIAL_SIZE - prev_written_public_data_slots_tree_size)
603 // since prev_size = counter + 1 and INITIAL_SIZE = 1.
604 uint32_t remaining_data_writes = MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX -
605 ex_event.before_context_event.tree_states.public_data_tree.counter;
606
607 trace.set(row,
608 { {
609 { C::execution_max_data_writes_reached, remaining_data_writes == 0 },
610 { C::execution_remaining_data_writes_inv,
611 remaining_data_writes }, // Will be inverted in batch later.
612 { C::execution_sel_write_public_data, !opcode_execution_failed },
613 } });
614 } else if (*exec_opcode == ExecutionOpCode::NOTEHASHEXISTS) {
615 uint64_t leaf_index = registers[1].as<uint64_t>();
616 uint64_t note_hash_tree_leaf_count = NOTE_HASH_TREE_LEAF_COUNT;
617 bool note_hash_leaf_in_range = leaf_index < note_hash_tree_leaf_count;
618
619 trace.set(row,
620 { {
621 { C::execution_note_hash_leaf_in_range, note_hash_leaf_in_range },
622 { C::execution_note_hash_tree_leaf_count, FF(note_hash_tree_leaf_count) },
623 } });
624 } else if (*exec_opcode == ExecutionOpCode::EMITNOTEHASH) {
625 uint32_t remaining_note_hashes =
626 MAX_NOTE_HASHES_PER_TX - ex_event.before_context_event.tree_states.note_hash_tree.counter;
627
628 trace.set(row,
629 { {
630 { C::execution_sel_reached_max_note_hashes, remaining_note_hashes == 0 },
631 { C::execution_remaining_note_hashes_inv,
632 remaining_note_hashes }, // Will be inverted in batch later.
633 { C::execution_sel_write_note_hash, !opcode_execution_failed },
634 } });
635 } else if (*exec_opcode == ExecutionOpCode::L1TOL2MSGEXISTS) {
636 uint64_t leaf_index = registers[1].as<uint64_t>();
637 uint64_t l1_to_l2_msg_tree_leaf_count = L1_TO_L2_MSG_TREE_LEAF_COUNT;
638 bool l1_to_l2_msg_leaf_in_range = leaf_index < l1_to_l2_msg_tree_leaf_count;
639
640 trace.set(row,
641 { {
642 { C::execution_l1_to_l2_msg_leaf_in_range, l1_to_l2_msg_leaf_in_range },
643 { C::execution_l1_to_l2_msg_tree_leaf_count, FF(l1_to_l2_msg_tree_leaf_count) },
644 } });
645 //} else if (exec_opcode == ExecutionOpCode::NULLIFIEREXISTS) {
646 // no custom columns!
647 } else if (*exec_opcode == ExecutionOpCode::EMITNULLIFIER) {
648 uint32_t remaining_nullifiers =
649 MAX_NULLIFIERS_PER_TX - ex_event.before_context_event.tree_states.nullifier_tree.counter;
650
651 trace.set(row,
652 { {
653 { C::execution_sel_reached_max_nullifiers, remaining_nullifiers == 0 },
654 { C::execution_remaining_nullifiers_inv,
655 remaining_nullifiers }, // Will be inverted in batch later.
656 { C::execution_sel_write_nullifier,
657 remaining_nullifiers != 0 && !ex_event.before_context_event.is_static },
658 } });
659 } else if (*exec_opcode == ExecutionOpCode::SENDL2TOL1MSG) {
660 uint32_t remaining_l2_to_l1_msgs =
661 MAX_L2_TO_L1_MSGS_PER_TX - ex_event.before_context_event.numL2ToL1Messages;
662
663 trace.set(row,
664 { { { C::execution_sel_l2_to_l1_msg_limit_error, remaining_l2_to_l1_msgs == 0 },
665 { C::execution_remaining_l2_to_l1_msgs_inv,
666 remaining_l2_to_l1_msgs }, // Will be inverted in batch later.
667 { C::execution_sel_write_l2_to_l1_msg, !opcode_execution_failed && !is_discarding() },
668 {
669 C::execution_public_inputs_index,
671 ex_event.before_context_event.numL2ToL1Messages,
672 } } });
673 }
674 }
675
676 /**************************************************************************************************
677 * Temporality group 6: Register write.
678 **************************************************************************************************/
679
680 const bool should_process_register_write = should_execute_opcode && !opcode_execution_failed;
681 if (should_process_register_write) {
682 process_registers_write(*exec_opcode, trace, row);
683 }
684
685 /**************************************************************************************************
686 * Discarding and error related selectors.
687 **************************************************************************************************/
688
689 const bool is_dying_context = ex_event.after_context_event.id == dying_context_id;
690 // Need to generate the item below for checking "is dying context" in circuit
691 // No need to condition by `!is_dying_context` as batch inversion skips 0.
692 const FF dying_context_diff = FF(ex_event.after_context_event.id) - FF(dying_context_id);
693
694 // This is here instead of guarded by `should_execute_opcode` because is_err is a higher level error
695 // than just an opcode error (i.e., it is on if there are any errors in any temporality group).
696 const bool is_err = ex_event.error != ExecutionError::NONE;
697 sel_exit_call = sel_exit_call || is_err; // sel_execute_revert || sel_execute_return || sel_error
698 const bool is_failure = should_execute_revert || is_err;
699 const bool enqueued_call_end = sel_exit_call && !has_parent;
700 const bool nested_failure = is_failure && has_parent;
701
702 trace.set(row,
703 { {
704 { C::execution_sel_exit_call, sel_exit_call ? 1 : 0 },
705 { C::execution_nested_failure, nested_failure ? 1 : 0 },
706 { C::execution_sel_error, is_err ? 1 : 0 },
707 { C::execution_sel_failure, is_failure ? 1 : 0 },
708 { C::execution_discard, is_discarding() ? 1 : 0 },
709 { C::execution_dying_context_id, dying_context_id },
710 { C::execution_dying_context_id_inv, dying_context_id }, // Will be inverted in batch.
711 { C::execution_is_dying_context, is_dying_context ? 1 : 0 },
712 { C::execution_dying_context_diff_inv, dying_context_diff }, // Will be inverted in batch.
713 { C::execution_enqueued_call_end, enqueued_call_end ? 1 : 0 },
714 } });
715
716 // Trace-generation is done for this event.
717 // Now, use this event to determine whether we should set/reset the discard flag for the NEXT event.
718 // Note: is_failure implies discard is true.
719 const bool event_kills_dying_context = is_failure && is_dying_context;
720
721 if (event_kills_dying_context) {
722 // Set/unset discard flag if the current event is the one that kills the dying context
723 dying_context_id = 0;
724 } else if (sel_enter_call && !is_discarding() &&
725 failures.does_context_fail.contains(ex_event.next_context_id)) {
726 // If making a nested call, and discard isn't already high...
727 // if the nested context being entered eventually dies, we set which context is dying (implicitly raise
728 // discard flag). NOTE: If a [STATIC]CALL instruction _itself_ errors, we don't set the discard flag
729 // because we aren't actually entering a new context. This is already captured by `sel_enter_call`
730 // boolean which is set to true only during opcode execution temporality group which cannot
731 // fail for CALL/STATICALL.
732 dying_context_id = ex_event.next_context_id;
733 }
734 // Otherwise, we aren't entering or exiting a dying context,
735 // so just propagate discard and dying context.
736 // Implicit: dying_context_id = dying_context_id; discard = discard;
737
738 // If an enqueued call just exited, next event (if any) is the first in an enqueued call.
739 // Update flag for next iteration.
740 is_first_event_in_enqueued_call = !has_parent && sel_exit_call;
741
742 // Track this bool for use determining whether the next row is the first in a context
743 prev_row_was_enter_call = sel_enter_call;
744
745 row++;
746 }
747
748 // Batch invert the columns.
750}
751
// Populates the instruction-fetching columns of one execution-trace row from a decoded
// instruction: success selector, execution opcode, addressing mode, byte length, and the
// (zero-padded) operand columns.
// NOTE(review): the opening signature line was lost in extraction; per the symbol index this
// is process_instr_fetching(const simulation::Instruction& instruction, TraceContainer& trace,
// uint32_t row) — confirm against the original source.
753 TraceContainer& trace,
754 uint32_t row)
755{
// Scalar per-instruction columns.
756 trace.set(row,
757 { {
758 { C::execution_sel_instruction_fetching_success, 1 },
759 { C::execution_ex_opcode, static_cast<uint8_t>(instruction.get_exec_opcode()) },
760 { C::execution_addressing_mode, instruction.addressing_mode },
761 { C::execution_instr_length, instruction.size_in_bytes() },
762 } });
763
764 // At this point we can assume instruction fetching succeeded.
// Copy so we can pad: the trace expects exactly AVM_MAX_OPERANDS operand columns,
// so operands beyond the instruction's arity are filled with FF(0).
765 auto operands = instruction.operands;
766 BB_ASSERT_LTE(operands.size(), static_cast<size_t>(AVM_MAX_OPERANDS), "Operands size is out of range");
767 operands.resize(AVM_MAX_OPERANDS, Operand::from<FF>(0));
768
769 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
770 trace.set(OPERAND_COLUMNS[i], row, operands.at(i));
771 }
772}
773
// Copies the precomputed per-opcode execution specification into the row: gas-cost columns,
// per-register read/write/tag-check metadata, operand is-address selectors, and subtrace
// dispatch ids.
// NOTE(review): the opening signature line was lost in extraction; per the symbol index this
// is process_execution_spec(const simulation::ExecutionEvent& ex_event, TraceContainer& trace,
// uint32_t row) — confirm against the original source.
775 TraceContainer& trace,
776 uint32_t row)
777{
778 // At this point we can assume instruction fetching succeeded, so this should never fail.
779 ExecutionOpCode exec_opcode = ex_event.wire_instruction.get_exec_opcode();
780 const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
781 const auto& gas_cost = exec_spec.gas_cost;
782
783 // Gas.
784 trace.set(row,
785 { {
786 { C::execution_opcode_gas, gas_cost.opcode_gas },
787 { C::execution_base_da_gas, gas_cost.base_da },
788 { C::execution_dynamic_l2_gas, gas_cost.dyn_l2 },
789 { C::execution_dynamic_da_gas, gas_cost.dyn_da },
790 } });
791
// Per-register spec: whether each register slot is written, active (memory op), and
// whether/what tag it must be checked against (0 when no tag check is needed).
792 const auto& register_info = exec_spec.register_info;
793 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
794 trace.set(row,
795 { {
796 { REGISTER_IS_WRITE_COLUMNS[i], register_info.is_write(i) ? 1 : 0 },
797 { REGISTER_MEM_OP_COLUMNS[i], register_info.is_active(i) ? 1 : 0 },
798 { REGISTER_EXPECTED_TAG_COLUMNS[i],
799 register_info.need_tag_check(i) ? static_cast<uint32_t>(*(register_info.expected_tag(i))) : 0 },
800 { REGISTER_TAG_CHECK_COLUMNS[i], register_info.need_tag_check(i) ? 1 : 0 },
801 } });
802 }
803
804 // Set is_address columns
// Only the first num_addresses operands of this opcode are memory addresses.
805 const auto& num_addresses = exec_spec.num_addresses;
806 for (size_t i = 0; i < num_addresses; i++) {
807 trace.set(OPERAND_IS_ADDRESS_COLUMNS[i], row, 1);
808 }
809
810 // At this point we can assume instruction fetching succeeded, so this should never fail.
// Subtrace dispatch: identifies which sub-trace (alu, cast, poseidon2, ...) handles this opcode.
811 const auto& dispatch_to_subtrace = get_subtrace_info_map().at(exec_opcode);
812 trace.set(row,
813 { {
814 { C::execution_subtrace_id, get_subtrace_id(dispatch_to_subtrace.subtrace_selector) },
815 { C::execution_subtrace_operation_id, dispatch_to_subtrace.subtrace_operation_id },
816 { C::execution_dyn_gas_id, exec_spec.dyn_gas_id },
817 } });
818}
819
// Populates the gas-checking columns of one row from a simulated gas event: out-of-gas
// selectors (L2/DA), addressing gas, dynamic gas factors, and cumulative gas used. Also
// raises the dynamic-gas selector column matching the opcode's dyn_gas_id, if any.
// NOTE(review): the opening signature line was lost in extraction; per the symbol index this
// is process_gas(const simulation::GasEvent& gas_event, ExecutionOpCode exec_opcode,
// TraceContainer& trace, uint32_t row) — confirm against the original source.
821 ExecutionOpCode exec_opcode,
822 TraceContainer& trace,
823 uint32_t row)
824{
// Out of gas if EITHER the L2 or the DA dimension ran out.
825 bool oog = gas_event.oog_l2 || gas_event.oog_da;
826 trace.set(row,
827 { {
828 { C::execution_sel_should_check_gas, 1 },
829 { C::execution_out_of_gas_l2, gas_event.oog_l2 ? 1 : 0 },
830 { C::execution_out_of_gas_da, gas_event.oog_da ? 1 : 0 },
831 { C::execution_sel_out_of_gas, oog ? 1 : 0 },
832 // Addressing gas.
833 { C::execution_addressing_gas, gas_event.addressing_gas },
834 // Dynamic gas.
835 { C::execution_dynamic_l2_gas_factor, gas_event.dynamic_gas_factor.l2_gas },
836 { C::execution_dynamic_da_gas_factor, gas_event.dynamic_gas_factor.da_gas },
837 // Derived cumulative gas used.
838 { C::execution_total_gas_l2, gas_event.total_gas_used_l2 },
839 { C::execution_total_gas_da, gas_event.total_gas_used_da },
840 } });
841
// dyn_gas_id == 0 means "no dynamic gas" for this opcode, so no selector is set.
842 const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
843 if (exec_spec.dyn_gas_id != 0) {
844 trace.set(get_dyn_gas_selector(exec_spec.dyn_gas_id), row, 1);
845 }
846}
847
// Populates the operand-addressing columns of one row from an addressing event: per-operand
// relative/indirect flags, relative-computation results, resolved operands and tags, and the
// aggregated addressing-error columns consumed by the PIL relations.
// NOTE(review): the opening signature lines were lost in extraction; per the symbol index this
// is process_addressing(const simulation::AddressingEvent& addr_event,
// const simulation::Instruction& instruction, TraceContainer& trace, uint32_t row) — confirm.
850 TraceContainer& trace,
851 uint32_t row)
852{
853 // At this point we can assume instruction fetching succeeded, so this should never fail.
854 ExecutionOpCode exec_opcode = instruction.get_exec_opcode();
855 const ExecInstructionSpec& ex_spec = get_exec_instruction_spec().at(exec_opcode);
856
// Copy so we can pad to AVM_MAX_OPERANDS with neutral (tag 0, value 0, no error) entries.
857 auto resolution_info_vec = addr_event.resolution_info;
859 resolution_info_vec.size(), static_cast<size_t>(AVM_MAX_OPERANDS), "Resolution info size is out of range");
860 // Pad with default values for the missing operands.
861 resolution_info_vec.resize(AVM_MAX_OPERANDS,
862 {
863 // This is the default we want: both tag and value 0.
864 .after_relative = FF::zero(),
865 .resolved_operand = Operand::from_tag(static_cast<ValueTag>(0), 0),
866 .error = std::nullopt,
867 });
868
// Per-operand scratch arrays gathered in one pass below, then written out to columns.
// NOTE(review): extraction dropped lines here — the declarations of is_relative,
// is_indirect and relative_oob (used below) are missing; confirm against the original.
869 std::array<bool, AVM_MAX_OPERANDS> should_apply_indirection{};
872 std::array<bool, AVM_MAX_OPERANDS> is_relative_effective{};
873 std::array<bool, AVM_MAX_OPERANDS> is_indirect_effective{};
875 std::array<FF, AVM_MAX_OPERANDS> after_relative{};
876 std::array<FF, AVM_MAX_OPERANDS> resolved_operand{};
877 std::array<uint8_t, AVM_MAX_OPERANDS> resolved_operand_tag{};
878 uint8_t num_relative_operands = 0;
879
880 // The error about the base address being invalid is stored in every resolution_info member when it happens.
881 bool base_address_invalid = resolution_info_vec[0].error.has_value() &&
882 *resolution_info_vec[0].error == AddressingEventError::BASE_ADDRESS_INVALID;
883 bool do_base_check = false; // Whether we need to retrieve the base address,
884 // i.e., at least one operand is relative.
885
886 // Gather operand information.
887 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
888 const auto& resolution_info = resolution_info_vec[i];
// Relative/indirect bits only take effect on operands that are actually addresses.
889 bool op_is_address = i < ex_spec.num_addresses;
890 relative_oob[i] = resolution_info.error.has_value() &&
891 *resolution_info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB;
892 is_relative[i] = is_operand_relative(instruction.addressing_mode, i);
893 is_indirect[i] = is_operand_indirect(instruction.addressing_mode, i);
894 is_relative_effective[i] = op_is_address && is_relative[i];
895 is_indirect_effective[i] = op_is_address && is_indirect[i];
// Indirection is only applied when no earlier addressing step already failed.
896 should_apply_indirection[i] = is_indirect_effective[i] && !relative_oob[i] && !base_address_invalid;
897 resolved_operand_tag[i] = static_cast<uint8_t>(resolution_info.resolved_operand.get_tag());
898 after_relative[i] = resolution_info.after_relative;
899 resolved_operand[i] = resolution_info.resolved_operand;
900 if (is_relative_effective[i]) {
901 do_base_check = true;
902 num_relative_operands++;
903 }
904 }
905
906 BB_ASSERT(do_base_check || !base_address_invalid, "Base address is invalid but we are not checking it.");
907
908 // Set the operand columns.
909 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
910 trace.set(row,
911 { {
912 { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative[i] ? 1 : 0 },
913 { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect[i] ? 1 : 0 },
914 { OPERAND_RELATIVE_OVERFLOW_COLUMNS[i], relative_oob[i] ? 1 : 0 },
915 { OPERAND_AFTER_RELATIVE_COLUMNS[i], after_relative[i] },
916 { OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS[i], should_apply_indirection[i] ? 1 : 0 },
917 { OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS[i],
918 (is_relative_effective[i] && !base_address_invalid) ? 1 : 0 },
919 { RESOLVED_OPERAND_COLUMNS[i], resolved_operand[i] },
920 { RESOLVED_OPERAND_TAG_COLUMNS[i], resolved_operand_tag[i] },
921 } });
922 }
923
924 // We need to compute relative and indirect over the whole 16 bits of the indirect flag.
925 // See comment in PIL file about indirect upper bits.
926 for (size_t i = AVM_MAX_OPERANDS; i < TOTAL_INDIRECT_BITS / 2; i++) {
927 bool is_relative = is_operand_relative(instruction.addressing_mode, i);
928 bool is_indirect = is_operand_indirect(instruction.addressing_mode, i);
929 trace.set(row,
930 { {
931 { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative ? 1 : 0 },
932 { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect ? 1 : 0 },
933 } });
934 }
935
936 // Inverse of following difference is required when base address is invalid.
// Nonzero iff the base address tag differs from U32 (i.e., the base address is invalid).
937 FF base_address_tag_diff = base_address_invalid ? FF(static_cast<uint8_t>(addr_event.base_address.get_tag())) -
938 FF(static_cast<uint8_t>(MemoryTag::U32))
939 : 0;
940
941 // Tag check after indirection.
942 bool some_final_check_failed = std::ranges::any_of(addr_event.resolution_info, [](const auto& info) {
943 return info.error.has_value() && *info.error == AddressingEventError::INVALID_ADDRESS_AFTER_INDIRECTION;
944 });
// Per-operand tag differences packed base-8 (3 bits per operand) into one field element,
// so a single batched inverse can witness "some operand tag != U32".
945 FF batched_tags_diff = 0;
946 if (some_final_check_failed) {
947 FF power_of_2 = 1;
948 for (size_t i = 0; i < AVM_MAX_OPERANDS; ++i) {
949 if (should_apply_indirection[i]) {
950 batched_tags_diff += power_of_2 * (FF(resolved_operand_tag[i]) - FF(MEM_TAG_U32));
951 }
952 power_of_2 *= 8; // 2^3
953 }
954 }
955
956 // Collect addressing errors. See PIL file for reference.
// Sum of error indicators (base invalid + per-operand relative overflows + final tag check);
// the circuit only needs this to be provably nonzero when addressing failed.
957 bool addressing_failed =
958 std::ranges::any_of(addr_event.resolution_info, [](const auto& info) { return info.error.has_value(); });
959 FF addressing_error_collection =
960 addressing_failed
961 ? FF(
962 // Base address invalid.
963 (base_address_invalid ? 1 : 0) +
964 // Relative overflow.
965 std::accumulate(addr_event.resolution_info.begin(),
966 addr_event.resolution_info.end(),
967 static_cast<uint32_t>(0),
968 [](uint32_t acc, const auto& info) {
969 return acc +
970 (info.error.has_value() &&
971 *info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB
972 ? 1
973 : 0);
974 }) +
975 // Some invalid address after indirection.
976 (some_final_check_failed ? 1 : 0))
977 : 0;
978
979 trace.set(
980 row,
981 { {
982 { C::execution_sel_addressing_error, addressing_failed ? 1 : 0 },
983 { C::execution_addressing_error_collection_inv, addressing_error_collection }, // Will be inverted in batch.
984 { C::execution_base_address_val, addr_event.base_address.as_ff() },
985 { C::execution_base_address_tag, static_cast<uint8_t>(addr_event.base_address.get_tag()) },
986 { C::execution_base_address_tag_diff_inv, base_address_tag_diff }, // Will be inverted in batch.
987 { C::execution_batched_tags_diff_inv, batched_tags_diff }, // Will be inverted in batch.
988 { C::execution_sel_some_final_check_failed, some_final_check_failed ? 1 : 0 },
989 { C::execution_sel_base_address_failure, base_address_invalid ? 1 : 0 },
990 { C::execution_num_relative_operands_inv, num_relative_operands }, // Will be inverted in batch later.
991 { C::execution_sel_do_base_check, do_base_check ? 1 : 0 },
992 { C::execution_highest_address, AVM_HIGHEST_MEM_ADDRESS },
993 } });
994}
995
// Batch-inverts every "_inv" column that earlier processing filled with a RAW (non-inverted)
// value, one inversion pass over the whole trace instead of one field inverse per cell.
// Zero entries are skipped by invert_columns (see comments at the fill sites).
// NOTE(review): the function's signature line was lost in extraction and its name is not in
// the symbol index — presumably a TraceContainer&-taking helper called after all rows are
// processed; confirm against the original source.
997{
998 trace.invert_columns({ {
999 // Registers.
1000 C::execution_batched_tags_diff_inv_reg,
1001 // Context.
1002 C::execution_is_parent_id_inv,
1003 C::execution_internal_call_return_id_inv,
1004 // Trees.
1005 C::execution_remaining_data_writes_inv,
1006 C::execution_remaining_note_hashes_inv,
1007 C::execution_remaining_nullifiers_inv,
1008 // L1ToL2MsgExists.
1009 C::execution_remaining_l2_to_l1_msgs_inv,
1010 // Discard.
1011 C::execution_dying_context_id_inv,
1012 C::execution_dying_context_diff_inv,
1013 // Addressing.
1014 C::execution_addressing_error_collection_inv,
1015 C::execution_batched_tags_diff_inv,
1016 C::execution_base_address_tag_diff_inv,
1017 C::execution_num_relative_operands_inv,
1018 } });
1019}
1020
// Fills the register (read) columns of one row: resolves each active register slot from the
// opcode's inputs/output, writes value+tag columns, marks effective reads, and computes the
// batched tag-mismatch witness used when register processing failed.
// NOTE(review): several signature lines were lost in extraction; per the symbol index this is
// process_registers(ExecutionOpCode exec_opcode, const std::vector<MemoryValue>& inputs,
// const MemoryValue& output, std::span<MemoryValue> registers,
// bool register_processing_failed, TraceContainer& trace, uint32_t row) — confirm.
1023 const MemoryValue& output,
1025 bool register_processing_failed,
1026 TraceContainer& trace,
1027 uint32_t row)
1028{
1029 BB_ASSERT_EQ(registers.size(), static_cast<size_t>(AVM_MAX_REGISTERS), "Registers size is out of range");
1030 // At this point we can assume instruction fetching succeeded, so this should never fail.
1031 const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;
1032
1033 // Registers. We set all of them here, even the write ones. This is fine because
1034 // if an error occurred before the register write group, simulation would pass the default
1035 // value-tag (0, 0). Furthermore, the permutation of the memory write would not be activated.
1036 size_t input_counter = 0;
1037 for (uint8_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
1038 if (register_info.is_active(i)) {
1039 if (register_info.is_write(i)) {
1040 // If this is a write operation, we need to get the value from the output.
1041 registers[i] = output;
1042 } else {
1043 // If this is a read operation, we need to get the value from the input.
1044
1045 // Register specifications must be consistent with the number of inputs.
1046 BB_ASSERT(inputs.size() > input_counter, "Not enough inputs for register read");
1047
1048 registers[i] = inputs.at(input_counter);
1049 input_counter++;
1050 }
1051 }
1052 }
1053
// Emit value and tag columns for every register slot (inactive slots keep defaults).
1054 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
1055 trace.set(REGISTER_COLUMNS[i], row, registers[i]);
1056 trace.set(REGISTER_MEM_TAG_COLUMNS[i], row, static_cast<uint8_t>(registers[i].get_tag()));
1057 // This one is special because it sets the reads (but not the writes).
1058 // If we got here, sel_should_read_registers=1.
1059 if (register_info.is_active(i) && !register_info.is_write(i)) {
1060 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
1061 }
1062 }
1063
// Tag differences packed base-8 (3 bits per register) into one field element, so a single
// batched inverse can witness "some register tag check failed". Zero when processing passed.
1064 FF batched_tags_diff_reg = 0;
1065 if (register_processing_failed) {
1066 FF power_of_2 = 1;
1067 for (size_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
1068 if (register_info.need_tag_check(i)) {
1069 batched_tags_diff_reg += power_of_2 * (FF(static_cast<uint8_t>(registers[i].get_tag())) -
1070 FF(static_cast<uint8_t>(*register_info.expected_tag(i))));
1071 }
1072 power_of_2 *= 8; // 2^3
1073 }
1074 }
1075
1076 trace.set(row,
1077 { {
1078 { C::execution_sel_should_read_registers, 1 },
1079 { C::execution_batched_tags_diff_inv_reg, batched_tags_diff_reg }, // Will be inverted in batch.
1080 { C::execution_sel_register_read_error, register_processing_failed ? 1 : 0 },
1081 } });
1082}
1083
// Marks the register WRITE side of one row: raises sel_should_write_registers and the
// per-register effective-op selector for every active write slot of the opcode.
// Read slots are handled by process_registers.
// NOTE(review): the opening signature line was lost in extraction; per the symbol index this
// is process_registers_write(ExecutionOpCode exec_opcode, TraceContainer& trace,
// uint32_t row) — confirm against the original source.
1085{
1086 const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;
1087 trace.set(C::execution_sel_should_write_registers, row, 1);
1088
1089 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
1090 // This one is special because it sets the writes.
1091 // If we got here, sel_should_write_registers=1.
1092 if (register_info.is_active(i) && register_info.is_write(i)) {
1093 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
1094 }
1095 }
1096}
1097
// Populates the GETENVVAR-specific columns of one row from the precomputed env-var spec
// table: which public-inputs lookup column applies, the PI row index, one selector per
// env-var kind, and the output tag for register 0.
// NOTE(review): the opening signature line was lost in extraction; per the symbol index this
// is process_get_env_var_opcode(simulation::Operand envvar_enum, MemoryValue output,
// TraceContainer& trace, uint32_t row) — confirm against the original source.
1099 MemoryValue output,
1100 TraceContainer& trace,
1101 uint32_t row)
1102{
// The enum operand selecting WHICH env var is read must be a U8.
1103 BB_ASSERT_EQ(envvar_enum.get_tag(), ValueTag::U8, "Envvar enum tag is not U8");
1104 const auto& envvar_spec = GetEnvVarSpec::get_table(envvar_enum.as<uint8_t>());
1105
1106 trace.set(row,
1107 { {
1108 { C::execution_sel_execute_get_env_var, 1 },
1109 { C::execution_sel_envvar_pi_lookup_col0, envvar_spec.envvar_pi_lookup_col0 ? 1 : 0 },
1110 { C::execution_sel_envvar_pi_lookup_col1, envvar_spec.envvar_pi_lookup_col1 ? 1 : 0 },
1111 { C::execution_envvar_pi_row_idx, envvar_spec.envvar_pi_row_idx },
1112 { C::execution_is_address, envvar_spec.is_address ? 1 : 0 },
1113 { C::execution_is_sender, envvar_spec.is_sender ? 1 : 0 },
1114 { C::execution_is_transactionfee, envvar_spec.is_transactionfee ? 1 : 0 },
1115 { C::execution_is_isstaticcall, envvar_spec.is_isstaticcall ? 1 : 0 },
1116 { C::execution_is_l2gasleft, envvar_spec.is_l2gasleft ? 1 : 0 },
1117 { C::execution_is_dagasleft, envvar_spec.is_dagasleft ? 1 : 0 },
// value_from_pi only carries the output when this env var is actually served by a
// public-inputs lookup (either column); otherwise it stays 0.
1118 { C::execution_value_from_pi,
1119 envvar_spec.envvar_pi_lookup_col0 || envvar_spec.envvar_pi_lookup_col1 ? output.as_ff() : 0 },
1120 { C::execution_mem_tag_reg_0_, envvar_spec.out_tag },
1121 } });
1122}
1123
// Static registration of every lookup/permutation interaction the execution trace takes part
// in, grouped by feature (fetching, addressing, gas, context, opcodes, subtrace dispatch).
// NOTE(review): this is the initializer of the static `interactions` member (per the symbol
// index: InteractionDefinition interactions). Extraction dropped many `.add<...>()` lines
// (note the gaps in the original numbering), so this list is NOT complete — confirm against
// the original source before relying on it.
1126 // Execution specification (precomputed)
1128 // Bytecode retrieval
1129 .add<lookup_execution_bytecode_retrieval_result_settings, InteractionType::LookupGeneric>()
1130 // Instruction fetching
1132 .add<lookup_execution_instruction_fetching_body_settings, InteractionType::LookupGeneric>()
1133 // Addressing
1135 .add<lookup_addressing_relative_overflow_result_1_settings, InteractionType::LookupGeneric>(C::gt_sel)
1137 .add<lookup_addressing_relative_overflow_result_3_settings, InteractionType::LookupGeneric>(C::gt_sel)
1139 .add<lookup_addressing_relative_overflow_result_5_settings, InteractionType::LookupGeneric>(C::gt_sel)
1141 // Internal Call Stack
1142 .add<perm_internal_call_push_call_stack_settings, InteractionType::Permutation>()
1144 // Gas
1145 .add<lookup_gas_addressing_gas_read_settings, InteractionType::LookupIntoIndexedByClk>()
1147 .add<lookup_gas_is_out_of_gas_da_settings, InteractionType::LookupGeneric>(C::gt_sel)
1149 // Gas - ToRadix BE
1150 .add<lookup_execution_check_radix_gt_256_settings, InteractionType::LookupGeneric>(C::gt_sel)
1152 .add<lookup_execution_get_max_limbs_settings, InteractionType::LookupGeneric>(C::gt_sel)
1153 // Dynamic Gas - SStore
1155 // Context Stack
1156 .add<perm_context_ctx_stack_call_settings, InteractionType::Permutation>()
1158 .add<lookup_context_ctx_stack_return_settings, InteractionType::LookupGeneric>()
1159 // External Call
1161 .add<lookup_external_call_is_da_gas_left_gt_allocated_settings, InteractionType::LookupGeneric>(C::gt_sel)
1162 // GetEnvVar opcode
1164 .add<lookup_get_env_var_read_from_public_inputs_col0_settings, InteractionType::LookupIntoIndexedByClk>()
1166 // Sload opcode (cannot be sequential as public data tree check trace is sorted in tracegen)
1167 .add<lookup_sload_storage_read_settings, InteractionType::LookupGeneric>()
1168 // Sstore opcode
1170 // NoteHashExists
1171 .add<lookup_notehash_exists_note_hash_read_settings, InteractionType::LookupSequential>()
1173 // NullifierExists opcode
1174 .add<lookup_nullifier_exists_nullifier_exists_check_settings, InteractionType::LookupSequential>()
1175 // EmitNullifier
1177 // EmitNoteHash
1178 .add<lookup_emit_notehash_notehash_tree_write_settings, InteractionType::LookupSequential>()
1179 // L1ToL2MsgExists
1181 C::gt_sel)
1182 .add<lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_settings, InteractionType::LookupSequential>()
1183 // SendL2ToL1Msg
1185 // Dispatching to other sub-traces
1186 .add<lookup_execution_dispatch_to_alu_settings, InteractionType::LookupGeneric>()
1188 .add<perm_execution_dispatch_to_cd_copy_settings, InteractionType::Permutation>()
1190 .add<lookup_execution_dispatch_to_cast_settings, InteractionType::LookupGeneric>()
1192 .add<perm_execution_dispatch_to_get_contract_instance_settings, InteractionType::Permutation>()
1194 .add<perm_execution_dispatch_to_poseidon2_perm_settings, InteractionType::Permutation>()
1196 .add<perm_execution_dispatch_to_keccakf1600_settings, InteractionType::Permutation>()
1198 .add<perm_execution_dispatch_to_to_radix_settings, InteractionType::Permutation>();
1199
1200} // namespace bb::avm2::tracegen
1200} // namespace bb::avm2::tracegen
#define BB_ASSERT(expression,...)
Definition assert.hpp:70
#define BB_ASSERT_EQ(actual, expected,...)
Definition assert.hpp:83
#define BB_ASSERT_LTE(left, right,...)
Definition assert.hpp:158
#define MEM_TAG_U32
#define AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX
#define AVM_MAX_OPERANDS
#define NOTE_HASH_TREE_LEAF_COUNT
#define L1_TO_L2_MSG_TREE_LEAF_COUNT
#define AVM_MAX_REGISTERS
#define MAX_L2_TO_L1_MSGS_PER_TX
#define MAX_NOTE_HASHES_PER_TX
#define MAX_NULLIFIERS_PER_TX
#define AVM_HIGHEST_MEM_ADDRESS
#define MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX
static TaggedValue from_tag(ValueTag tag, FF value)
ValueTag get_tag() const
void process_execution_spec(const simulation::ExecutionEvent &ex_event, TraceContainer &trace, uint32_t row)
void process_instr_fetching(const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static const InteractionDefinition interactions
void process_registers(ExecutionOpCode exec_opcode, const std::vector< MemoryValue > &inputs, const MemoryValue &output, std::span< MemoryValue > registers, bool register_processing_failed, TraceContainer &trace, uint32_t row)
void process_get_env_var_opcode(simulation::Operand envvar_enum, MemoryValue output, TraceContainer &trace, uint32_t row)
void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process_gas(const simulation::GasEvent &gas_event, ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process(const simulation::EventEmitterInterface< simulation::ExecutionEvent >::Container &ex_events, TraceContainer &trace)
void process_addressing(const simulation::AddressingEvent &addr_event, const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static Table get_table(uint8_t envvar)
InteractionDefinition & add(auto &&... args)
#define info(...)
Definition log.hpp:93
TestTraceContainer trace
bool app_logic_failure
uint32_t app_logic_exit_context_id
bool teardown_failure
unordered_flat_set< uint32_t > does_context_fail
uint32_t teardown_exit_context_id
GasEvent gas_event
Instruction instruction
AvmProvingInputs inputs
Column get_dyn_gas_selector(uint32_t dyn_gas_id)
Get the column selector for a given dynamic gas ID.
const std::unordered_map< ExecutionOpCode, SubtraceInfo > & get_subtrace_info_map()
Column get_subtrace_selector(SubtraceSel subtrace_sel)
Get the column selector for a given subtrace selector.
FF get_subtrace_id(SubtraceSel subtrace_sel)
Get the subtrace ID for a given subtrace enum.
lookup_settings< lookup_get_env_var_read_from_public_inputs_col1_settings_ > lookup_get_env_var_read_from_public_inputs_col1_settings
lookup_settings< lookup_execution_check_written_storage_slot_settings_ > lookup_execution_check_written_storage_slot_settings
lookup_settings< lookup_addressing_relative_overflow_result_2_settings_ > lookup_addressing_relative_overflow_result_2_settings
permutation_settings< perm_execution_dispatch_to_emit_unencrypted_log_settings_ > perm_execution_dispatch_to_emit_unencrypted_log_settings
lookup_settings< lookup_addressing_relative_overflow_result_4_settings_ > lookup_addressing_relative_overflow_result_4_settings
lookup_settings< lookup_execution_dyn_l2_factor_bitwise_settings_ > lookup_execution_dyn_l2_factor_bitwise_settings
lookup_settings< lookup_external_call_is_l2_gas_left_gt_allocated_settings_ > lookup_external_call_is_l2_gas_left_gt_allocated_settings
bool is_operand_relative(uint16_t indirect_flag, size_t operand_index)
Checks if the operand at the given index is relative.
lookup_settings< lookup_emit_nullifier_write_nullifier_settings_ > lookup_emit_nullifier_write_nullifier_settings
size_t get_p_limbs_per_radix_size(size_t radix)
Definition to_radix.cpp:54
lookup_settings< lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings_ > lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings
permutation_settings< perm_execution_dispatch_to_sha256_compression_settings_ > perm_execution_dispatch_to_sha256_compression_settings
lookup_settings< lookup_gas_is_out_of_gas_l2_settings_ > lookup_gas_is_out_of_gas_l2_settings
lookup_settings< lookup_execution_dispatch_to_set_settings_ > lookup_execution_dispatch_to_set_settings
lookup_settings< lookup_context_ctx_stack_rollback_settings_ > lookup_context_ctx_stack_rollback_settings
bool is_operand_indirect(uint16_t indirect_flag, size_t operand_index)
Checks if the operand at the given index is indirect.
lookup_settings< lookup_execution_dispatch_to_bitwise_settings_ > lookup_execution_dispatch_to_bitwise_settings
lookup_settings< lookup_execution_get_p_limbs_settings_ > lookup_execution_get_p_limbs_settings
const std::unordered_map< ExecutionOpCode, ExecInstructionSpec > & get_exec_instruction_spec()
lookup_settings< lookup_internal_call_unwind_call_stack_settings_ > lookup_internal_call_unwind_call_stack_settings
lookup_settings< lookup_execution_exec_spec_read_settings_ > lookup_execution_exec_spec_read_settings
lookup_settings< lookup_get_env_var_precomputed_info_settings_ > lookup_get_env_var_precomputed_info_settings
lookup_settings< lookup_addressing_relative_overflow_result_0_settings_ > lookup_addressing_relative_overflow_result_0_settings
lookup_settings< lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings_ > lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings
permutation_settings< perm_execution_dispatch_to_ecc_add_settings_ > perm_execution_dispatch_to_ecc_add_settings
lookup_settings< lookup_addressing_relative_overflow_result_6_settings_ > lookup_addressing_relative_overflow_result_6_settings
lookup_settings< lookup_execution_instruction_fetching_result_settings_ > lookup_execution_instruction_fetching_result_settings
lookup_settings< lookup_notehash_exists_note_hash_leaf_index_in_range_settings_ > lookup_notehash_exists_note_hash_leaf_index_in_range_settings
lookup_settings< lookup_sstore_record_written_storage_slot_settings_ > lookup_sstore_record_written_storage_slot_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
permutation_settings< perm_execution_dispatch_to_rd_copy_settings_ > perm_execution_dispatch_to_rd_copy_settings
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::vector< OperandResolutionInfo > resolution_info
ExecutionOpCode get_exec_opcode() const