Barretenberg
The ZK-SNARK library at the core of Aztec
cycle_scalar.cpp
// === AUDIT STATUS ===
// internal: { status: Complete, auditors: [Luke], commit: a48c205d6dcd4338f5b83b4fda18bff6015be07b}
// external_1: { status: not started, auditors: [], commit: }
// external_2: { status: not started, auditors: [], commit: }
// =====================

#include "./cycle_scalar.hpp"
#include "./cycle_group.hpp"

#include <tuple>

namespace bb::stdlib {

/**
 * @brief Private constructor that skips field validation (for internal use only).
 */
template <typename Builder>
cycle_scalar<Builder>::cycle_scalar(const field_t& lo, const field_t& hi, [[maybe_unused]] SkipValidation flag)
    : _lo(lo)
    , _hi(hi)
{}

template <typename Builder>
cycle_scalar<Builder>::cycle_scalar(const field_t& lo, const field_t& hi)
    : _lo(lo)
    , _hi(hi)
{
    validate_scalar_is_in_field();
}

template <typename Builder> cycle_scalar<Builder>::cycle_scalar(const ScalarField& in)
{
    const uint256_t value(in);
    const auto [lo_v, hi_v] = decompose_into_lo_hi_u256(value);
    _lo = lo_v;
    _hi = hi_v;
}
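// Note: decompose_into_lo_hi_u256 is assumed to return (value mod 2^LO_BITS, value >> LO_BITS), so that
// in == lo + 2^LO_BITS * hi as native integers; get_value() below performs the inverse recomposition.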

/**
 * @brief Construct a cycle scalar from a witness value in the Grumpkin scalar field.
 */
template <typename Builder>
cycle_scalar<Builder> cycle_scalar<Builder>::from_witness(Builder* context, const ScalarField& value)
{
    const uint256_t value_u256(value);
    const auto [lo_v, hi_v] = decompose_into_lo_hi_u256(value_u256);
    field_t lo = witness_t<Builder>(context, lo_v);
    field_t hi = witness_t<Builder>(context, hi_v);
    lo.set_free_witness_tag();
    hi.set_free_witness_tag();

    cycle_scalar result{ lo, hi };

    return result;
}
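// Note: `cycle_scalar result{ lo, hi }` resolves to the (lo, hi) constructor rather than aggregate initialisation,
// so the freshly created witnesses are validated against the Grumpkin scalar field modulus before being returned.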

template <typename Builder> cycle_scalar<Builder>::cycle_scalar(BigScalarField& scalar)
{
    constexpr uint64_t NUM_LIMB_BITS = BigScalarField::NUM_LIMB_BITS;

    if (scalar.is_constant()) {
        const uint256_t value((scalar.get_value() % uint512_t(ScalarField::modulus)).lo);
        const auto [value_lo, value_hi] = decompose_into_lo_hi_u256(value);

        _lo = value_lo;
        _hi = value_hi;
        _lo.set_origin_tag(scalar.get_origin_tag());
        _hi.set_origin_tag(scalar.get_origin_tag());
        return;
    }

    // Step 1: Ensure the bigfield scalar fits into LO_BITS + HI_BITS by reducing if necessary. Note: we can tolerate
    // the scalar being > ScalarField::modulus, because performing a scalar mul implicitly performs a modular reduction.
    if (scalar.get_maximum_value() >= (uint512_t(1) << (LO_BITS + HI_BITS))) {
        scalar.self_reduce();
    }

    field_t limb0 = scalar.binary_basis_limbs[0].element;
    field_t limb1 = scalar.binary_basis_limbs[1].element;
    field_t limb2 = scalar.binary_basis_limbs[2].element;
    field_t limb3 = scalar.binary_basis_limbs[3].element;

    uint256_t limb1_max = scalar.binary_basis_limbs[1].maximum_value;

    // Step 2: Ensure that limb0 only contains at most NUM_LIMB_BITS. If not, slice off the excess and add it into limb1
    uint256_t limb0_max = scalar.binary_basis_limbs[0].maximum_value;
    if (limb0_max > BigScalarField::DEFAULT_MAXIMUM_LIMB) {

        // Split limb0 into lo (NUM_LIMB_BITS) and hi (remaining bits) slices. Note that no_wrap_split_at enforces range
        // constraints of NUM_LIMB_BITS and (limb0_max_bits - NUM_LIMB_BITS) respectively on the slices.
        const auto limb0_max_bits = static_cast<size_t>(limb0_max.get_msb() + 1);
        auto [limb0_lo, limb0_hi] = limb0.no_wrap_split_at(NUM_LIMB_BITS, limb0_max_bits);

        // Move the high bits from limb0 into limb1
        limb0 = limb0_lo;
        limb1 += limb0_hi;
        uint256_t limb0_hi_max = limb0_max >> NUM_LIMB_BITS;
        limb1_max += limb0_hi_max;
    }
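
    // Note: the move above is value-preserving: limb0's high slice carries weight 2^NUM_LIMB_BITS within the full
    // scalar, which is exactly limb1's weight, so folding it into limb1 (and its bound into limb1_max) changes nothing.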

    // Sanity check that limb1 is the limb that contributes both to *this._lo and *this._hi
    BB_ASSERT_GT(NUM_LIMB_BITS * 2, LO_BITS);
    BB_ASSERT_LT(NUM_LIMB_BITS, LO_BITS);
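    // With the typical parameters (NUM_LIMB_BITS = 68, LO_BITS = 128) both assertions hold: 2 * 68 = 136 > 128 and
    // 68 < 128, so the lo/hi boundary falls strictly inside limb1. The concrete values are illustrative only; the
    // code relies solely on the two inequalities asserted above.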

    // Step 3: limb1 may contribute to both *this._lo and *this._hi. Compute the values of the two limb1 slices
    const size_t lo_bits_in_limb_1 = LO_BITS - NUM_LIMB_BITS;

    field_t limb1_lo;
    field_t limb1_hi;

    // Edge case: If limb1's maximum value fits entirely in the low bits, no split is needed.
    if (limb1_max < (uint256_t(1) << lo_bits_in_limb_1)) {
        limb1_lo = limb1;
        limb1_hi = field_t(0);
    } else { // Otherwise, split limb1 into lo and hi parts
        const auto limb1_max_bits = static_cast<size_t>(limb1_max.get_msb() + 1);
        std::tie(limb1_lo, limb1_hi) = limb1.no_wrap_split_at(lo_bits_in_limb_1, limb1_max_bits);
    }

    // Propagate the origin tag to the chunks of limb1
    limb1_lo.set_origin_tag(limb1.get_origin_tag());
    limb1_hi.set_origin_tag(limb1.get_origin_tag());

    // Step 4: Construct *this._lo out of limb0 and limb1_lo
    _lo = limb0 + (limb1_lo * BigScalarField::shift_1);

    // Step 5: Construct *this._hi out of limb1_hi, limb2 and limb3
    const uint256_t limb_2_shift = uint256_t(1) << ((2 * NUM_LIMB_BITS) - LO_BITS);
    const uint256_t limb_3_shift = uint256_t(1) << ((3 * NUM_LIMB_BITS) - LO_BITS);
    _hi = limb1_hi.add_two(limb2 * limb_2_shift, limb3 * limb_3_shift);
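
    // Weight bookkeeping for Steps 4 and 5: limb_i carries weight 2^(i * NUM_LIMB_BITS) in the full scalar. Relative
    // to _lo (weight 2^0), limb1_lo is therefore scaled by shift_1 = 2^NUM_LIMB_BITS; relative to _hi (weight
    // 2^LO_BITS), limb1_hi has coefficient 1 while limb2 and limb3 are scaled by 2^(2 * NUM_LIMB_BITS - LO_BITS) and
    // 2^(3 * NUM_LIMB_BITS - LO_BITS) respectively.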

    // Manually propagate the origin tag of the scalar to the lo/hi limbs
    _lo.set_origin_tag(scalar.get_origin_tag());
    _hi.set_origin_tag(scalar.get_origin_tag());

    validate_scalar_is_in_field();
}

template <typename Builder> bool cycle_scalar<Builder>::is_constant() const
{
    return (_lo.is_constant() && _hi.is_constant());
}

/**
 * @brief Validates that the scalar (lo + hi * 2^LO_BITS) is less than the Grumpkin scalar field modulus.
 */
template <typename Builder> void cycle_scalar<Builder>::validate_scalar_is_in_field() const
{
    // Using _unsafe variant: range constraints are deferred to batch_mul's decompose_into_default_range
    validate_split_in_field_unsafe(_lo, _hi, LO_BITS, ScalarField::modulus);
}
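
// Note: validate_split_in_field_unsafe enforces _lo + _hi * 2^LO_BITS < ScalarField::modulus under the assumption
// that _lo and _hi already satisfy their LO_BITS/HI_BITS range constraints; those are the constraints deferred to
// batch_mul as noted above.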

template <typename Builder> typename cycle_scalar<Builder>::ScalarField cycle_scalar<Builder>::get_value() const
{
    uint256_t lo_v(_lo.get_value());
    uint256_t hi_v(_hi.get_value());
    return ScalarField(lo_v + (hi_v << LO_BITS));
}
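
// get_value() simply recombines the native witness values as lo + 2^LO_BITS * hi; it reads values only and adds no
// constraints to the circuit.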

} // namespace bb::stdlib