diff --git a/src/basic_field_ops.cairo b/src/basic_field_ops.cairo index 1742f8a..8ea50f9 100644 --- a/src/basic_field_ops.cairo +++ b/src/basic_field_ops.cairo @@ -2,6 +2,75 @@ from starkware.cairo.common.cairo_builtins import UInt384 from starkware.cairo.common.cairo_builtins import ModBuiltin from starkware.cairo.common.registers import get_fp_and_pc +const POW_2_32_252 = 0x100000000; +const POW_2_64_252 = 0x10000000000000000; + +// Compute u512 mod p, where u512 = high * 2^256 + low +// Each high/low limb is 32 bits big and passed in BE +func u512_mod_p{range_check96_ptr: felt*, add_mod_ptr: ModBuiltin*, mul_mod_ptr: ModBuiltin*}( + low: (v0: felt, v1: felt, v2: felt, v3: felt, v4: felt, v5: felt, v6: felt, v7: felt), + high: (v0: felt, v1: felt, v2: felt, v3: felt, v4: felt, v5: felt, v6: felt, v7: felt), + p: UInt384, +) -> (result: UInt384) { + let (_, pc) = get_fp_and_pc(); + + pc_labelx: + let add_offsets_ptr = pc + (add_offsets - pc_labelx); + let mul_offsets_ptr = pc + (mul_offsets - pc_labelx); + + // High limbs. + assert [range_check96_ptr] = high.v7 + high.v6 * POW_2_32_252 + high.v5 * POW_2_64_252; + assert [range_check96_ptr + 1] = high.v4 + high.v3 * POW_2_32_252 + high.v2 * POW_2_64_252; + assert [range_check96_ptr + 2] = high.v1 + high.v0 * POW_2_32_252; + assert [range_check96_ptr + 3] = 0; + + // Shift Limbs. + assert [range_check96_ptr + 4] = 0; + assert [range_check96_ptr + 5] = 0; + assert [range_check96_ptr + 6] = 0x10000000000000000; + assert [range_check96_ptr + 7] = 0; + + // Low limbs. + assert [range_check96_ptr + 8] = low.v7 + low.v6 * POW_2_32_252 + low.v5 * POW_2_64_252; + assert [range_check96_ptr + 9] = low.v4 + low.v3 * POW_2_32_252 + low.v2 * POW_2_64_252; + assert [range_check96_ptr + 10] = low.v1 + low.v0 * POW_2_32_252; + assert [range_check96_ptr + 11] = 0; + + assert add_mod_ptr[0] = ModBuiltin( + p=p, values_ptr=cast(range_check96_ptr, UInt384*), offsets_ptr=add_offsets_ptr, n=1 + ); + assert mul_mod_ptr[0] = ModBuiltin( + p=p, values_ptr=cast(range_check96_ptr, UInt384*), offsets_ptr=mul_offsets_ptr, n=1 + ); + %{ + from starkware.cairo.lang.builtins.modulo.mod_builtin_runner import ModBuiltinRunner + assert builtin_runners["add_mod_builtin"].instance_def.batch_size == 1 + assert builtin_runners["mul_mod_builtin"].instance_def.batch_size == 1 + + ModBuiltinRunner.fill_memory( + memory=memory, + add_mod=(ids.add_mod_ptr.address_, builtin_runners["add_mod_builtin"], 1), + mul_mod=(ids.mul_mod_ptr.address_, builtin_runners["mul_mod_builtin"], 1), + ) + %} + let range_check96_ptr = range_check96_ptr + 20; + let add_mod_ptr = add_mod_ptr + ModBuiltin.SIZE; + let mul_mod_ptr = mul_mod_ptr + ModBuiltin.SIZE; + return (result=[cast(range_check96_ptr - 4, UInt384*)]); + + mul_offsets: + // Compute High * Shift + dw 0; // [High] + dw 4; // [Shift] + dw 12; // [High * Shift] + + // Computes [Low + High * Shift] + add_offsets: + dw 8; // Low + dw 12; // [High * Shift] + dw 16; // [Low + High * Shift] +} + // Compute X + Y mod p. 
func add_mod_p{range_check96_ptr: felt*, add_mod_ptr: ModBuiltin*}( x: UInt384, y: UInt384, p: UInt384 diff --git a/src/ec_ops.cairo b/src/ec_ops.cairo index 2b1dd67..514ee87 100644 --- a/src/ec_ops.cairo +++ b/src/ec_ops.cairo @@ -12,7 +12,13 @@ from definitions import ( TRUE, FALSE, ) -from basic_field_ops import is_eq_mod_p, is_opposite_mod_p, is_zero_mod_p, assert_eq_mod_p +from basic_field_ops import ( + is_eq_mod_p, + is_opposite_mod_p, + is_zero_mod_p, + assert_eq_mod_p, + sub_mod_p, +) from precompiled_circuits.ec import ( get_IS_ON_CURVE_G1_G2_circuit, @@ -42,6 +48,12 @@ from utils import ( hash_efelt_transcript, ) +// Checks if a given point is on the G1 curve for the specified curve_id +// Parameters: +// curve_id: The ID of the elliptic curve +// point: The G1Point to check +// Returns: +// res: 1 if the point is on the curve, 0 otherwise func is_on_curve_g1{ range_check_ptr, range_check96_ptr: felt*, add_mod_ptr: ModBuiltin*, mul_mod_ptr: ModBuiltin* }(curve_id: felt, point: G1Point) -> (res: felt) { @@ -98,6 +110,27 @@ func is_on_curve_g1_g2{ return (res=1); } +// Subtract two EC points +// This function doesn't check if the inputs are on the curve or if they are the point at infinity +// Parameters: +// curve_id: The ID of the elliptic curve being used +// p: The first G1Point (minuend) +// q: The second G1Point (subtrahend) +// Returns: +// res: The result of p - q as a G1Point +func sub_ec_points{ + range_check_ptr, range_check96_ptr: felt*, add_mod_ptr: ModBuiltin*, mul_mod_ptr: ModBuiltin* +}(curve_id: felt, p: G1Point, q: G1Point) -> (res: G1Point) { + alloc_locals; + // Negate the second point + let (curve_p) = get_P(curve_id); + let (neg_Q_Y) = sub_mod_p(curve_p, q.y, curve_p); // -Q.y = P - Q.y + let neg_Q = G1Point(q.x, neg_Q_Y); + // Add the first point to the negated second point + let (result) = add_ec_points(curve_id, p, neg_Q); + return (res=result); +} + func all_g1_g2_pairs_are_on_curve{ range_check_ptr, range_check96_ptr: felt*, add_mod_ptr: ModBuiltin*, mul_mod_ptr: ModBuiltin* }(input: felt*, n: felt, curve_id: felt) -> (res: felt) { @@ -127,8 +160,10 @@ func add_ec_points{ let (opposite_y) = is_opposite_mod_p(P.y, Q.y, modulus); if (opposite_y != 0) { + // P + (-P) = O (point at infinity) return (res=G1Point(UInt384(0, 0, 0, 0), UInt384(0, 0, 0, 0))); } else { + // P = Q, so we need to double the point let (circuit) = get_DOUBLE_EC_POINT_circuit(curve_id); let (A) = get_a(curve_id); let (input: UInt384*) = alloc(); @@ -139,6 +174,7 @@ func add_ec_points{ return (res=[cast(res, G1Point*)]); } } else { + // P and Q have different x-coordinates, perform regular addition let (circuit) = get_ADD_EC_POINT_circuit(curve_id); let (input: UInt384*) = alloc(); assert input[0] = P.x; diff --git a/src/hash_to_field.cairo b/src/hash_to_field.cairo new file mode 100644 index 0000000..d96dba1 --- /dev/null +++ b/src/hash_to_field.cairo @@ -0,0 +1,327 @@ +from starkware.cairo.common.cairo_builtins import BitwiseBuiltin, UInt384, ModBuiltin +from starkware.cairo.common.alloc import alloc +from starkware.cairo.common.memcpy import memcpy +from starkware.cairo.common.uint256 import Uint256 +from starkware.cairo.common.bitwise import bitwise_xor + +from src.basic_field_ops import u512_mod_p +from src.definitions import get_P +from src.utils import felt_divmod +from src.sha import SHA256, HashUtils + +// HashToField functionality, using SHA256 and 32-byte messages +// DST is "BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_" +namespace HashToField32 { + const B_IN_BYTES = 32; // 
hash function output size
+    const B_IN_FELTS = 8;  // 32 bytes = 8 32-bit chunks
+    const Z_PAD_LEN = B_IN_FELTS * 2;
+    const BYTES_PER_CHUNK = 4;
+    const CURVE_M = 2;  // extension degree of F
+    const CURVE_K = 128;  // security level
+    const CURVE_L = 64;  // ceil((CURVE.P.bitlength + CURVE_K) / 8)
+    const CURVE_L_IN_FELTS = 16;  // 64 bytes = 16 32-bit chunks
+
+    // Returns a zero-filled array of length Z_PAD_LEN, i.e. one 64-byte SHA-256 block of zeros
+    func Z_PAD() -> felt* {
+        let (z_pad: felt*) = alloc();
+        assert [z_pad] = 0;
+        assert [z_pad + 1] = 0;
+        assert [z_pad + 2] = 0;
+        assert [z_pad + 3] = 0;
+        assert [z_pad + 4] = 0;
+        assert [z_pad + 5] = 0;
+        assert [z_pad + 6] = 0;
+        assert [z_pad + 7] = 0;
+        assert [z_pad + 8] = 0;
+        assert [z_pad + 9] = 0;
+        assert [z_pad + 10] = 0;
+        assert [z_pad + 11] = 0;
+        assert [z_pad + 12] = 0;
+        assert [z_pad + 13] = 0;
+        assert [z_pad + 14] = 0;
+        assert [z_pad + 15] = 0;
+
+        return z_pad;
+    }
+
+    // Returns the concatenation 0x01 || DST || len(DST), i.e. 0x01 || DST_prime
+    func ONE_DST_PRIME() -> felt* {
+        let (one_dst: felt*) = alloc();
+        assert [one_dst] = 0x01424C53;
+        assert [one_dst + 1] = 0x5F534947;
+        assert [one_dst + 2] = 0x5F424C53;
+        assert [one_dst + 3] = 0x31323338;
+        assert [one_dst + 4] = 0x3147325F;
+        assert [one_dst + 5] = 0x584D443A;
+        assert [one_dst + 6] = 0x5348412D;
+        assert [one_dst + 7] = 0x3235365F;
+        assert [one_dst + 8] = 0x53535755;
+        assert [one_dst + 9] = 0x5F524F5F;
+        assert [one_dst + 10] = 0x4E554C5F;
+        assert [one_dst + 11] = 0x2B;
+
+        return one_dst;
+    }
+
+    // Expands a message according to the algorithm specified in:
+    // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-11#section-5.4.1
+    // The current implementation only supports 32-byte messages.
+    // Parameters:
+    // - msg: the message to expand, in big-endian 4-byte chunks
+    // - msg_bytes_len: the length of the message in bytes
+    // - n_bytes: the number of bytes to output
+    //
+    // Returns:
+    // - result: the expanded message
+    func expand_msg_xmd{
+        range_check_ptr, bitwise_ptr: BitwiseBuiltin*, sha256_ptr: felt*, pow2_array: felt*
+    }(msg: felt*, msg_bytes_len: felt, n_bytes: felt) -> (result: felt*) {
+        alloc_locals;
+
+        // For now we only support 32-byte messages; other lengths would require some small changes.
+        assert msg_bytes_len = 32;
+
+        let (q, r) = felt_divmod(n_bytes, 32);
+        local ell: felt;
+        if (r == 0) {
+            ell = q;
+        } else {
+            ell = q + 1;
+        }
+
+        with_attr error_message("INVALID_XMD_LENGTH") {
+            assert [range_check_ptr] = 255 - ell;
+            tempvar range_check_ptr = range_check_ptr + 1;
+        }
+
+        // Prepare the initial hash input:
+        // Z_pad || msg || I2OSP(n_bytes, 2) || I2OSP(0, 1) || DST || I2OSP(len(DST), 1)
+        let msg_hash_train = Z_PAD();
+        memcpy(dst=msg_hash_train + Z_PAD_LEN, src=msg, len=8);
+
+        // Append the remaining values (output length, zero byte, DST and DST length)
+        assert [msg_hash_train + 24] = 0x01000042;  // I2OSP(n_bytes, 2) ++ 0x00 ++ first DST byte 0x42 ('B'); hardcodes n_bytes = 256
+        assert [msg_hash_train + 25] = 0x4C535F53;
+        assert [msg_hash_train + 26] = 0x49475F42;
+        assert [msg_hash_train + 27] = 0x4C533132;
+        assert [msg_hash_train + 28] = 0x33383147;
+        assert [msg_hash_train + 29] = 0x325F584D;
+        assert [msg_hash_train + 30] = 0x443A5348;
+        assert [msg_hash_train + 31] = 0x412D3235;
+        assert [msg_hash_train + 32] = 0x365F5353;
+        assert [msg_hash_train + 33] = 0x57555F52;
+        assert [msg_hash_train + 34] = 0x4F5F4E55;
+        assert [msg_hash_train + 35] = 0x4C5F2B;  // end of DST ++ DST length (0x2B = 43)
+
+        // Compute the initial hash (b_0)
+        let (msg_hash) = SHA256.hash_bytes(msg_hash_train, 111 + msg_bytes_len);  // 64b z_pad + msg_bytes_len + 2b n_bytes + 1b 0x00 + 43b dst + 1b dst_len
+
+        // Prepare input for the first block hash (b_1)
+        let (hash_args: felt*) = alloc();
+        memcpy(dst=hash_args, src=msg_hash, len=B_IN_FELTS);
+        let one_dst_prime = ONE_DST_PRIME();
+        memcpy(dst=hash_args + 8, src=one_dst_prime, len=12);
+
+        // Compute the first block hash (b_1)
+        let (hash_1) = SHA256.hash_bytes(hash_args, 77);  // 32b b_0 + 1b 0x01 + 43b dst + 1b dst_len
+
+        // Create hash_train and copy the first block hash. hash_train collects all block hashes b_1..b_ell.
+        let (hash_train: felt*) = alloc();
+        memcpy(dst=hash_train, src=hash_1, len=B_IN_FELTS);
+
+        with one_dst_prime {
+            expand_msg_xmd_inner(msg_hash=msg_hash, hash_train=hash_train, ell=ell, index=0);
+        }
+
+        // Copy the result (this copy could potentially be removed).
+        let (result: felt*) = alloc();
+        memcpy(dst=result, src=hash_train, len=n_bytes / 4);
+
+        return (result=result);
+    }
+
+    // Inner recursive function for expand_msg_xmd
+    func expand_msg_xmd_inner{
+        range_check_ptr,
+        bitwise_ptr: BitwiseBuiltin*,
+        sha256_ptr: felt*,
+        pow2_array: felt*,
+        one_dst_prime: felt*,
+    }(msg_hash: felt*, hash_train: felt*, ell: felt, index: felt) {
+        alloc_locals;
+
+        if (index == ell) {
+            return ();
+        }
+
+        // XOR the initial hash (b_0) with the previous block hash
+        let xored = _xor_hash_segments(msg_hash, hash_train + index * B_IN_FELTS);
+
+        // Prepare the input for the next block hash:
+        // (b_0 XOR b_i) || I2OSP(i + 1, 1) || DST || len(DST)
+        assert [xored + 8] = [one_dst_prime] + 0x01000000 * (index + 1);
+        memcpy(dst=xored + 9, src=one_dst_prime + 1, len=11);
+
+        // Compute the next block hash
+        let (hash) = SHA256.hash_bytes(xored, 77);
+        // Store the new block hash in the output array
+        memcpy(dst=hash_train + (index + 1) * B_IN_FELTS, src=hash, len=B_IN_FELTS);
+
+        return expand_msg_xmd_inner(
+            msg_hash=msg_hash, hash_train=hash_train, ell=ell, index=index + 1
+        );
+    }
+
+    // Hashes a message to field elements
+    // Implementation of the algorithm from:
+    // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-11#section-5.3
+    //
+    // Parameters:
+    // - msg: a byte string containing the message to hash, chunked into big-endian 4-byte words
+    // - msg_bytes_len: length of the message in bytes
+    // - count: the number of field elements to output
+    // - ext_degree: the degree of the extension field
+    //
+    // Returns:
+    // - fields: a 2D array of field elements in the format [[f_i; ext_degree]; count]
+    func hash_to_field{
+        range_check_ptr,
+        bitwise_ptr: BitwiseBuiltin*,
+        range_check96_ptr: felt*,
+        add_mod_ptr: ModBuiltin*,
+        mul_mod_ptr: ModBuiltin*,
+        sha256_ptr: felt*,
+        pow2_array: felt*,
+    }(msg: felt*, msg_bytes_len: felt, count: felt, ext_degree: felt) -> (fields: UInt384**) {
+        alloc_locals;
+
+        let n_bytes = count * ext_degree * CURVE_L;
+        let (result) = expand_msg_xmd(msg=msg, msg_bytes_len=msg_bytes_len, n_bytes=n_bytes);
+        let (result_fields: UInt384**) = alloc();
+        with result_fields, ext_degree {
+            hash_to_field_inner(expanded_msg=result, count=count, index=0);
+        }
+
+        return (fields=result_fields);
+    }
+
+    // Inner recursive function for hash_to_field
+    // Produces one of the `count` output elements per call
+    func hash_to_field_inner{
+        range_check96_ptr: felt*,
+        add_mod_ptr: ModBuiltin*,
+        mul_mod_ptr: ModBuiltin*,
+        pow2_array: felt*,
+        ext_degree: felt,
+        result_fields: UInt384**,
+    }(expanded_msg: felt*, count: felt, index: felt) {
+        alloc_locals;
+
+        if (count == index) {
+            return ();
+        }
+
+        let offset = index * CURVE_L_IN_FELTS * ext_degree;
+        let (fields: UInt384*) = alloc();
+        with fields, ext_degree {
+            hash_to_field_inner_inner(
+                expanded_msg=expanded_msg, count_index=index, degree_index=0, offset=offset
+            );
+        }
+        assert result_fields[index] = fields;
+
+        return hash_to_field_inner(expanded_msg=expanded_msg, count=count, index=index + 1);
+    }
+
+    // Innermost recursive function for hash_to_field
+    // Runs ext_degree times per output element
+    func hash_to_field_inner_inner{
+        range_check96_ptr: felt*,
+        add_mod_ptr: ModBuiltin*,
+        mul_mod_ptr: ModBuiltin*,
+        pow2_array: felt*,
+        ext_degree: felt,
+        fields: UInt384*,
+    }(expanded_msg: felt*, count_index: felt, degree_index: felt, offset: felt) {
+        if (degree_index == ext_degree) {
+            return ();
+        }
+
+        let (result) = _u512_mod_p(expanded_msg + offset);
+ // %{ + // from garaga.hints.io import bigint_pack + // result = bigint_pack(ids.result, 4, 2**96) + // print(hex(result)) + // %} + assert fields[degree_index] = result; + + return hash_to_field_inner_inner( + expanded_msg=expanded_msg, + count_index=count_index, + degree_index=degree_index + 1, + offset=offset + CURVE_L_IN_FELTS, + ); + } + + // Converts a 512-bit byte array to UInt384 and calls u512_mod_p + // Inputs: + // value: felt* - an array of 32bit BE chunks. Must have a length of 16. + // Outputs: + // result: UInt384 - The result of (value mod p), where p is the BLS12-381 field modulus + func _u512_mod_p{ + range_check96_ptr: felt*, + add_mod_ptr: ModBuiltin*, + mul_mod_ptr: ModBuiltin*, + pow2_array: felt*, + }(value: felt*) -> (result: UInt384) { + let (p: UInt384) = get_P(1); + + let (result) = u512_mod_p( + low=( + v0=value[8], + v1=value[9], + v2=value[10], + v3=value[11], + v4=value[12], + v5=value[13], + v6=value[14], + v7=value[15], + ), + high=( + v0=value[0], + v1=value[1], + v2=value[2], + v3=value[3], + v4=value[4], + v5=value[5], + v6=value[6], + v7=value[7], + ), + p=p, + ); + + return (result=result); + } + + // XORs two 256-bit hashes + // Inputs: + // hash_a: felt* - an array of 32bit BE chunks. Must have a length of 8. + // hash_b: felt* - an array of 32bit BE chunks. Must have a length of 8. + // Outputs: + // result: felt* - XOR of the input, as 32bit BE chunks with length 8. + func _xor_hash_segments{range_check_ptr, bitwise_ptr: BitwiseBuiltin*, pow2_array: felt*}( + hash_a: felt*, hash_b: felt* + ) -> felt* { + alloc_locals; + + let hash_a_uint = HashUtils.chunks_to_uint256(hash_a); + let hash_b_uint = HashUtils.chunks_to_uint256(hash_b); + + let (result_low) = bitwise_xor(hash_a_uint.low, hash_b_uint.low); + let (result_high) = bitwise_xor(hash_a_uint.high, hash_b_uint.high); + + let (chunks) = HashUtils.chunk_uint256(Uint256(low=result_low, high=result_high)); + + return chunks; + } +} diff --git a/src/sha.cairo b/src/sha.cairo new file mode 100644 index 0000000..c8293de --- /dev/null +++ b/src/sha.cairo @@ -0,0 +1,255 @@ +from starkware.cairo.common.cairo_builtins import BitwiseBuiltin +from starkware.cairo.common.uint256 import Uint256 +from starkware.cairo.common.alloc import alloc +from starkware.cairo.common.memcpy import memcpy +from starkware.cairo.common.memset import memset +from starkware.cairo.common.cairo_sha256.sha256_utils import finalize_sha256 +from src.utils import felt_divmod, pow2alloc128 + +namespace SHA256 { + func init() -> (sha256_ptr: felt*, sha256_ptr_start: felt*) { + let (sha256_ptr: felt*) = alloc(); + let sha256_ptr_start = sha256_ptr; + + return (sha256_ptr=sha256_ptr, sha256_ptr_start=sha256_ptr_start); + } + + func finalize{range_check_ptr, bitwise_ptr: BitwiseBuiltin*}( + sha256_start_ptr: felt*, sha256_end_ptr: felt* + ) { + finalize_sha256(sha256_start_ptr, sha256_end_ptr); + return (); + } + + // Computes the SHA256 hash of a 64 byte input + // Inputs: + // input: felt* - the 64 byte input, chunked into 32bit BE chunks. Must have a length of 16. + // Outputs: + // output: felt* - an array of 32bit BE chunks. Must have a length of 8. + func hash_64{range_check_ptr, sha256_ptr: felt*, pow2_array: felt*}(input: felt*) -> ( + output: felt* + ) { + alloc_locals; + let (output) = sha256(data=input, n_bytes=64); + return (output=output); + } + + // Computes the SHA256 hash of an arbitrary length of bytes + // Inputs: + // input: felt* - the input bytes, chunked into 32bit BE chunks. 
+ // n_bytes: felt - the number of bytes in the input + // Outputs: + // output: felt* - an array of 32bit BE chunks. Must have a length of 8. + func hash_bytes{range_check_ptr, sha256_ptr: felt*, pow2_array: felt*}( + input: felt*, n_bytes: felt + ) -> (output: felt*) { + alloc_locals; + let (output) = sha256(data=input, n_bytes=n_bytes); + return (output=output); + } +} + +namespace HashUtils { + func chunk_pair{range_check_ptr, pow2_array: felt*}(left: Uint256, right: Uint256) -> felt* { + let (leafs: Uint256*) = alloc(); + assert leafs[0] = left; + assert leafs[1] = right; + + let (output_ptr: felt*) = alloc(); + with output_ptr { + chunk_leafs(leafs=leafs, leafs_len=2, index=0); + } + return output_ptr; + } + + func chunk_leafs{range_check_ptr, pow2_array: felt*, output_ptr: felt*}( + leafs: Uint256*, leafs_len: felt, index: felt + ) { + if (index == leafs_len) { + return (); + } + + let leaf = [leafs]; + + // Process left-high + let (q0, r0) = felt_divmod(leaf.high, pow2_array[32]); + let (q1, r1) = felt_divmod(q0, pow2_array[32]); + let (q2, r2) = felt_divmod(q1, pow2_array[32]); + let (q3, r3) = felt_divmod(q2, pow2_array[32]); + assert [output_ptr] = r3; + assert [output_ptr + 1] = r2; + assert [output_ptr + 2] = r1; + assert [output_ptr + 3] = r0; + + // Proccess left-low + let (q4, r4) = felt_divmod(leaf.low, pow2_array[32]); + let (q5, r5) = felt_divmod(q4, pow2_array[32]); + let (q6, r6) = felt_divmod(q5, pow2_array[32]); + let (q7, r7) = felt_divmod(q6, pow2_array[32]); + assert [output_ptr + 4] = r7; + assert [output_ptr + 5] = r6; + assert [output_ptr + 6] = r5; + assert [output_ptr + 7] = r4; + + tempvar output_ptr = output_ptr + 8; + return chunk_leafs(leafs=leafs + Uint256.SIZE, leafs_len=leafs_len, index=index + 1); + } + + func chunk_uint256{range_check_ptr, pow2_array: felt*}(leaf: Uint256) -> (output: felt*) { + let (output: felt*) = alloc(); + + // Process left-high + let (q0, r0) = felt_divmod(leaf.high, pow2_array[32]); + let (q1, r1) = felt_divmod(q0, pow2_array[32]); + let (q2, r2) = felt_divmod(q1, pow2_array[32]); + let (q3, r3) = felt_divmod(q2, pow2_array[32]); + assert [output] = r3; + assert [output + 1] = r2; + assert [output + 2] = r1; + assert [output + 3] = r0; + + // Proccess left-low + let (q4, r4) = felt_divmod(leaf.low, pow2_array[32]); + let (q5, r5) = felt_divmod(q4, pow2_array[32]); + let (q6, r6) = felt_divmod(q5, pow2_array[32]); + let (q7, r7) = felt_divmod(q6, pow2_array[32]); + assert [output + 4] = r7; + assert [output + 5] = r6; + assert [output + 6] = r5; + assert [output + 7] = r4; + + return (output=output); + } + + func chunks_to_uint256{pow2_array: felt*}(output: felt*) -> Uint256 { + let low = [output + 4] * pow2_array[96] + [output + 5] * pow2_array[64] + [output + 6] * + pow2_array[32] + [output + 7]; + let high = [output] * pow2_array[96] + [output + 1] * pow2_array[64] + [output + 2] * + pow2_array[32] + [output + 3]; + return (Uint256(low=low, high=high)); + } +} + +const SHA256_INPUT_CHUNK_SIZE_FELTS = 16; +const SHA256_STATE_SIZE_FELTS = 8; + +// Hash an arbitrary length of bytes. Input must be BE 32bit chunks +func sha256{range_check_ptr, pow2_array: felt*, sha256_ptr: felt*}(data: felt*, n_bytes: felt) -> ( + output: felt* +) { + alloc_locals; + + // Maximum bytes_len is 2^32 - 1. This simplifies the padding calculation. + assert [range_check_ptr] = pow2_array[32] - n_bytes; + let range_check_ptr = range_check_ptr + 1; + + // Set the initial input state to IV. 
+ assert sha256_ptr[16] = 0x6A09E667; + assert sha256_ptr[17] = 0xBB67AE85; + assert sha256_ptr[18] = 0x3C6EF372; + assert sha256_ptr[19] = 0xA54FF53A; + assert sha256_ptr[20] = 0x510E527F; + assert sha256_ptr[21] = 0x9B05688C; + assert sha256_ptr[22] = 0x1F83D9AB; + assert sha256_ptr[23] = 0x5BE0CD19; + + sha256_inner(data=data, n_bytes=n_bytes, remaining_bytes=n_bytes); + + let output = sha256_ptr; + let sha256_ptr = sha256_ptr + SHA256_STATE_SIZE_FELTS; + + return (output=output); +} + +func sha256_inner{range_check_ptr, pow2_array: felt*, sha256_ptr: felt*}( + data: felt*, n_bytes: felt, remaining_bytes: felt +) { + alloc_locals; + + // If we have > 64 bytes input, we need at least two blocks for the message alone (without padding) + let (additional_message_blocks, _) = felt_divmod(remaining_bytes, 64); + if (additional_message_blocks == 0) { + let (n_full_words, local len_last_word) = felt_divmod(remaining_bytes, 4); + + // write the full input words to the sha256_ptr + memcpy(dst=sha256_ptr, src=data, len=n_full_words); + // compute the last word and write it to the sha256_ptr + if (len_last_word != 0) { + // if the last word is not a full word, we need to left-shift it + let left_shift = pow2_array[(4 - len_last_word) * 8]; + assert sha256_ptr[n_full_words] = data[n_full_words] * left_shift + left_shift / 2; + } else { + // if the last word is a full word, we just append binary 1 + assert sha256_ptr[n_full_words] = 0x80000000; + } + + // If the msg >= 56 bytes, we need two blocks + let (required_two_blocks, _) = felt_divmod(remaining_bytes, 56); + if (required_two_blocks == 0) { + // msg.len <= 55 - Finalize hashing + memset(dst=sha256_ptr + n_full_words + 1, value=0, n=14 - n_full_words); + // append binary length + assert sha256_ptr[15] = n_bytes * 8; + _sha256_chunk(); // fill outputs + + tempvar sha256_ptr = sha256_ptr + SHA256_INPUT_CHUNK_SIZE_FELTS + + SHA256_STATE_SIZE_FELTS; + return (); + } else { + // 55 < msg.len < 64 -> We need two more blocks + + // Fill current block with required padding + memset(dst=sha256_ptr + n_full_words + 1, value=0, n=15 - n_full_words); + _sha256_chunk(); // fill outputs + tempvar sha256_ptr = sha256_ptr + SHA256_INPUT_CHUNK_SIZE_FELTS + + SHA256_STATE_SIZE_FELTS; + + // write the output to the state of the next block + memcpy(dst=sha256_ptr + 24, src=sha256_ptr, len=8); + tempvar sha256_ptr = sha256_ptr + SHA256_STATE_SIZE_FELTS; + + // Fill last block with padding and binary length + memset(dst=sha256_ptr, value=0, n=15); + assert sha256_ptr[15] = n_bytes * 8; + + _sha256_chunk(); // fill outputs + tempvar sha256_ptr = sha256_ptr + SHA256_INPUT_CHUNK_SIZE_FELTS + + SHA256_STATE_SIZE_FELTS; + + return (); + } + } else { + // otherwise we fill the entire block with our input + memcpy(dst=sha256_ptr, src=data, len=16); + _sha256_chunk(); // fill outputs + tempvar sha256_ptr = sha256_ptr + SHA256_INPUT_CHUNK_SIZE_FELTS + SHA256_STATE_SIZE_FELTS; + + // copy output to the state of the next block + memcpy(dst=sha256_ptr + 24, src=sha256_ptr, len=8); + tempvar sha256_ptr = sha256_ptr + SHA256_STATE_SIZE_FELTS; + + return sha256_inner(data=data + 16, n_bytes=n_bytes, remaining_bytes=remaining_bytes - 64); + } +} + +// Computes the sha256 hash of the input chunk from `message` to `message + SHA256_INPUT_CHUNK_SIZE_FELTS` +func _sha256_chunk{range_check_ptr, sha256_ptr: felt*}() { + let message = sha256_ptr; + let state = sha256_ptr + SHA256_INPUT_CHUNK_SIZE_FELTS; + let output = state + SHA256_STATE_SIZE_FELTS; + + %{ + from 
starkware.cairo.common.cairo_sha256.sha256_utils import ( + compute_message_schedule, sha2_compress_function) + + _sha256_input_chunk_size_felts = int(ids.SHA256_INPUT_CHUNK_SIZE_FELTS) + assert 0 <= _sha256_input_chunk_size_felts < 100 + _sha256_state_size_felts = int(ids.SHA256_STATE_SIZE_FELTS) + assert 0 <= _sha256_state_size_felts < 100 + w = compute_message_schedule(memory.get_range( + ids.message, _sha256_input_chunk_size_felts)) + new_state = sha2_compress_function(memory.get_range(ids.state, _sha256_state_size_felts), w) + segments.write_arg(ids.output, new_state) + %} + return (); +} diff --git a/src/utils.cairo b/src/utils.cairo index daca600..31064fe 100644 --- a/src/utils.cairo +++ b/src/utils.cairo @@ -552,3 +552,163 @@ func neg_3_pow_alloc_80() -> (array: felt*) { dw (-3) ** 80; dw (-3) ** 81; } + +// Returns q and r such that: +// 0 <= q < rc_bound, 0 <= r < div and value = q * div + r. +// +// Assumption: 0 < div <= PRIME / rc_bound. +// Prover assumption: value / div < rc_bound. +// Modified version of unsigned_div_rem with inlined range checks. +func felt_divmod{range_check_ptr}(value, div) -> (q: felt, r: felt) { + let r = [range_check_ptr]; + let q = [range_check_ptr + 1]; + %{ + from starkware.cairo.common.math_utils import assert_integer + assert_integer(ids.div) + assert 0 < ids.div <= PRIME // range_check_builtin.bound, \ + f'div={hex(ids.div)} is out of the valid range.' + ids.q, ids.r = divmod(ids.value, ids.div) + %} + assert [range_check_ptr + 2] = div - 1 - r; + let range_check_ptr = range_check_ptr + 3; + + assert value = q * div + r; + return (q, r); +} + +// Utility to get a pointer on an array of 2^i from i = 0 to 128. +func pow2alloc128() -> (array: felt*) { + let (data_address) = get_label_location(data); + return (data_address,); + + data: + dw 0x1; + dw 0x2; + dw 0x4; + dw 0x8; + dw 0x10; + dw 0x20; + dw 0x40; + dw 0x80; + dw 0x100; + dw 0x200; + dw 0x400; + dw 0x800; + dw 0x1000; + dw 0x2000; + dw 0x4000; + dw 0x8000; + dw 0x10000; + dw 0x20000; + dw 0x40000; + dw 0x80000; + dw 0x100000; + dw 0x200000; + dw 0x400000; + dw 0x800000; + dw 0x1000000; + dw 0x2000000; + dw 0x4000000; + dw 0x8000000; + dw 0x10000000; + dw 0x20000000; + dw 0x40000000; + dw 0x80000000; + dw 0x100000000; + dw 0x200000000; + dw 0x400000000; + dw 0x800000000; + dw 0x1000000000; + dw 0x2000000000; + dw 0x4000000000; + dw 0x8000000000; + dw 0x10000000000; + dw 0x20000000000; + dw 0x40000000000; + dw 0x80000000000; + dw 0x100000000000; + dw 0x200000000000; + dw 0x400000000000; + dw 0x800000000000; + dw 0x1000000000000; + dw 0x2000000000000; + dw 0x4000000000000; + dw 0x8000000000000; + dw 0x10000000000000; + dw 0x20000000000000; + dw 0x40000000000000; + dw 0x80000000000000; + dw 0x100000000000000; + dw 0x200000000000000; + dw 0x400000000000000; + dw 0x800000000000000; + dw 0x1000000000000000; + dw 0x2000000000000000; + dw 0x4000000000000000; + dw 0x8000000000000000; + dw 0x10000000000000000; + dw 0x20000000000000000; + dw 0x40000000000000000; + dw 0x80000000000000000; + dw 0x100000000000000000; + dw 0x200000000000000000; + dw 0x400000000000000000; + dw 0x800000000000000000; + dw 0x1000000000000000000; + dw 0x2000000000000000000; + dw 0x4000000000000000000; + dw 0x8000000000000000000; + dw 0x10000000000000000000; + dw 0x20000000000000000000; + dw 0x40000000000000000000; + dw 0x80000000000000000000; + dw 0x100000000000000000000; + dw 0x200000000000000000000; + dw 0x400000000000000000000; + dw 0x800000000000000000000; + dw 0x1000000000000000000000; + dw 0x2000000000000000000000; + dw 
0x4000000000000000000000; + dw 0x8000000000000000000000; + dw 0x10000000000000000000000; + dw 0x20000000000000000000000; + dw 0x40000000000000000000000; + dw 0x80000000000000000000000; + dw 0x100000000000000000000000; + dw 0x200000000000000000000000; + dw 0x400000000000000000000000; + dw 0x800000000000000000000000; + dw 0x1000000000000000000000000; + dw 0x2000000000000000000000000; + dw 0x4000000000000000000000000; + dw 0x8000000000000000000000000; + dw 0x10000000000000000000000000; + dw 0x20000000000000000000000000; + dw 0x40000000000000000000000000; + dw 0x80000000000000000000000000; + dw 0x100000000000000000000000000; + dw 0x200000000000000000000000000; + dw 0x400000000000000000000000000; + dw 0x800000000000000000000000000; + dw 0x1000000000000000000000000000; + dw 0x2000000000000000000000000000; + dw 0x4000000000000000000000000000; + dw 0x8000000000000000000000000000; + dw 0x10000000000000000000000000000; + dw 0x20000000000000000000000000000; + dw 0x40000000000000000000000000000; + dw 0x80000000000000000000000000000; + dw 0x100000000000000000000000000000; + dw 0x200000000000000000000000000000; + dw 0x400000000000000000000000000000; + dw 0x800000000000000000000000000000; + dw 0x1000000000000000000000000000000; + dw 0x2000000000000000000000000000000; + dw 0x4000000000000000000000000000000; + dw 0x8000000000000000000000000000000; + dw 0x10000000000000000000000000000000; + dw 0x20000000000000000000000000000000; + dw 0x40000000000000000000000000000000; + dw 0x80000000000000000000000000000000; + dw 0x100000000000000000000000000000000; +} diff --git a/tests/fustat_programs/test_hash_to_field.cairo b/tests/fustat_programs/test_hash_to_field.cairo new file mode 100644 index 0000000..6ccf984 --- /dev/null +++ b/tests/fustat_programs/test_hash_to_field.cairo @@ -0,0 +1,62 @@ +%builtins range_check bitwise range_check96 add_mod mul_mod + +from starkware.cairo.common.cairo_builtins import BitwiseBuiltin, ModBuiltin +from starkware.cairo.common.alloc import alloc +from starkware.cairo.common.uint256 import Uint256 + +from src.hash_to_field import HashToField32 +from src.sha import SHA256, HashUtils +from src.utils import pow2alloc128 + +func main{ + range_check_ptr, + bitwise_ptr: BitwiseBuiltin*, + range_check96_ptr: felt*, + add_mod_ptr: ModBuiltin*, + mul_mod_ptr: ModBuiltin*, +}() { + alloc_locals; + + let (sha256_ptr, sha256_ptr_start) = SHA256.init(); + let (pow2_array) = pow2alloc128(); + + with sha256_ptr, pow2_array { + test_32_bytes_msg(); + } + + SHA256.finalize(sha256_ptr_start, sha256_ptr); + return (); +} + +func test_32_bytes_msg{ + range_check_ptr, + bitwise_ptr: BitwiseBuiltin*, + range_check96_ptr: felt*, + add_mod_ptr: ModBuiltin*, + mul_mod_ptr: ModBuiltin*, + sha256_ptr: felt*, + pow2_array: felt*, +}() { + alloc_locals; + + // This is the signing root of sepl block 5800064 + let value = Uint256( + low=0x135aa063454c6023e1fbafd896f89df9, high=0x18b90e7987b9393d878786da78fa13fd + ); + let (chunks) = HashUtils.chunk_uint256(value); + let (res) = HashToField32.hash_to_field(chunks, 32, 2, 2); + let f0 = res[0][0]; + let f1 = res[0][1]; + let f2 = res[1][0]; + let f3 = res[1][1]; + + %{ + from garaga.hints.io import bigint_pack + + assert bigint_pack(ids.f0, 4, 2**96) == 0xa1d5f7f60126ba84c5a337f4e9ae2d02ef018a7fd34c6eff78b3bcc327d8ca8f28ed9a9344b3cef5b6946d0078d34f3 + assert bigint_pack(ids.f1, 4, 2**96) == 0xe7f6f005fa2404be24f13fc6bf9facabc9f7b01d4a0fa1df8695251163c27437afccf0d3eb4e611fe8dd14bcd1bb881 + assert bigint_pack(ids.f2, 4, 2**96) == 
0x184fd90d987275be14e967e4a4ee0e963544e6694c88d36358a05f0fb45ae538a0e3b687126e0962a40a3cdfa899d0aa + assert bigint_pack(ids.f3, 4, 2**96) == 0xd7d91409c8b0ed8914cc0c877eb255e05135240e28835ade65557b69b6b34b18fe99801702311eb0e1328e4de7ea38b + %} + return (); +} diff --git a/tests/fustat_programs/test_sha256.cairo b/tests/fustat_programs/test_sha256.cairo new file mode 100644 index 0000000..d9ba3c2 --- /dev/null +++ b/tests/fustat_programs/test_sha256.cairo @@ -0,0 +1,85 @@ +%builtins range_check bitwise +from starkware.cairo.common.cairo_builtins import BitwiseBuiltin +from starkware.cairo.common.uint256 import Uint256 +from starkware.cairo.common.alloc import alloc +from src.sha import SHA256, sha256, HashUtils +from src.utils import pow2alloc128 + +func main{range_check_ptr, bitwise_ptr: BitwiseBuiltin*}() { + alloc_locals; + + let (sha256_ptr, sha256_ptr_start) = SHA256.init(); + let (pow2_array) = pow2alloc128(); + local length: felt; + + %{ + import random + import os + + def generate_hex_array(N): + hex_array = [os.urandom(i).hex() for i in range(1, N + 1)] + return hex_array + + preimages = generate_hex_array(150) + print(preimages) + + ids.length = len(preimages) + %} + + with sha256_ptr, pow2_array { + run_test(index=length - 1); + } + + SHA256.finalize(sha256_start_ptr=sha256_ptr_start, sha256_end_ptr=sha256_ptr); + + return (); +} + +func run_test{range_check_ptr, bitwise_ptr: BitwiseBuiltin*, sha256_ptr: felt*, pow2_array: felt*}( + index: felt +) { + alloc_locals; + + if (index == 0) { + return (); + } + + let (input: felt*) = alloc(); + local n_bytes: felt; + local expected: Uint256; + %{ + import hashlib + # This function chunks from MSB to LSB + def hex_to_chunks_32(hex_string: str): + # Remove '0x' prefix if present + if hex_string.startswith(('0x', '0X')): + hex_string = hex_string[2:] + + # if we have an odd number of characters, prepend a 0 + if len(hex_string) % 2 == 1: + hex_string = '0' + hex_string + + # Now split into 8-character (32-bit) chunks + chunks = [int(hex_string[i:i+8], 16) for i in range(0, len(hex_string), 8)] + return chunks + + + preimage = int(preimages[ids.index], 16) + ids.n_bytes = (preimage.bit_length() + 7) // 8 + + expected = hashlib.sha256(preimage.to_bytes(length=ids.n_bytes, byteorder='big')).hexdigest() + ids.expected.high, ids.expected.low = divmod(int(expected, 16), 2**128) + + chunks = hex_to_chunks_32(hex(preimage)) + segments.write_arg(ids.input, chunks) + %} + + with sha256_ptr, pow2_array { + let (output) = sha256(data=input, n_bytes=n_bytes); + let hash = HashUtils.chunks_to_uint256(output=output); + assert hash.high = expected.high; + assert hash.low = expected.low; + } + + return run_test(index=index - 1); +}
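
Reviewer note (not part of the diff): a minimal Python sketch of what u512_mod_p in src/basic_field_ops.cairo computes. The sixteen 32-bit big-endian chunks are packed into two UInt384 values (three chunks per 96-bit limb, top limb zero), and the mod builtin evaluates (low + high * 2^256) mod p with one mul (high * Shift, where Shift is the UInt384 with limbs (0, 0, 2^64, 0)) and one add. The helper names below are illustrative only.

POW_2_32 = 1 << 32
POW_2_64 = 1 << 64

def pack_u384_limbs(chunks):
    # chunks: eight 32-bit big-endian chunks, chunks[0] most significant
    v = chunks
    return [
        v[7] + v[6] * POW_2_32 + v[5] * POW_2_64,  # limb 0 (least significant 96 bits)
        v[4] + v[3] * POW_2_32 + v[2] * POW_2_64,  # limb 1
        v[1] + v[0] * POW_2_32,                    # limb 2 (top chunk occupies only 64 bits)
        0,                                         # limb 3
    ]

def limbs_to_int(limbs):
    return sum(limb << (96 * i) for i, limb in enumerate(limbs))

def u512_mod_p_reference(value: bytes, p: int) -> int:
    # value: 64 bytes, big-endian; mirrors _u512_mod_p's split into high = value[0:8], low = value[8:16] chunks
    assert len(value) == 64
    chunks = [int.from_bytes(value[4 * i: 4 * i + 4], "big") for i in range(16)]
    high = limbs_to_int(pack_u384_limbs(chunks[:8]))
    low = limbs_to_int(pack_u384_limbs(chunks[8:]))
    shift = 1 << 256  # the "Shift" UInt384 with limbs (0, 0, 2**64, 0)
    assert high * shift + low == int.from_bytes(value, "big")
    return (low + high * shift) % p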
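
Reviewer note (not part of the diff): a minimal Python sketch of expand_message_xmd and hash_to_field per the hash-to-curve draft cited in src/hash_to_field.cairo (published as RFC 9380), using SHA-256, the 43-byte DST from ONE_DST_PRIME, and the BLS12-381 base field modulus that _u512_mod_p's get_P(1) refers to. Function names and defaults are illustrative; the sketch is meant for cross-checking HashToField32, not as part of the PR.

import hashlib

# BLS12-381 base field modulus
P = int(
    "1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf"
    "6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab",
    16,
)
DST = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_"  # 43 bytes, as in ONE_DST_PRIME

def expand_message_xmd(msg: bytes, dst: bytes, len_in_bytes: int) -> bytes:
    b_in_bytes = 32  # SHA-256 output size (B_IN_BYTES)
    ell = -(-len_in_bytes // b_in_bytes)  # ceil(len_in_bytes / 32)
    assert ell <= 255 and len(dst) <= 255
    dst_prime = dst + bytes([len(dst)])
    z_pad = bytes(64)  # one zero SHA-256 block (Z_PAD)
    l_i_b_str = len_in_bytes.to_bytes(2, "big")
    b_0 = hashlib.sha256(z_pad + msg + l_i_b_str + b"\x00" + dst_prime).digest()
    b_vals = [hashlib.sha256(b_0 + b"\x01" + dst_prime).digest()]
    for i in range(2, ell + 1):
        xored = bytes(x ^ y for x, y in zip(b_0, b_vals[-1]))
        b_vals.append(hashlib.sha256(xored + bytes([i]) + dst_prime).digest())
    return b"".join(b_vals)[:len_in_bytes]

def hash_to_field(msg: bytes, count: int = 2, m: int = 2, L: int = 64):
    # m = CURVE_M, L = CURVE_L; returns [[f_i; m]; count] as Python ints reduced mod P
    uniform = expand_message_xmd(msg, DST, count * m * L)
    return [
        [
            int.from_bytes(uniform[L * (i * m + j): L * (i * m + j + 1)], "big") % P
            for j in range(m)
        ]
        for i in range(count)
    ]

Feeding this the 32-byte signing root used in tests/fustat_programs/test_hash_to_field.cairo, hash_to_field(bytes.fromhex("18b90e7987b9393d878786da78fa13fd135aa063454c6023e1fbafd896f89df9")), should reproduce the four constants asserted in that test's hint (f0 through f3).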
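
Reviewer note (not part of the diff): the single-block branch of sha256_inner in src/sha.cairo (messages of at most 55 bytes) left-aligns the last partial word, appends the 0x80 byte, zero-fills up to word 14, and writes the bit length into word 15. A small Python sketch (helper names illustrative) checking that this word layout matches the standard SHA-256 padding:

def cairo_single_block_words(msg: bytes) -> list[int]:
    # Mirrors the <= 55-byte branch of sha256_inner: 16 big-endian 32-bit words.
    assert len(msg) <= 55
    n_full_words, len_last_word = divmod(len(msg), 4)
    words = [int.from_bytes(msg[4 * i: 4 * i + 4], "big") for i in range(n_full_words)]
    if len_last_word != 0:
        left_shift = 1 << ((4 - len_last_word) * 8)
        last = int.from_bytes(msg[4 * n_full_words:], "big")
        words.append(last * left_shift + left_shift // 2)  # left-align and append the 0x80 byte
    else:
        words.append(0x80000000)
    words += [0] * (14 - n_full_words)  # zero padding up to word 14
    words.append(len(msg) * 8)          # bit length in word 15
    return words

def reference_padding_words(msg: bytes) -> list[int]:
    # Standard SHA-256 padding of a <= 55-byte message, split into 32-bit words.
    padded = msg + b"\x80" + bytes(55 - len(msg)) + (len(msg) * 8).to_bytes(8, "big")
    return [int.from_bytes(padded[4 * i: 4 * i + 4], "big") for i in range(16)]

for sample in (b"", b"abc", bytes(range(55))):
    assert cairo_single_block_words(sample) == reference_padding_words(sample)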