Skip to content

Commit

Permalink
Merge with feat continuations
Browse files Browse the repository at this point in the history
  • Loading branch information
4l0n50 committed Jun 13, 2024
1 parent dbc5a6a commit cd3e17c
Show file tree
Hide file tree
Showing 115 changed files with 4,993 additions and 1,230 deletions.
4 changes: 2 additions & 2 deletions .github/CODEOWNERS
Validating CODEOWNERS rules …
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
* @muursh @Nashtare
/evm_arithmetization/ @wborgeaud @muursh @Nashtare
* @muursh @Nashtare @cpubot
/evm_arithmetization/ @wborgeaud @muursh @Nashtare @cpubot
13 changes: 13 additions & 0 deletions .github/workflows/audit.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
name: Security audit
on:
push:
paths:
- '**/Cargo.toml'
jobs:
security_audit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: rustsec/[email protected]
with:
token: ${{ secrets.GITHUB_TOKEN }}
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]

### Changed
- Add a few QoL usability functions to the interface ([#169](https://github.com/0xPolygonZero/zk_evm/pull/169))

## [0.3.1] - 2024-04-22

Expand Down
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ rlp = "0.5.2"
rlp-derive = "0.1.0"
serde = "1.0.166"
serde_json = "1.0.96"
serde-big-array = "0.5.1"
thiserror = "1.0.49"

# plonky2-related dependencies
Expand Down
1 change: 1 addition & 0 deletions evm_arithmetization/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ static_assertions = "1.1.0"
hashbrown = { version = "0.14.0" }
tiny-keccak = "2.0.2"
serde_json = { workspace = true }
serde-big-array = { workspace = true }

# Local dependencies
mpt_trie = { version = "0.2.1", path = "../mpt_trie" }
Expand Down
71 changes: 67 additions & 4 deletions evm_arithmetization/src/all_stark.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use core::ops::Deref;
use std::iter;

use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
Expand All @@ -11,8 +12,8 @@ use starky::stark::Stark;
use crate::arithmetic::arithmetic_stark;
use crate::arithmetic::arithmetic_stark::ArithmeticStark;
use crate::byte_packing::byte_packing_stark::{self, BytePackingStark};
use crate::cpu::cpu_stark;
use crate::cpu::cpu_stark::CpuStark;
use crate::cpu::cpu_stark::{self, ctl_context_pruning_looked};
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::keccak::keccak_stark;
use crate::keccak::keccak_stark::KeccakStark;
Expand All @@ -21,8 +22,9 @@ use crate::keccak_sponge::keccak_sponge_stark;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeStark;
use crate::logic;
use crate::logic::LogicStark;
use crate::memory::memory_stark;
use crate::memory::memory_stark::MemoryStark;
use crate::memory::memory_stark::{self, ctl_context_pruning_looking};
use crate::memory_continuation::memory_continuation_stark::{self, MemoryContinuationStark};

/// Structure containing all STARKs and the cross-table lookups.
#[derive(Clone)]
Expand All @@ -34,6 +36,8 @@ pub struct AllStark<F: RichField + Extendable<D>, const D: usize> {
pub(crate) keccak_sponge_stark: KeccakSpongeStark<F, D>,
pub(crate) logic_stark: LogicStark<F, D>,
pub(crate) memory_stark: MemoryStark<F, D>,
pub(crate) mem_before_stark: MemoryContinuationStark<F, D>,
pub(crate) mem_after_stark: MemoryContinuationStark<F, D>,
pub(crate) cross_table_lookups: Vec<CrossTableLookup<F>>,
}

Expand All @@ -49,6 +53,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Default for AllStark<F, D> {
keccak_sponge_stark: KeccakSpongeStark::default(),
logic_stark: LogicStark::default(),
memory_stark: MemoryStark::default(),
mem_before_stark: MemoryContinuationStark::default(),
mem_after_stark: MemoryContinuationStark::default(),
cross_table_lookups: all_cross_table_lookups(),
}
}
Expand All @@ -64,6 +70,8 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
self.keccak_sponge_stark.num_lookup_helper_columns(config),
self.logic_stark.num_lookup_helper_columns(config),
self.memory_stark.num_lookup_helper_columns(config),
self.mem_before_stark.num_lookup_helper_columns(config),
self.mem_after_stark.num_lookup_helper_columns(config),
]
}
}
Expand All @@ -80,6 +88,8 @@ pub enum Table {
KeccakSponge = 4,
Logic = 5,
Memory = 6,
MemBefore = 7,
MemAfter = 8,
}

impl Deref for Table {
Expand All @@ -88,12 +98,12 @@ impl Deref for Table {
fn deref(&self) -> &Self::Target {
// Hacky way to implement `Deref` for `Table` so that we don't have to
// call `Table::Foo as usize`, but perhaps too ugly to be worth it.
[&0, &1, &2, &3, &4, &5, &6][*self as TableIdx]
[&0, &1, &2, &3, &4, &5, &6, &7, &8][*self as TableIdx]
}
}

/// Number of STARK tables.
pub(crate) const NUM_TABLES: usize = Table::Memory as usize + 1;
pub(crate) const NUM_TABLES: usize = Table::MemAfter as usize + 1;

impl Table {
/// Returns all STARK table indices.
Expand All @@ -106,6 +116,8 @@ impl Table {
Self::KeccakSponge,
Self::Logic,
Self::Memory,
Self::MemBefore,
Self::MemAfter,
]
}
}
Expand All @@ -120,6 +132,9 @@ pub(crate) fn all_cross_table_lookups<F: Field>() -> Vec<CrossTableLookup<F>> {
ctl_keccak_outputs(),
ctl_logic(),
ctl_memory(),
ctl_mem_before(),
ctl_mem_after(),
ctl_context_pruning(),
]
}

Expand Down Expand Up @@ -287,6 +302,11 @@ fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
byte_packing_stark::ctl_looking_memory_filter(i),
)
});
let mem_before_ops = TableWithColumns::new(
*Table::MemBefore,
memory_continuation_stark::ctl_data_memory(),
memory_continuation_stark::ctl_filter(),
);
let all_lookers = vec![
cpu_memory_code_read,
cpu_push_write_ops,
Expand All @@ -297,6 +317,7 @@ fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
.chain(cpu_memory_gp_ops)
.chain(keccak_sponge_reads)
.chain(byte_packing_ops)
.chain(iter::once(mem_before_ops))
.collect();
let memory_looked = TableWithColumns::new(
*Table::Memory,
Expand All @@ -305,3 +326,45 @@ fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
);
CrossTableLookup::new(all_lookers, memory_looked)
}

/// `CrossTableLookup` for `Cpu` to propagate stale contexts to `Memory`.
/// `CrossTableLookup` for `Cpu` to propagate stale contexts to `Memory`.
fn ctl_context_pruning<F: Field>() -> CrossTableLookup<F> {
    // A single looking table (the memory side) against the CPU-side looked
    // table.
    let looking_tables = vec![ctl_context_pruning_looking()];
    let looked_table = ctl_context_pruning_looked();
    CrossTableLookup::new(looking_tables, looked_table)
}

/// `CrossTableLookup` for `MemBefore` table to connect it with the `Memory`
/// module.
/// `CrossTableLookup` for `MemBefore` table to connect it with the `Memory`
/// module.
fn ctl_mem_before<F: Field>() -> CrossTableLookup<F> {
    // The `Memory` table is the (sole) looking side, restricted by the
    // `mem_before` filter.
    let looking_tables = vec![TableWithColumns::new(
        *Table::Memory,
        memory_stark::ctl_looking_mem(),
        memory_stark::ctl_filter_mem_before(),
    )];
    // `MemBefore` is the looked side.
    let looked_table = TableWithColumns::new(
        *Table::MemBefore,
        memory_continuation_stark::ctl_data(),
        memory_continuation_stark::ctl_filter(),
    );
    CrossTableLookup::new(looking_tables, looked_table)
}

/// `CrossTableLookup` for `MemAfter` table to connect it with the `Memory`
/// module.
/// `CrossTableLookup` for `MemAfter` table to connect it with the `Memory`
/// module.
fn ctl_mem_after<F: Field>() -> CrossTableLookup<F> {
    // The `Memory` table is the (sole) looking side, restricted by the
    // `mem_after` filter.
    let looking_tables = vec![TableWithColumns::new(
        *Table::Memory,
        memory_stark::ctl_looking_mem(),
        memory_stark::ctl_filter_mem_after(),
    )];
    // `MemAfter` is the looked side.
    let looked_table = TableWithColumns::new(
        *Table::MemAfter,
        memory_continuation_stark::ctl_data(),
        memory_continuation_stark::ctl_filter(),
    );
    CrossTableLookup::new(looking_tables, looked_table)
}
5 changes: 3 additions & 2 deletions evm_arithmetization/src/arithmetic/addcy.rs
Original file line number Diff line number Diff line change
Expand Up @@ -84,8 +84,9 @@ const GOLDILOCKS_INVERSE_65536: u64 = 18446462594437939201;
///
/// If `N_LIMBS = 1`, then this amounts to verifying that either `x_0
/// + y_0 = z_0` or `x_0 + y_0 == z_0 + cy*2^16` (this is `t` on line
/// 127ff). Ok. Now assume the constraints are valid for `N_LIMBS =
/// n-1`. Then by induction,
/// 127ff). Ok. Now assume the constraints are valid for `N_LIMBS = n-1`.
///
/// Then by induction,
///
/// \sum_{i=0}^{n-1} (x_i + y_i) * 2^(16*i) + (x_n + y_n)*2^(16*n) ==
/// \sum_{i=0}^{n-1} z_i * 2^(16*i) + cy_{n-1}*2^(16*n) + z_n*2^(16*n)
Expand Down
2 changes: 1 addition & 1 deletion evm_arithmetization/src/byte_packing/byte_packing_stark.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ use plonky2::timed;
use plonky2::util::timing::TimingTree;
use plonky2::util::transpose;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use starky::evaluation_frame::StarkEvaluationFrame;
use starky::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use starky::lookup::{Column, Filter, Lookup};
use starky::stark::Stark;

Expand Down
14 changes: 10 additions & 4 deletions evm_arithmetization/src/cpu/clock.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
// In the context of continuations, we subdivide proofs into segments. To pass
// the necessary memory values from one segment to the next, we write those
// initial values at timestamp 0. For this reason, the clock has to be
// initialized to 1 at the start of a segment execution.

use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
Expand All @@ -12,8 +17,8 @@ pub(crate) fn eval_packed<P: PackedField>(
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
// The clock is 0 at the beginning.
yield_constr.constraint_first_row(lv.clock);
// The clock is 1 at the beginning.
yield_constr.constraint_first_row(lv.clock - P::ONES);
// The clock is incremented by 1 at each row.
yield_constr.constraint_transition(nv.clock - lv.clock - P::ONES);
}
Expand All @@ -26,8 +31,9 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
nv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
// The clock is 0 at the beginning.
yield_constr.constraint_first_row(builder, lv.clock);
let first_clock = builder.add_const_extension(lv.clock, F::NEG_ONE);
// The clock is 1 at the beginning.
yield_constr.constraint_first_row(builder, first_clock);
// The clock is incremented by 1 at each row.
{
let new_clock = builder.add_const_extension(lv.clock, F::ONE);
Expand Down
21 changes: 21 additions & 0 deletions evm_arithmetization/src/cpu/columns/general.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ pub(crate) union CpuGeneralColumnsView<T: Copy> {
jumps: CpuJumpsView<T>,
shift: CpuShiftView<T>,
stack: CpuStackView<T>,
context_pruning: CpuContextPruningView<T>,
}

impl<T: Copy> CpuGeneralColumnsView<T> {
Expand Down Expand Up @@ -75,6 +76,18 @@ impl<T: Copy> CpuGeneralColumnsView<T> {
pub(crate) fn stack_mut(&mut self) -> &mut CpuStackView<T> {
unsafe { &mut self.stack }
}

/// View of the column for context pruning.
/// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn context_pruning(&self) -> &CpuContextPruningView<T> {
    // SAFETY: the enclosing type is a union whose variants are all views
    // over the same underlying columns, so this reinterpretation is sound.
    unsafe { &self.context_pruning }
}

/// Mutable view of the column for context pruning.
/// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn context_pruning_mut(&mut self) -> &mut CpuContextPruningView<T> {
    // SAFETY: the enclosing type is a union whose variants are all views
    // over the same underlying columns, so this reinterpretation is sound.
    unsafe { &mut self.context_pruning }
}
}

impl<T: Copy + PartialEq> PartialEq<Self> for CpuGeneralColumnsView<T> {
Expand Down Expand Up @@ -142,6 +155,14 @@ pub(crate) struct CpuShiftView<T: Copy> {
pub(crate) high_limb_sum_inv: T,
}

/// View of the first `CpuGeneralColumns` storing a flag for context pruning.
#[derive(Copy, Clone)]
pub(crate) struct CpuContextPruningView<T: Copy> {
    /// Binary flag: 1 if the OP flag `context_op` is set, the operation is
    /// `SET_CONTEXT` and `new_ctx < old_ctx`, and 0 otherwise.
    pub(crate) pruning_flag: T,
}

/// View of the last four `CpuGeneralColumns` storing stack-related variables.
/// The first three are used for conditionally enabling and disabling channels
/// when reading the next `stack_top`, and the fourth one is used to check for
Expand Down
42 changes: 38 additions & 4 deletions evm_arithmetization/src/cpu/contextops.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume

use super::columns::ops::OpsColumnsView;
use super::cpu_stark::{disable_unused_channels, disable_unused_channels_circuit};
use super::kernel::aggregator::KERNEL;
use crate::cpu::columns::CpuColumnsView;
use crate::memory::segments::Segment;

Expand Down Expand Up @@ -89,6 +90,9 @@ fn eval_packed_get<P: PackedField>(
yield_constr.constraint(filter * limb);
}

// We cannot prune a context in GET_CONTEXT.
yield_constr.constraint(filter * lv.general.context_pruning().pruning_flag);

// Constrain new stack length.
yield_constr.constraint(filter * (nv.stack_len - (lv.stack_len + P::ONES)));

Expand Down Expand Up @@ -121,6 +125,10 @@ fn eval_ext_circuit_get<F: RichField + Extendable<D>, const D: usize>(
yield_constr.constraint(builder, constr);
}

// We cannot prune a context in GET_CONTEXT.
let constr = builder.mul_extension(filter, lv.general.context_pruning().pruning_flag);
yield_constr.constraint(builder, constr);

// Constrain new stack length.
{
let new_len = builder.add_const_extension(lv.stack_len, F::ONE);
Expand Down Expand Up @@ -148,12 +156,24 @@ fn eval_packed_set<P: PackedField>(

// The next row's context is read from stack_top.
yield_constr.constraint(filter * (stack_top[2] - nv.context));
for (_, &limb) in stack_top.iter().enumerate().filter(|(i, _)| *i != 2) {
// The stack top contains the new context in the third limb, and a flag
// indicating whether the old context should be pruned in the first limb. The
// other limbs should be 0.
for (_, &limb) in stack_top[1..].iter().enumerate().filter(|(i, _)| *i != 1) {
yield_constr.constraint(filter * limb);
}

// The old SP is decremented (since the new context was popped) and stored in
// memory. The new SP is loaded from memory.
// Check that the pruning flag is binary.
yield_constr.constraint(
lv.op.context_op
* lv.general.context_pruning().pruning_flag
* (lv.general.context_pruning().pruning_flag - P::Scalar::ONES),
);
// stack_top[0] contains a flag indicating whether the context should be pruned.
yield_constr.constraint(filter * (lv.general.context_pruning().pruning_flag - stack_top[0]));

// The old SP is decremented (since the new context was popped)
// and stored in memory. The new SP is loaded from memory.
// This is all done with CTLs: nothing is constrained here.

// Constrain stack_inv_aux_2.
Expand Down Expand Up @@ -197,11 +217,25 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for (_, &limb) in stack_top.iter().enumerate().filter(|(i, _)| *i != 2) {
for (_, &limb) in stack_top[1..].iter().enumerate().filter(|(i, _)| *i != 1) {
let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}

// Check that the pruning flag is binary.
let diff = builder.mul_sub_extension(
lv.general.context_pruning().pruning_flag,
lv.general.context_pruning().pruning_flag,
lv.general.context_pruning().pruning_flag,
);
let constr = builder.mul_extension(lv.op.context_op, diff);
yield_constr.constraint(builder, constr);

// stack_top[0] contains a flag indicating whether the context should be pruned.
let diff = builder.sub_extension(lv.general.context_pruning().pruning_flag, stack_top[0]);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);

// The old SP is decremented (since the new context was popped) and stored in
// memory. The new SP is loaded from memory.
// This is all done with CTLs: nothing is constrained here.
Expand Down
Loading

0 comments on commit cd3e17c

Please sign in to comment.