#! Stores the layer commitments C followed by [d_size, t_depth, a1, a0] and [poe, p, e1, e0] where:
#! 1) d_size is the domain size divided by 4 of the domain corresponding to C.
#! 2) t_depth is the tree depth of the Merkle tree with commitment C.
#! 3) (a0, a1) is the folding challenge used to create the next layer.
#! 4) p is the query index, (e0, e1) is the evaluation at the first layer, and poe is g^p with
#!    g being the initial domain generator.
#! TODO: This pre-processing function should in fact compute d_size and t_depth for each C
#! starting from the original domain size.
export.preprocess.4
    locaddr.3
    adv_push.1              #[num_queries, query_ptr, g, ..]
    sub.1
    push.0.0.0.0
    push.1
    while.true
        adv_loadw           #[Q, num_queries, ptr, ..]
        dup.5               #[ptr, Q, num_queries, ptr, ..]
        u32wrapping_add.1   #[ptr+1, Q, num_queries, ptr, ..]
        swap.6              #[ptr, Q, num_queries, ptr+1, ..]
        mem_storew          #[Q, num_queries, ptr+1, ..]
        dup.4
        sub.1               #[num_queries-1, Q, num_queries, ptr+1, ..]
        swap.5              #[num_queries, Q, num_queries-1, ptr+1, ..]
        neq.0               #[?, Q, num_queries-1, ptr+1, ..]
    end
    #=> [X, x, layer_ptr, g]

    drop
    #=> [X, layer_ptr, g]

    dup.4
    movdn.5
    #=> [X, layer_ptr, layer_ptr, g]

    adv_push.1
    mul.2
    sub.1
    movdn.4
    #=> [X, num_layers, layer_ptr, layer_ptr, g]

    push.1
    while.true
        adv_loadw
        dup.5
        u32wrapping_add.1
        swap.6
        mem_storew
        dup.4
        sub.1
        swap.5
        neq.0
    end
    #=> [X, x, remainder_ptr, layer_ptr, g]

    drop
    #=> [X, remainder_ptr, layer_ptr, g]

    dup.4
    movdn.5
    #=> [X, remainder_ptr, remainder_ptr, layer_ptr, g]

    adv_push.1
    sub.1
    movdn.4
    #=> [X, len_remainder/2, remainder_ptr, remainder_ptr, layer_ptr, g]

    push.1
    while.true
        adv_loadw
        dup.5
        u32wrapping_add.1
        swap.6
        mem_storew
        dup.4
        sub.1
        swap.5
        neq.0
    end
    #=> [X, x, x, remainder_ptr, layer_ptr, g]

    dropw drop drop
    swap
    locaddr.3
    #=> [query_ptr, layer_ptr, remainder_ptr, g]
end

#! Checks that, for a query with index p at layer i, the folding procedure to create layer (i + 1)
#! was performed correctly. This also advances layer_ptr by 2 to point to the next query layer.
#!
#! Input: [layer_ptr, layer_ptr, poe, p, e1, e0, layer_ptr, rem_ptr, x, x, x, x, x, x, x, x, ...]
#! Output: [layer_ptr + 2, layer_ptr + 2, poe^4, f_pos, ne1, ne0, layer_ptr + 2, rem_ptr, x, x, x, x, x, x, x, x, ...]
#!
#! Cycles: 76
export.verify_query_layer.3

    # load layer commitment C as well as [a0, a1, t_depth, d_size] (7 cycles)
    swapdw
    movup.8
    add.1
    mem_loadw   # load [a0, a1, t_depth, d_size] from layer_ptr + 1
    swapw
    movup.8
    mem_loadw   # load C from layer_ptr
    # => [C, d_size, t_depth, a1, a0, poe, p, e1, e0, layer_ptr, rem_ptr, ...]

    # verify Merkle auth path for (index = f_pos, depth = t_depth, Root = C) (19 cycles)
    swapw.2             # [poe, p, e1, e0, d_size, t_depth, a1, a0, C, layer_ptr, rem_ptr, ...]
    swap                # [p, poe, e1, e0, d_size, t_depth, a1, a0, C, layer_ptr, rem_ptr, ...]
    movup.4             # [d_size, p, poe, e1, e0, t_depth, a1, a0, C, layer_ptr, rem_ptr, ...]
    u32divmod           # p and d_size must be u32 values
    movup.5
    movupw.2
    dup.5
    movup.5             # [t_depth, f_pos, C, f_pos, d_seg, poe, e1, e0, a1, a0, layer_ptr, rem_ptr, ...]
    mtree_get           # [V, C, f_pos, d_seg, poe, e1, e0, a1, a0, layer_ptr, rem_ptr, ...]
    adv.push_mapval
    swapw
    # => [C, V, f_pos, d_seg, poe, e1, e0, a1, a0, layer_ptr, rem_ptr, ...]
    # where f_pos = p % d_size and d_seg = p / d_size

    # unhash V and save the pre-image at locaddr.1 and locaddr.2; we don't clear the values of C
    # because adv_pipe overwrites the first 8 elements of the stack (15 cycles)
    locaddr.1
    movdn.4
    push.0.0.0.0
    swapw
    push.0.0.0.0
    adv_pipe
    hperm
    # => [T2, T1, T0, ptr, V, f_pos, d_seg, poe, e1, e0, a1, a0, layer_ptr, rem_ptr, ..]
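    # Note: T1, the middle word of the hperm output, is the hasher digest of the eight unhashed
    # values (v7, ..., v0) and is checked against the Merkle leaf V below. As a worked example
    # with hypothetical numbers: for d_size = 8 and p = 19, the u32divmod above gives
    # f_pos = 19 % 8 = 3 and d_seg = 19 / 8 = 2, i.e. the query maps to position 3 of the folded
    # domain and to segment 2 of the four coset values committed to by V.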
    # assert T1 == V (16 cycles)
    swapw.3
    drop
    movup.3
    assert_eq
    movup.2
    assert_eq
    assert_eq
    movup.9
    assert_eq

    # load (v7, ..., v0) from memory (8 cycles)
    loc_loadw.1
    swapw
    loc_loadw.2
    # => [v7, ..., v0, f_pos, d_seg, poe, e1, e0, a1, a0, layer_ptr, rem_ptr, ...]

    # fold by 4 (1 cycle)
    fri_ext2fold4
    # => [x, x, x, x, x, x, x, x, x, x, layer_ptr + 2, poe^4, f_pos, ne1, ne0, rem_ptr, ...]

    # prepare for next iteration (10 cycles)
    swapdw
    dup.2
    movdn.7
    drop
    drop
    dup
    dup.7
    dup.1
    neq
    # => [?, layer_ptr + 2, layer_ptr + 2, poe^4, f_pos, ne1, ne0, layer_ptr + 2, rem_ptr, x, x, x, x, x, x, x, x, ...]
end

#! Verifies one FRI query.
#!
#! Input: [poe, p, e1, e0, layer_ptr, rem_ptr, ...]
#! Output: [x, x, x, x, x, x, x, x, x, x, ...]
#!
#! - poe is g^p.
#! - p is a query index at the first layer.
#! - (e0, e1) is an extension field element corresponding to the value of the first layer at index p.
#! - layer_ptr is the memory address of the layer data (Merkle tree root, alpha etc.) for the next
#!   layer.
#! - rem_ptr is the memory address of the remainder codeword.
#!
#! Cycles: 40 + num_layers * 76
export.verify_query

    # prepare the stack to match the output stack state of the fri_ext2fold4 instruction
    # (16 cycles)
    dup.5
    dup.5
    push.0.0.0.0
    push.0.0.0.0
    swapdw
    dup
    dup
    movup.3
    neq
    # => [?, layer_ptr, layer_ptr, poe, p, e1, e0, layer_ptr, rem_ptr, 0, 0, 0, 0, 0, 0, 0, 0, ...]

    # verify correctness of layer folding
    while.true
        exec.verify_query_layer
    end
    # => [rem_ptr, rem_ptr, poe^(2^n), f_pos, ne1, ne0, rem_ptr, rem_ptr, x, x, x, x, x, x, x, x, ...]

    # check that remainder[f_pos] == (ne0, ne1)
    # Since each memory address contains two extension field elements, we have to determine which
    # of the two elements we should compare against. (7 cycles)
    movup.3
    push.2
    u32divmod   # f_pos must be a u32 value
    movdn.4
    dup.1
    dup.1
    add
    # => [rem_ptr + offset, x, x, x, x, ?, ne1, ne0, rem_ptr, rem_ptr, x, x, x, x, x, x, x, x, ..]
    mem_loadw
    # => [e1', e0', e1, e0, ?, ne1, ne0, rem_ptr, rem_ptr, x, x, x, x, x, x, x, x, ..]

    # compare (ne0, ne1) against the appropriate tuple from the remainder word (14 cycles)
    movup.2
    swap
    dup.4
    cdrop
    movdn.3
    movup.2
    cdrop
    swap.2
    assert_eq
    assert_eq
    # => [x, x, x, x, x, x, x, x, x, x, ...]
end

#! Verifies a FRI proof where the proof was generated over the quadratic extension of the base
#! field and layer folding was performed using folding factor 4.
#! Note that the check that the remainder codeword corresponds to the remainder polynomial received
#! by the verifier should now be performed by the calling procedure.
#!
#! Input: [query_ptr, layer_ptr, rem_ptr, g, ...]
#! Output: [...]
#!
#! - query_ptr is a pointer to a list of tuples of the form (e0, e1, p, poe) where poe is equal
#!   to g^p with g being the initial FRI domain generator. p is the query index at the first layer
#!   and (e0, e1) is an extension field element corresponding to the value of the first layer at index p.
#! - layer_ptr is a pointer to the first layer commitment denoted throughout the code by C.
#!   layer_ptr + 1 points to the first [alpha0, alpha1, t_depth, d_size] where d_size is the size
#!   of the initial domain divided by 4, t_depth is the depth of the Merkle tree commitment to the
#!   first layer, and (alpha0, alpha1) is the first challenge used in folding the first layer.
#!   Both t_depth and d_size are expected to be smaller than 2^32. Otherwise, the result of
#!   this procedure is undefined.
#! - rem_ptr is a pointer to the first tuple of two consecutive degree 2 extension field
#!   elements making up the remainder codeword. This codeword can be of length either 32 or 64.
#!
#! The memory referenced above is used contiguously, as follows:
#!
#! [query_ptr ... layer_ptr ... rem_ptr ...]
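#!
#! As an illustration with hypothetical numbers: for num_queries = 27, num_layers = 4 and a
#! remainder codeword of length 32, preprocess lays out 27 (e0, e1, p, poe) words starting at
#! query_ptr, followed by 2 * 4 = 8 layer words starting at layer_ptr = query_ptr + 27,
#! followed by 32 / 2 = 16 remainder words starting at rem_ptr = layer_ptr + 8.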
#!
#! This means, for example, that:
#! 1. rem_ptr - 1 points to the last (alpha0, alpha1, t_depth, d_size) tuple.
#! 2. layer_ptr - 1 points to the last (e0, e1, p, poe) tuple.
#!
#! Cycles: 7 + 4 + num_queries * (40 + num_layers * 76 + 26)
export.verify.1

    # store [query_ptr, layer_ptr, rem_ptr, g] to keep track of all queries
    # (3 cycles)
    loc_storew.0

    # compute the loop entry condition (4 cycles)
    dup
    dup.2
    neq
    # => [(query_ptr != layer_ptr), query_ptr, layer_ptr, rem_ptr, g, ...]

    while.true
        # load [e0, e1, p, poe] from memory, i.e. the next query data (7 cycles)
        push.0.0.0.0
        movup.4
        mem_loadw
        # => [poe, p, e1, e0, layer_ptr, rem_ptr, g, ...]

        # we now have everything needed to verify query p
        exec.verify_query

        # prepare for the next iteration (18 cycles)
        # => [x, x, x, x, x, x, x, x, x, x, g, ...]
        dropw drop drop drop
        loc_loadw.0     # load [query_ptr, layer_ptr, rem_ptr, g]
        add.1
        loc_storew.0    # store [query_ptr + 1, layer_ptr, rem_ptr, g]
        dup
        dup.2
        neq
        #=> [?, query_ptr + 1, layer_ptr, rem_ptr, g, ...]
    end
    #=> [X, ..]

    dropw
end
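
# Illustrative usage (a sketch, not part of this module's exports): a caller that has loaded the
# FRI proof data into the advice provider in the order preprocess expects, with the operand stack
# holding [g, ...], could verify all queries as follows (module import path elided):
#
#   exec.preprocess     # => [query_ptr, layer_ptr, rem_ptr, g, ...]
#   exec.verify         # => [...]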