chore: Adhere to maximum line length
changelog: ignore
jan-ferdinand committed Dec 23, 2024
1 parent 60ba090 commit 52b5057
Showing 4 changed files with 47 additions and 40 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/main.yml
@@ -28,6 +28,11 @@ jobs:
- name: Install nextest
uses: taiki-e/install-action@nextest

- name: Check max line length
# rustfmt gives up on lines that are too long
if: runner.os == 'Linux'
run: "! grep -rP '^.{101}' --include='*.rs' ."

- name: Run fmt
run: cargo fmt --all -- --check
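
For context, the new "Check max line length" step fails the job as soon as any .rs file contains a line longer than 100 characters: the pattern ^.{101} matches any line with at least 101 characters, and the leading ! inverts grep's exit status, so a single match fails the step. A minimal Rust sketch of the same check over one file's text (a hypothetical helper for illustration; the 100-character limit mirrors the workflow):

// Hypothetical helper mirroring the step's `! grep -rP '^.{101}'` for one file:
// report the 1-based numbers of lines whose character count exceeds the limit.
fn lines_exceeding_limit(source: &str, limit: usize) -> Vec<usize> {
    source
        .lines()
        .enumerate()
        .filter(|(_, line)| line.chars().count() > limit)
        .map(|(index, _)| index + 1)
        .collect()
}

fn main() {
    let source = "short line\n".to_owned() + &"x".repeat(101) + "\n";
    assert_eq!(lines_exceeding_limit(&source, 100), vec![2]);
}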

4 changes: 2 additions & 2 deletions triton-air/src/table/program.rs
@@ -229,7 +229,7 @@ impl AIR for ProgramTable {
let index_in_chunk_next_is_not_max =
one - max_minus_index_in_chunk_inv_next * index_in_chunk_next_is_max.clone();

let send_chunk_running_eval_absorbs_chunk_iff_index_in_chunk_next_is_max_and_not_padding_row =
let send_chunk_running_eval_absorbs_chunk_iff_index_in_chunk_next_is_max_and_not_padding =
send_chunk_running_evaluation_absorbs_next_chunk
* next_row_is_table_padding_row
* index_in_chunk_next_is_not_max
@@ -246,7 +246,7 @@ impl AIR for ProgramTable {
table_padding_starts_when_hash_input_padding_is_active_and_index_in_chunk_is_zero,
log_derivative_updates_if_and_only_if_not_a_padding_row,
prepare_chunk_running_evaluation_resets_every_rate_rows_and_absorbs_next_instruction,
send_chunk_running_eval_absorbs_chunk_iff_index_in_chunk_next_is_max_and_not_padding_row,
send_chunk_running_eval_absorbs_chunk_iff_index_in_chunk_next_is_max_and_not_padding,
]
}

16 changes: 9 additions & 7 deletions triton-constraint-builder/src/substitutions.rs
@@ -286,15 +286,17 @@ impl Substitutions {
.and(row_indices.view())
.par_for_each( |mut section_row, &current_row_index| {
let next_row_index = current_row_index + 1;
let current_main_row_slice = original_part.slice(s![current_row_index..=current_row_index, ..]);
let next_main_row_slice = original_part.slice(s![next_row_index..=next_row_index, ..]);
let current_main_row_slice =
original_part.slice(s![current_row_index..=current_row_index, ..]);
let next_main_row_slice =
original_part.slice(s![next_row_index..=next_row_index, ..]);
let mut current_main_row = current_main_row_slice.row(0).to_owned();
let next_main_row = next_main_row_slice.row(0);
#(
section_row[#indices] = #substitutions;
current_main_row.push(Axis(0), section_row.slice(s![#indices])).unwrap();
)*
});
});
)
}

@@ -321,10 +323,10 @@ impl Substitutions {
|main_table_row, original_row, mut section_row| {
let mut auxiliary_row = original_row.to_owned();
#(
let (original_row_auxiliary_row, mut det_col) =
section_row.multi_slice_mut((s![..#indices],s![#indices..=#indices]));
det_col[0] = #substitutions;
auxiliary_row.push(Axis(0), det_col.slice(s![0])).unwrap();
let (original_row_auxiliary_row, mut det_col) =
section_row.multi_slice_mut((s![..#indices],s![#indices..=#indices]));
det_col[0] = #substitutions;
auxiliary_row.push(Axis(0), det_col.slice(s![0])).unwrap();
)*
}
);
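
As a side note, the rewrapped lines rely on two ndarray slicing patterns. A minimal sketch with an assumed element type (the real tables hold field elements), not the crate's actual helpers:

use ndarray::{s, Array1, Array2, ArrayViewMut1};

// Take one table row as an owned 1-D array, as done for the current and next
// main rows above: the inclusive range keeps the slice 2-dimensional (1 x n),
// so row(0) extracts the only row and to_owned() copies it.
fn owned_row(table: &Array2<u64>, row_index: usize) -> Array1<u64> {
    let row_slice = table.slice(s![row_index..=row_index, ..]);
    row_slice.row(0).to_owned()
}

// Split one mutable row into two disjoint views, the prefix before `index` and
// the single cell at `index`, then write through the cell view.
fn set_cell_via_split(mut row: ArrayViewMut1<u64>, index: usize, value: u64) {
    let (_prefix, mut cell) = row.multi_slice_mut((s![..index], s![index..=index]));
    cell[0] = value;
}
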
62 changes: 31 additions & 31 deletions triton-vm/src/example_programs.rs
@@ -405,79 +405,79 @@ pub(crate) fn calculate_new_mmr_peaks_from_append_with_safe_lists() -> Program {
halt

// Main function
// BEFORE: _ old_leaf_count_hi old_leaf_count_lo *peaks [digest]
// BEFORE: _ [old_leaf_count: u64] *peaks [digest]
// AFTER: _ *new_peaks *auth_path
tasm_mmr_calculate_new_peaks_from_append_safe:
dup 5 dup 5 dup 5 dup 5 dup 5 dup 5
call tasm_list_safe_u32_push_digest
pop 5 // _ old_leaf_count_hi old_leaf_count_lo *peaks
pop 5 // _ [old_leaf_count: u64] *peaks

// Create auth_path return value (vector living in RAM)
// All MMR auth paths have capacity for 64 digests
push 64 // _ old_leaf_count_hi old_leaf_count_lo *peaks 64
push 64 // _ [old_leaf_count: u64] *peaks 64
call tasm_list_safe_u32_new_digest

swap 1
// stack: _ old_leaf_count_hi old_leaf_count_lo *auth_path *peaks
// _ [old_leaf_count: u64] *auth_path *peaks

dup 3 dup 3
// stack: _ old_leaf_count_hi old_leaf_count_lo *auth_path *peaks old_leaf_count_hi old_leaf_count_lo
// _ [old_leaf_count: u64] *auth_path *peaks [old_leaf_count: u64]

call tasm_arithmetic_u64_incr
call tasm_arithmetic_u64_index_of_last_nonzero_bit

call tasm_mmr_calculate_new_peaks_from_append_safe_while
// stack: _ old_leaf_count_hi old_leaf_count_lo *auth_path *peaks (rll = 0)
// _ [old_leaf_count: u64] *auth_path *peaks (rll = 0)

pop 1
swap 3 pop 1 swap 1 pop 1
// stack: _ *peaks *auth_path
// _ *peaks *auth_path

return

// Stack start and end: _ old_leaf_count_hi old_leaf_count_lo *auth_path *peaks rll
// Stack start and end: _ *auth_path *peaks rll
tasm_mmr_calculate_new_peaks_from_append_safe_while:
dup 0
push 0
eq
skiz
return
// Stack: _ old_leaf_count_hi old_leaf_count_lo *auth_path *peaks rll
// _ *auth_path *peaks rll

swap 2 swap 1
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks
// _ rll *auth_path *peaks

dup 0
dup 0
call tasm_list_safe_u32_pop_digest
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks *peaks [digest (new_hash)]
// _ rll *auth_path *peaks *peaks [digest (new_hash)]

dup 5
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks *peaks [digest (new_hash)] *peaks
// _ rll *auth_path *peaks *peaks [digest (new_hash)] *peaks

call tasm_list_safe_u32_pop_digest
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks *peaks [digest (new_hash)] [digests (previous_peak)]
// _ rll *auth_path *peaks *peaks [digest (new_hash)] [digests (old_peak)]

// Update authentication path with latest previous_peak
dup 12
dup 5 dup 5 dup 5 dup 5 dup 5
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks *peaks [digest (new_hash)] [digests (previous_peak)] *auth_path [digests (previous_peak)]
// _ rll *auth_path *peaks *peaks [digest (new_hash)] [digests (old_peak)] *auth_path

dup 5 dup 5 dup 5 dup 5 dup 5
call tasm_list_safe_u32_push_digest
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks *peaks [digest (new_hash)] [digests (previous_peak)]
// _ rll *auth_path *peaks *peaks [digest (new_hash)] [digests (old_peak)]

hash
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks *peaks [digests (new_peak)]
// _ rll *auth_path *peaks *peaks [digests (new_peak)]

call tasm_list_safe_u32_push_digest
// Stack: _ old_leaf_count_hi old_leaf_count_lo rll *auth_path *peaks
// _ rll *auth_path *peaks

swap 1 swap 2
// Stack: _ old_leaf_count_hi old_leaf_count_lo *auth_path *peaks rll
// _ *auth_path *peaks rll

push -1
add
// Stack: _ old_leaf_count_hi old_leaf_count_lo *auth_path *peaks (rll - 1)
// _ *auth_path *peaks (rll - 1)

recurse
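
Taken together, the two labels above implement the usual MMR append: push the new leaf digest onto the peaks list, then merge peaks as many times as the append triggers. A rough Rust sketch under stated assumptions (Digest, the pair hash, and the trailing-zeros helper are placeholders; the TASM keeps peaks and auth_path as safe lists in RAM rather than Vecs, and the hash argument order follows the MMR convention):

type Digest = [u64; 5]; // placeholder for the crate's digest type

fn hash_pair(_left: Digest, _right: Digest) -> Digest {
    unimplemented!("stands in for the VM's hash instruction")
}

fn calculate_new_peaks_from_append(
    old_leaf_count: u64,
    mut peaks: Vec<Digest>,
    new_leaf: Digest,
) -> (Vec<Digest>, Vec<Digest>) {
    peaks.push(new_leaf);
    let mut auth_path: Vec<Digest> = Vec::with_capacity(64);

    // rll: index of the last (least significant) nonzero bit of old_leaf_count + 1,
    // i.e. the number of peak merges this append triggers.
    let mut rll = (old_leaf_count + 1).trailing_zeros();
    while rll != 0 {
        let new_hash = peaks.pop().expect("peaks holds the freshly pushed digest");
        let previous_peak = peaks.pop().expect("a merge needs a second peak");
        auth_path.push(previous_peak);
        peaks.push(hash_pair(previous_peak, new_hash));
        rll -= 1;
    }
    (peaks, auth_path)
}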

@@ -595,35 +595,35 @@ pub(crate) fn calculate_new_mmr_peaks_from_append_with_safe_lists() -> Program {
swap 1
push 1
dup 1
// stack: _ value_lo value_hi 1 value_hi
// _ value_lo value_hi 1 value_hi

skiz call tasm_arithmetic_u64_log_2_floor_then
skiz call tasm_arithmetic_u64_log_2_floor_else
// stack: _ log2_floor(value)
// _ log2_floor(value)

return

tasm_arithmetic_u64_log_2_floor_then:
// value_hi != 0
// stack: _ value_lo value_hi 1
// _ value_lo value_hi 1
swap 1
swap 2
pop 2
// stack: _ value_hi
// _ value_hi

log_2_floor
push 32
add
// stack: _ (log2_floor(value_hi) + 32)
// _ (log2_floor(value_hi) + 32)

push 0
// stack: _ (log2_floor(value_hi) + 32) 0
// _ (log2_floor(value_hi) + 32) 0

return

tasm_arithmetic_u64_log_2_floor_else:
// value_hi == 0
// stack: _ value_lo value_hi
// _ value_lo value_hi
pop 1
log_2_floor
return
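
In plain Rust, the branch above computes the floor of the base-2 logarithm of a u64 given as two u32 limbs (value = value_hi * 2^32 + value_lo, value nonzero); a small equivalent sketch:

// Equivalent of the then/else branches above.
// Undefined for value == 0, which the VM's log_2_floor also rejects.
fn log_2_floor_u64(value_hi: u32, value_lo: u32) -> u32 {
    if value_hi != 0 {
        // log2_floor(value_hi) + 32
        31 - value_hi.leading_zeros() + 32
    } else {
        // value_hi == 0, so only the low limb matters
        31 - value_lo.leading_zeros()
    }
}
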
@@ -666,11 +666,11 @@ pub(crate) fn calculate_new_mmr_peaks_from_append_with_safe_lists() -> Program {
tasm_arithmetic_u64_and:
swap 3
and
// stack: _ lhs_lo rhs_lo (lhs_hi & rhs_hi)
// _ lhs_lo rhs_lo (lhs_hi & rhs_hi)

swap 2
and
// stack: _ (lhs_hi & rhs_hi) (rhs_lo & lhs_lo)
// _ (lhs_hi & rhs_hi) (rhs_lo & lhs_lo)

return

@@ -748,11 +748,11 @@ pub(crate) fn calculate_new_mmr_peaks_from_append_with_safe_lists() -> Program {
tasm_arithmetic_u64_xor:
swap 3
xor
// stack: _ lhs_lo rhs_lo (lhs_hi ^ rhs_hi)
// _ lhs_lo rhs_lo (lhs_hi ^ rhs_hi)

swap 2
xor
// stack: _ (lhs_hi ^ rhs_hi) (rhs_lo ^ lhs_lo)
// _ (lhs_hi ^ rhs_hi) (rhs_lo ^ lhs_lo)

return
)
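
The two bitwise helpers above follow the same limb-wise pattern: a u64 lives on the stack as a (hi, lo) pair of u32 words, and the operation is applied to each limb independently. The same idea as a small Rust sketch (the tuple representation is only for illustration):

// (hi, lo) limb pairs; the swaps in the TASM merely rearrange operands so that
// matching limbs meet before each and/xor.
fn u64_and(lhs: (u32, u32), rhs: (u32, u32)) -> (u32, u32) {
    (lhs.0 & rhs.0, lhs.1 & rhs.1)
}

fn u64_xor(lhs: (u32, u32), rhs: (u32, u32)) -> (u32, u32) {
    (lhs.0 ^ rhs.0, lhs.1 ^ rhs.1)
}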
