Add CI workflow, resolve clippy warning (#29)

* feat: resolve clippy warning, remove unused deps, add ci workflow

* chore: add protoc install to CI

* chore: add foundry for test job

* chore: fix benchmark and add comparison

* chore: just run bench 1 time

* chore: try boa-dev/criterion-compare-action@v3

* chore: run all benchmark

* chore: rerun CI

* chore: only run prover_bench|generate_proof

* chore: use matrix strategy for each bench

* feat: add osv-scanner workflow

* chore: not adding manually, I'll use the GitHub UI

* chore: remove changes from Cargo.lock and all Cargo.toml files to avoid conflict with feature/revise_deps branch

* chore: resolve clippy module_inception

* chore: try no bench = false again

* chore: run cargo-udeps with --all-features flags

* chore: update default prover run command

* chore: lint fix

* chore: add osv-scanner.yml to CI workflow

* chore: try osv-scanner v2.2.2
This commit is contained in:
Vinh Trịnh
2025-08-28 16:49:25 +07:00
committed by GitHub
parent ae3b72a807
commit eec2ae76e9
14 changed files with 204 additions and 38 deletions

116
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,116 @@
# CI workflow: lint (fmt + clippy), test (with Foundry), unused-dependency
# check (cargo-udeps on nightly), and a Criterion benchmark comparison on PRs.
on:
  push:
    branches:
      - master
    paths-ignore:
      - "**.md"
      # NOTE(review): "!" negation is documented for the `paths` filter, not
      # `paths-ignore`; this entry may be silently ignored by GitHub. If the
      # intent is "skip markdown-only changes but still run for workflow
      # changes", consider switching to `paths` with negated patterns — verify.
      - "!.github/workflows/*.yml"
  pull_request:
    paths-ignore:
      - "**.md"
      - "!.github/workflows/*.yml"
name: CI
env:
  # protoc is required to build the gRPC/protobuf crates in this repo.
  PROTOC_VERSION: "3.25.3"
jobs:
  lint:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    name: Lint
    steps:
      - name: Checkout sources
        uses: actions/checkout@v4
      - name: Install stable toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy
      - name: Install protoc
        uses: taiki-e/install-action@v2
        with:
          tool: protoc@${{ env.PROTOC_VERSION }}
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "stable"
      # `success() || failure()` runs both checks even if the previous one
      # failed, so a single CI run reports all lint problems at once.
      - name: Check formatting
        if: success() || failure()
        run: cargo fmt --all -- --check
      - name: Check clippy
        if: success() || failure()
        run: cargo clippy --all-targets --release -- -D warnings
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    name: Test
    steps:
      - name: Checkout sources
        uses: actions/checkout@v4
      - name: Install stable toolchain
        uses: dtolnay/rust-toolchain@stable
      - name: Install protoc
        uses: taiki-e/install-action@v2
        with:
          tool: protoc@${{ env.PROTOC_VERSION }}
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "stable"
      # Foundry (anvil/forge) is needed by tests that spin up a local EVM node.
      - name: Install foundry
        uses: foundry-rs/foundry-toolchain@v1
      - name: Test
        run: cargo test --all-targets --all-features --release
  unused-deps:
    needs: [lint, test]
    runs-on: ubuntu-latest
    timeout-minutes: 60
    name: Unused Dependencies
    steps:
      - name: Checkout sources
        uses: actions/checkout@v4
      # cargo-udeps requires a nightly toolchain.
      - name: Install nightly toolchain
        uses: dtolnay/rust-toolchain@nightly
      - name: Install protoc
        uses: taiki-e/install-action@v2
        with:
          tool: protoc@${{ env.PROTOC_VERSION }}
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "nightly"
      - name: Install cargo-udeps
        uses: taiki-e/install-action@v2
        with:
          tool: cargo-udeps
      - name: Check unused dependencies
        run: cargo +nightly udeps --all-targets --all-features
  benchmark:
    needs: [lint, test]
    # Benchmarks compare against the PR base branch, so they only make sense
    # for pull_request events.
    if: github.event_name == 'pull_request'
    runs-on: ubuntu-latest
    timeout-minutes: 60
    strategy:
      matrix:
        # One job per Criterion bench target, run in parallel.
        benchmark: ["generate_proof", "prover_bench"]
    name: Benchmark ${{ matrix.benchmark }}
    steps:
      - name: Checkout sources
        uses: actions/checkout@v4
        with:
          # Full history is required so the compare action can check out and
          # benchmark the base branch as well.
          fetch-depth: 0
      - name: Install stable toolchain
        uses: dtolnay/rust-toolchain@stable
      - name: Install protoc
        uses: taiki-e/install-action@v2
        with:
          tool: protoc@${{ env.PROTOC_VERSION }}
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "stable"
      - name: Run benchmark comparison
        uses: boa-dev/criterion-compare-action@v3
        with:
          branchName: ${{ github.base_ref }}
          benchName: ${{ matrix.benchmark }}

53
.github/workflows/osv-scanner.yml vendored Normal file
View File

@@ -0,0 +1,53 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Dependency vulnerability scanning via Google's OSV-Scanner reusable workflows.
name: OSV-Scanner
on:
  pull_request:
    # NOTE(review): this targets "main" while ci.yml triggers on "master".
    # One of the two is likely wrong for this repository's default branch —
    # verify, or the scheduled/push scans here will never fire.
    branches: ["main"]
  merge_group:
    types: [checks_requested]
  schedule:
    # Weekly scan, Mondays at 12:12 UTC.
    - cron: "12 12 * * 1"
  push:
    branches: ["main"]
permissions:
  # Required to upload SARIF file to CodeQL. See: https://github.com/github/codeql-action/issues/2117
  actions: read
  # Require writing security events to upload SARIF file to security tab
  security-events: write
  # Read commit contents
  contents: read
jobs:
  # Full-tree scan on push and on the weekly schedule.
  scan-scheduled:
    if: ${{ github.event_name == 'push' || github.event_name == 'schedule' }}
    # Pinned to a commit SHA (v2.2.2) so the reusable workflow cannot change
    # underneath us.
    uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@9d4732e8b9db0915df9608123133640b58bb6750" # v2.2.2
    with:
      scan-args: |-
        --include-git-root
        -r
        ./
  # Diff-aware scan for pull requests and merge queues.
  scan-pr:
    if: ${{ github.event_name == 'pull_request' || github.event_name == 'merge_group' }}
    uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable-pr.yml@9d4732e8b9db0915df9608123133640b58bb6750" # v2.2.2
    with:
      scan-args: |-
        --include-git-root
        -r
        ./

1
.gitignore vendored
View File

@@ -2,7 +2,6 @@
# local run
/storage
config.toml
# heaptrack
*.zst

View File

@@ -7,7 +7,7 @@
## Run prover
PRIVATE_KEY=__MY_PRIVATE_KEY__ RUST_LOG=debug cargo run -p prover_cli
PRIVATE_KEY=__MY_PRIVATE_KEY__ RUST_LOG=debug cargo run -p prover_cli -- --no-config
### Run prover + Mock

View File

@@ -78,7 +78,7 @@ async fn proof_sender(port: u16, addresses: Vec<Address>, proof_count: usize) {
let request = tonic::Request::new(request_0);
let response: Response<SendTransactionReply> =
client.send_transaction(request).await.unwrap();
assert_eq!(response.into_inner().result, true);
assert!(response.into_inner().result);
}
}
@@ -103,18 +103,12 @@ async fn proof_collector(port: u16, proof_count: usize) -> Vec<RlnProofReply> {
}
}
let res = std::mem::take(&mut *result.write());
// println!("[Proof collector] Received {} proofs", res.len());
res
std::mem::take(&mut *result.write())
}
fn proof_generation_bench(c: &mut Criterion) {
let start = std::time::Instant::now();
let rayon_num_threads = std::env::var("RAYON_NUM_THREADS")
.unwrap_or("".to_string());
let rayon_num_threads = std::env::var("RAYON_NUM_THREADS").unwrap_or("".to_string());
let proof_service_count_default = 4;
let proof_service_count = std::env::var("PROOF_SERVICE_COUNT")
.map(|c| u16::from_str(c.as_str()).unwrap_or(proof_service_count_default))
@@ -175,7 +169,7 @@ fn proof_generation_bench(c: &mut Criterion) {
let addresses_0 = addresses.clone();
// Wait for proof_collector to be connected and waiting for some proofs
let _res = rt.block_on(async move {
rt.block_on(async move {
notify_start_2.notified().await;
println!("Prover is ready, registering users...");
register_users(port, addresses_0).await;
@@ -191,7 +185,10 @@ fn proof_generation_bench(c: &mut Criterion) {
let proof_count = proof_count as usize;
group.throughput(Throughput::Elements(proof_count as u64));
let benchmark_name = format!("prover_proof_{}_proof_service_{}_rt_{}", proof_count, proof_service_count, rayon_num_threads);
let benchmark_name = format!(
"prover_proof_{}_proof_service_{}_rt_{}",
proof_count, proof_service_count, rayon_num_threads
);
group.bench_with_input(
BenchmarkId::new(benchmark_name, proof_count),
&proof_count,
@@ -212,7 +209,6 @@ fn proof_generation_bench(c: &mut Criterion) {
group.finish();
println!("Benchmark finished in {:?}", start.elapsed());
}
criterion_group!(

View File

@@ -102,7 +102,7 @@ pub struct AppArgs {
#[arg(
long = "no-config",
help = "Dont read a config file",
default_missing_value = "false",
default_missing_value = "true",
action = SetTrue,
help_heading = "config"
)]

View File

@@ -424,7 +424,7 @@ mod tests {
#[test]
fn test_compute_current_epoch_slice() {
let day = NaiveDate::from_ymd_opt(2025, 5, 14).unwrap();
let now_date = day.clone();
let now_date = day;
let now_f = move || {
let now_0: NaiveDateTime = day.and_hms_opt(0, 4, 0).unwrap();

View File

@@ -1,5 +1,5 @@
#[cfg(test)]
mod epoch_service_tests {
mod tests {
// std
use std::sync::Arc;
@@ -38,7 +38,7 @@ mod epoch_service_tests {
let res = tokio::try_join!(
epoch_service
.listen_for_new_epoch()
.map_err(|e| AppErrorExt::AppError(e)),
.map_err(AppErrorExt::AppError),
// Wait for 3 epoch slices
// + WAIT_UNTIL_MIN_DURATION * 2 (expect a maximum of 2 retry)
// + 500 ms (to wait to receive notif + counter incr)

View File

@@ -1,5 +1,4 @@
mod args;
// mod epoch_service;
mod epoch_service;
mod error;
mod grpc_service;

View File

@@ -1,5 +1,5 @@
#[cfg(test)]
mod proof_service_tests {
mod tests {
use std::io::Cursor;
use std::path::PathBuf;
use std::sync::Arc;
@@ -216,7 +216,7 @@ mod proof_service_tests {
}
debug!("Now recovering secret hash...");
let proof_values_0 = proof_values_store.get(0).unwrap();
let proof_values_0 = proof_values_store.first().unwrap();
let proof_values_1 = proof_values_store.get(1).unwrap();
println!("proof_values_0: {:?}", proof_values_0);
println!("proof_values_1: {:?}", proof_values_1);
@@ -225,7 +225,7 @@ mod proof_service_tests {
// Note: if not in test, should check for external nullifier
let recovered_identity_secret_hash =
compute_id_secret(share1, share2).map_err(|e| AppErrorExt::RecoverSecretFailed(e))?;
compute_id_secret(share1, share2).map_err(AppErrorExt::RecoverSecretFailed)?;
debug!(
"recovered_identity_secret_hash: {:?}",
@@ -254,7 +254,7 @@ mod proof_service_tests {
user_identity: user_db.get_user(&sender).unwrap(),
rln_identifier: rln_identifier.clone(),
tx_counter: 0,
tx_sender: sender.clone(),
tx_sender: sender,
tx_hash: tx_hashes.0.to_vec(),
})
.await

View File

@@ -287,11 +287,11 @@ mod tests {
let buffer = index.to_le_bytes();
let mut db_batch = WriteBatch::default();
db_batch.merge(key_1, &buffer);
db_batch.merge(key_1, &buffer);
db_batch.merge(key_1, buffer);
db_batch.merge(key_1, buffer);
db.write(db_batch).unwrap();
let get_key_1 = db.get(&key_1).unwrap().unwrap();
let get_key_1 = db.get(key_1).unwrap().unwrap();
let value = u64::from_le_bytes(get_key_1.try_into().unwrap());
assert_eq!(value, index * 2); // 2x merge
@@ -325,14 +325,14 @@ mod tests {
db_batch.merge(key_1, &buffer);
db.write(db_batch).unwrap();
let get_key_1 = db.get(&key_1).unwrap().unwrap();
let get_key_1 = db.get(key_1).unwrap().unwrap();
let (_, get_value_k1) = epoch_counter_deser.deserialize(&get_key_1).unwrap();
// Applied EpochIncr 2x
assert_eq!(get_value_k1.epoch_counter, 4);
assert_eq!(get_value_k1.epoch_slice_counter, 4);
let get_key_2 = db.get(&key_2).unwrap();
let get_key_2 = db.get(key_2).unwrap();
assert!(get_key_2.is_none());
// new epoch slice
@@ -347,7 +347,7 @@ mod tests {
epoch_incr_ser.serialize(&value_2, &mut buffer);
db.merge(key_1, buffer).unwrap();
let get_key_1 = db.get(&key_1).unwrap().unwrap();
let get_key_1 = db.get(key_1).unwrap().unwrap();
let (_, get_value_2) = epoch_counter_deser.deserialize(&get_key_1).unwrap();
assert_eq!(
@@ -373,7 +373,7 @@ mod tests {
epoch_incr_ser.serialize(&value_3, &mut buffer);
db.merge(key_1, buffer).unwrap();
let get_key_1 = db.get(&key_1).unwrap().unwrap();
let get_key_1 = db.get(key_1).unwrap().unwrap();
let (_, get_value_3) = epoch_counter_deser.deserialize(&get_key_1).unwrap();
assert_eq!(

View File

@@ -1,5 +1,5 @@
#[cfg(test)]
mod user_db_tests {
mod tests {
// std
use std::path::PathBuf;
use std::sync::Arc;
@@ -149,9 +149,9 @@ mod user_db_tests {
)
.unwrap();
assert_eq!(user_db.has_user(&addr).unwrap(), false);
assert_eq!(user_db.has_user(&ADDR_1).unwrap(), true);
assert_eq!(user_db.has_user(&ADDR_2).unwrap(), true);
assert!(!user_db.has_user(&addr).unwrap());
assert!(user_db.has_user(&ADDR_1).unwrap());
assert!(user_db.has_user(&ADDR_2).unwrap());
assert_eq!(
user_db.get_tx_counter(&ADDR_1).unwrap(),
(2.into(), 2.into())

View File

@@ -143,7 +143,7 @@ async fn proof_sender(port: u16, addresses: Vec<Address>, proof_count: usize) {
let request = tonic::Request::new(request_0);
let response: Response<SendTransactionReply> =
client.send_transaction(request).await.unwrap();
assert_eq!(response.into_inner().result, true);
assert!(response.into_inner().result);
count += 1;
}

View File

@@ -65,9 +65,12 @@ async fn main() -> Result<(), RlnScError> {
}
// Connect to KarmaRLN contract with signer
let rln_contract =
KarmaRLNSC::KarmaRLNSCInstance::try_new_with_signer(url, contract_addr, Zeroizing::new(args.private_key))
.await?;
let rln_contract = KarmaRLNSC::KarmaRLNSCInstance::try_new_with_signer(
url,
contract_addr,
Zeroizing::new(args.private_key),
)
.await?;
println!("Successfully connected to RLN contract with signer at {contract_addr}",);