mirror of
https://github.com/vacp2p/de-mls.git
synced 2026-01-09 21:48:02 -05:00
Compare commits
13 Commits
v1.0.0
...
de-mls-api
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7d4cc6ef60 | ||
|
|
ee133746b5 | ||
|
|
555af7ba7a | ||
|
|
6a860bfd11 | ||
|
|
be9dbfdbf9 | ||
|
|
ae4ee902d5 | ||
|
|
4ea1136012 | ||
|
|
e37a4b435f | ||
|
|
559cc856c0 | ||
|
|
867d48730d | ||
|
|
53fab78ef6 | ||
|
|
8ebeb4d898 | ||
|
|
d93ac900ae |
@@ -1,4 +0,0 @@
|
||||
.env/
|
||||
.idea/
|
||||
target/
|
||||
frontend/
|
||||
126
.github/workflows/ci.yml
vendored
126
.github/workflows/ci.yml
vendored
@@ -7,59 +7,91 @@ on:
|
||||
|
||||
name: "CI"
|
||||
|
||||
jobs:
|
||||
# ds_test:
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - name: Checkout code
|
||||
# uses: actions/checkout@v3
|
||||
# - name: Install stable toolchain
|
||||
# uses: actions-rs/toolchain@v1
|
||||
# with:
|
||||
# profile: minimal
|
||||
# toolchain: stable
|
||||
# override: true
|
||||
# - uses: Swatinem/rust-cache@v2
|
||||
# - name: cargo test
|
||||
# run: |
|
||||
# cargo test --release
|
||||
# working-directory: ds
|
||||
user_test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20.x'
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
- name: cargo test
|
||||
run: |
|
||||
cargo test --release
|
||||
working-directory: tests
|
||||
env:
|
||||
PROTOC_VERSION: "3.25.3"
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20.x'
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Install stable toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
components: rustfmt, clippy
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: cargo fmt
|
||||
if: success() || failure()
|
||||
run: cargo fmt -- --check
|
||||
- name: cargo clippy
|
||||
if: success() || failure()
|
||||
with:
|
||||
shared-key: "stable"
|
||||
- name: Install protoc
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: protoc@${{ env.PROTOC_VERSION }}
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
cargo clippy --release -- -D warnings
|
||||
sudo apt update
|
||||
sudo apt install libwebkit2gtk-4.1-dev \
|
||||
build-essential \
|
||||
curl \
|
||||
wget \
|
||||
file \
|
||||
libxdo-dev \
|
||||
libssl-dev \
|
||||
libayatana-appindicator3-dev \
|
||||
librsvg2-dev
|
||||
- name: Check formatting
|
||||
run: cargo fmt --all --check
|
||||
- name: Run clippy
|
||||
run: cargo clippy -p de_mls_desktop_ui --all-features --tests -- -D warnings
|
||||
|
||||
docs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Install protoc
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: protoc@${{ env.PROTOC_VERSION }}
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
shared-key: "stable"
|
||||
- name: Generate documentation
|
||||
run: |
|
||||
cargo doc --lib --no-deps --all-features --document-private-items
|
||||
env:
|
||||
RUSTDOCFLAGS: -Dwarnings
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
shared-key: "stable"
|
||||
- name: Install protoc
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: protoc@${{ env.PROTOC_VERSION }}
|
||||
- name: Test
|
||||
run: cargo test --release
|
||||
|
||||
unused_dependencies:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
shared-key: "nightly"
|
||||
- name: Install protoc
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: protoc@${{ env.PROTOC_VERSION }}
|
||||
- name: Install cargo-udeps
|
||||
uses: taiki-e/install-action@cargo-udeps
|
||||
- name: Check for unused dependencies
|
||||
run: cargo +nightly udeps --all-targets
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -3,10 +3,6 @@
|
||||
debug/
|
||||
target/
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
Cargo.lock
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
**/*.rs.bk
|
||||
|
||||
@@ -16,6 +12,7 @@ Cargo.lock
|
||||
.DS_Store
|
||||
src/.DS_Store
|
||||
.idea
|
||||
apps/de_mls_desktop_ui/logs
|
||||
|
||||
# files
|
||||
*.env
|
||||
|
||||
9683
Cargo.lock
generated
Normal file
9683
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
81
Cargo.toml
81
Cargo.toml
@@ -1,36 +1,32 @@
|
||||
[workspace]
|
||||
members = ["ds", "mls_crypto"]
|
||||
# [workspace.dependencies]
|
||||
# foundry-contracts = { path = "crates/bindings" }
|
||||
members = [
|
||||
"apps/de_mls_desktop_ui",
|
||||
"crates/de_mls_gateway",
|
||||
"crates/de_mls_ui_protocol",
|
||||
"crates/ui_bridge",
|
||||
"ds",
|
||||
"mls_crypto",
|
||||
]
|
||||
|
||||
[package]
|
||||
name = "de-mls"
|
||||
version = "1.0.0"
|
||||
name = "de_mls"
|
||||
version = "2.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[[bin]]
|
||||
name = "de-mls"
|
||||
path = "src/main.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# foundry-contracts.workspace = true
|
||||
openmls = { version = "=0.5.0", features = ["test-utils"] }
|
||||
openmls_basic_credential = "=0.2.0"
|
||||
openmls_rust_crypto = "=0.2.0"
|
||||
openmls_traits = "=0.2.0"
|
||||
openmls = { version = "0.6.0" }
|
||||
openmls_basic_credential = "0.3.0"
|
||||
openmls_rust_crypto = "0.3.0"
|
||||
openmls_traits = "0.3.0"
|
||||
|
||||
axum = { version = "0.6.10", features = ["ws"] }
|
||||
futures = "0.3.26"
|
||||
tower-http = { version = "0.4.0", features = ["cors"] }
|
||||
tokio = { version = "=1.38.0", features = [
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
"full",
|
||||
] }
|
||||
futures = "0.3.31"
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
tokio = { version = "1.47.1", features = ["macros", "rt-multi-thread", "full"] }
|
||||
tokio-util = "0.7.13"
|
||||
alloy = { git = "https://github.com/alloy-rs/alloy", features = [
|
||||
alloy = { version = "1.0.37", features = [
|
||||
"providers",
|
||||
"node-bindings",
|
||||
"network",
|
||||
@@ -40,14 +36,14 @@ alloy = { git = "https://github.com/alloy-rs/alloy", features = [
|
||||
] }
|
||||
kameo = "0.13.0"
|
||||
|
||||
waku-bindings = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "force-cluster-15", subdir = "waku-bindings" }
|
||||
waku-sys = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "force-cluster-15", subdir = "waku-sys" }
|
||||
waku-bindings = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "rln-fix-deps" }
|
||||
waku-sys = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "rln-fix-deps" }
|
||||
|
||||
rand = "=0.8.5"
|
||||
serde_json = "=1.0"
|
||||
serde = { version = "=1.0.204", features = ["derive"] }
|
||||
tls_codec = "=0.3.0"
|
||||
chrono = "=0.4.38"
|
||||
rand = "0.8.5"
|
||||
serde_json = "1.0"
|
||||
serde = { version = "1.0.163", features = ["derive"] }
|
||||
chrono = "0.4"
|
||||
sha2 = "0.10.8"
|
||||
|
||||
secp256k1 = { version = "0.30.0", features = [
|
||||
"rand",
|
||||
@@ -58,13 +54,30 @@ secp256k1 = { version = "0.30.0", features = [
|
||||
ecies = "0.2.7"
|
||||
libsecp256k1 = "0.7.1"
|
||||
|
||||
anyhow = "=1.0.81"
|
||||
thiserror = "=1.0.61"
|
||||
anyhow = "1.0.81"
|
||||
thiserror = "1.0.39"
|
||||
uuid = "1.11.0"
|
||||
bounded-vec-deque = "0.1.1"
|
||||
|
||||
env_logger = "0.11.5"
|
||||
log = "0.4.22"
|
||||
tracing = "0.1.41"
|
||||
|
||||
ds = { path = "ds" }
|
||||
mls_crypto = { path = "mls_crypto" }
|
||||
prost = "0.13.5"
|
||||
bytes = "1.10.1"
|
||||
tower-layer = "0.3.3"
|
||||
http = "1.3.1"
|
||||
|
||||
[build-dependencies]
|
||||
prost-build = "0.13.5"
|
||||
|
||||
[profile]
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
opt-level = 1
|
||||
|
||||
[profile.server-dev]
|
||||
inherits = "dev"
|
||||
|
||||
[profile.android-dev]
|
||||
inherits = "dev"
|
||||
|
||||
25
Dockerfile
25
Dockerfile
@@ -1,25 +0,0 @@
|
||||
####################################################################################################
|
||||
## Build image
|
||||
####################################################################################################
|
||||
FROM rust:latest as builder
|
||||
|
||||
WORKDIR /app
|
||||
RUN apt-get update && apt-get install -y libssl-dev pkg-config gcc clang
|
||||
|
||||
ENV PATH="/usr/local/go/bin:${PATH}"
|
||||
COPY --from=golang:1.20 /usr/local/go/ /usr/local/go/
|
||||
|
||||
# Cache build dependencies
|
||||
RUN echo "fn main() {}" > dummy.rs
|
||||
COPY ["Cargo.toml", "./Cargo.toml"]
|
||||
COPY ["ds/", "./ds/"]
|
||||
COPY ["mls_crypto/", "./mls_crypto/"]
|
||||
RUN sed -i 's#src/main.rs#dummy.rs#' Cargo.toml
|
||||
RUN cargo build --release
|
||||
RUN sed -i 's#dummy.rs#src/main.rs#' Cargo.toml
|
||||
|
||||
# Build the actual app
|
||||
COPY ["src/", "./src/"]
|
||||
RUN cargo build --release
|
||||
|
||||
CMD ["/app/target/release/de-mls"]
|
||||
201
LICENSE
201
LICENSE
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
203
LICENSE-APACHE
Normal file
203
LICENSE-APACHE
Normal file
@@ -0,0 +1,203 @@
|
||||
Copyright (c) 2022 Vac Research
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
25
LICENSE-MIT
Normal file
25
LICENSE-MIT
Normal file
@@ -0,0 +1,25 @@
|
||||
Copyright (c) 2022 Vac Research
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE O THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
122
README.md
122
README.md
@@ -1,30 +1,120 @@
|
||||
# de-mls
|
||||
# De-MLS
|
||||
|
||||
Decentralized MLS PoC using a smart contract for group coordination
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
> Note: The frontend implementation is based on [chatr](https://github.com/0xLaurens/chatr), a real-time chat application built with Rust and SvelteKit
|
||||
Decentralized MLS proof-of-concept that coordinates secure group membership through
|
||||
off-chain consensus and a Waku relay.
|
||||
This repository now ships a native desktop client built with Dioxus that drives the MLS core directly.
|
||||
|
||||
## Run Test Waku Node
|
||||
## What’s Included
|
||||
|
||||
- **de-mls** – core library that manages MLS groups, consensus, and Waku integration
|
||||
- **crates/de_mls_gateway** – bridges UI commands (`AppCmd`) to the core runtime and streams `AppEvent`s back
|
||||
- **crates/ui_bridge** – bootstrap glue that hosts the async command loop for desktop clients
|
||||
- **apps/de_mls_desktop_ui** – Dioxus desktop UI with login, chat, stewardship, and voting flows
|
||||
- **tests/** – integration tests that exercise the MLS state machine and consensus paths
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Launch a test Waku relay
|
||||
|
||||
Run a lightweight `nwaku` node that your local clients can connect to:
|
||||
|
||||
```bash
|
||||
docker run -p 8645:8645 -p 60000:60000 wakuorg/nwaku:v0.33.1 --cluster-id=15 --rest --relay --rln-relay=false --pubsub-topic=/waku/2/rs/15/0
|
||||
docker run \
|
||||
-p 8645:8645 \
|
||||
-p 60000:60000 \
|
||||
wakuorg/nwaku:v0.33.1 \
|
||||
--cluster-id=15 \
|
||||
--rest \
|
||||
--relay \
|
||||
--rln-relay=false \
|
||||
--pubsub-topic=/waku/2/rs/15/1
|
||||
```
|
||||
|
||||
## Run User Instance
|
||||
Take note of the node multiaddr printed in the logs (looks like `/ip4/127.0.0.1/tcp/60000/p2p/<peer-id>`).
|
||||
|
||||
Create a `.env` file in the `.env` folder for each client containing the following variables:
|
||||
### 2. Set the runtime environment
|
||||
|
||||
The desktop app reads the same environment variables the MLS core uses:
|
||||
|
||||
```bash
|
||||
export NODE_PORT=60001 # UDP/TCP port the embedded Waku client will bind to
|
||||
export PEER_ADDRESSES=/ip4/127.0.0.1/tcp/60000/p2p/<peer-id>
|
||||
export RUST_LOG=info,de_mls_gateway=info # optional; controls UI + gateway logging
|
||||
```
|
||||
|
||||
Use a unique `NODE_PORT` per local client so the embedded Waku nodes do not collide.
|
||||
`PEER_ADDRESSES` accepts a comma-separated list if you want to bootstrap from multiple relays.
|
||||
|
||||
### 3. Launch the desktop application
|
||||
|
||||
```bash
|
||||
cargo run -p de_mls_desktop_ui
|
||||
```
|
||||
|
||||
The first run creates `apps/de_mls_desktop_ui/logs/de_mls_ui.log` and starts the event bridge
|
||||
and embedded Waku client.
|
||||
Repeat steps 2–3 in another terminal with a different `NODE_PORT` to simulate multiple users.
|
||||
|
||||
## Using the Desktop UI
|
||||
|
||||
- **Login screen** – paste an Ethereum-compatible secp256k1 private key (hex, with or without `0x`)
|
||||
and click `Enter`.
|
||||
On success the app derives your wallet address, stores it in session state,
|
||||
and navigates to the home layout.
|
||||
|
||||
- **Header bar** – shows the derived address and allows runtime log-level changes (`error`→`trace`).
|
||||
Log files rotate daily under `apps/de_mls_desktop_ui/logs/`.
|
||||
|
||||
- **Groups panel** – lists every MLS group returned by the gateway.
|
||||
Use `Create` or `Join` to open a modal, enter the group name,
|
||||
and the UI automatically refreshes the list and opens the group.
|
||||
|
||||
- **Chat panel** – displays live conversation messages for the active group.
|
||||
Compose text messages at the bottom; the UI also offers:
|
||||
- `Leave group` to request a self-ban (the backend fills in your address)
|
||||
- `Request ban` to request ban for another user
|
||||
Member lists are fetched automatically when a group is opened so you can
|
||||
pick existing members from the ban modal.
|
||||
|
||||
- **Consensus panel** – keeps stewards and members aligned:
|
||||
- Shows whether you are a steward for the active group
|
||||
- Lists pending steward requests collected during the current epoch
|
||||
- Surfaces the proposal currently open for voting with `YES`/`NO` buttons
|
||||
- Stores the latest proposal decisions with timestamps for quick auditing
|
||||
|
||||
## Steward State Machine
|
||||
|
||||
- **Working** – normal mode; all MLS messages are allowed
|
||||
- **Waiting** – a steward epoch is active; only the steward may push `BATCH_PROPOSALS_MESSAGE`
|
||||
- **Voting** – the consensus phase; everyone may submit `VOTE`/`USER_VOTE`,
|
||||
the steward can still publish proposal metadata
|
||||
|
||||
Transitions:
|
||||
|
||||
```text
|
||||
NAME=client1
|
||||
BACKEND_PORT=3000
|
||||
FRONTEND_PORT=4000
|
||||
NODE_NAME=<waku-node-ip>
|
||||
Working --start_steward_epoch()--> Waiting (if proposals exist)
|
||||
Working --start_steward_epoch()--> Working (if no proposals)
|
||||
Waiting --start_voting()---------> Voting
|
||||
Waiting --no_proposals_found()---> Working
|
||||
Voting --complete_voting(YES)----> Waiting --apply_proposals()--> Working
|
||||
Voting --complete_voting(NO)-----> Working
|
||||
```
|
||||
|
||||
Run docker compose up for the user instance
|
||||
Stewards always return to `Working` after an epoch finishes;
|
||||
edge cases such as missing proposals are handled defensively with detailed tracing.
|
||||
|
||||
```bash
|
||||
docker-compose --env-file ./.env/client1.env up --build
|
||||
```
|
||||
## Development Tips
|
||||
|
||||
For each client, run the following command to start the frontend on the local host with the port specified in the `.env` file
|
||||
- `cargo test` – runs the Rust unit + integration test suite
|
||||
- `cargo fmt --all check` / `cargo clippy` – keep formatting and linting consistent with the codebase
|
||||
- `RUST_BACKTRACE=full` – helpful when debugging state-machine transitions during development
|
||||
|
||||
Logs for the desktop UI live in `apps/de_mls_desktop_ui/logs/`; core logs are emitted to stdout as well.
|
||||
|
||||
## Contributing
|
||||
|
||||
Issues and pull requests are welcome. Please include reproduction steps, relevant logs,
|
||||
and test coverage where possible.
|
||||
|
||||
32
apps/de_mls_desktop_ui/Cargo.toml
Normal file
32
apps/de_mls_desktop_ui/Cargo.toml
Normal file
@@ -0,0 +1,32 @@
|
||||
[package]
|
||||
name = "de_mls_desktop_ui"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
de_mls_ui_protocol = { path = "../../crates/de_mls_ui_protocol" }
|
||||
de_mls_gateway = { path = "../../crates/de_mls_gateway" }
|
||||
ui_bridge = { path = "../../crates/ui_bridge" }
|
||||
de_mls = { path = "../../" }
|
||||
mls_crypto = { path = "../../mls_crypto" }
|
||||
|
||||
dioxus = { version = "0.6.2", features = ["signals", "router", "desktop"] }
|
||||
dioxus-desktop = "0.6.3"
|
||||
tokio = { version = "1.47.1", features = [
|
||||
"rt-multi-thread",
|
||||
"macros",
|
||||
"sync",
|
||||
"time",
|
||||
] }
|
||||
futures = "0.3.31"
|
||||
anyhow = "1.0.100"
|
||||
thiserror = "2.0.17"
|
||||
uuid = { version = "1.18.1", features = ["v4", "serde"] }
|
||||
once_cell = "1.21.3"
|
||||
parking_lot = "0.12.5"
|
||||
tracing = "0.1.41"
|
||||
tracing-subscriber = { version = "0.3.20", features = ["fmt", "env-filter"] }
|
||||
tracing-appender = "0.2.3"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
hex = "0.4"
|
||||
619
apps/de_mls_desktop_ui/assets/main.css
Normal file
619
apps/de_mls_desktop_ui/assets/main.css
Normal file
@@ -0,0 +1,619 @@
|
||||
:root {
|
||||
--bg: #0b0d10;
|
||||
--card: #14161c;
|
||||
--text: #e5e7ec;
|
||||
--muted: #9094a2;
|
||||
--primary: #00b2ff;
|
||||
--primary-2: #007ad9;
|
||||
--border: #1c1e25;
|
||||
--good: #00f5a0;
|
||||
--bad: #ff005c;
|
||||
}
|
||||
|
||||
* {
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
html,
|
||||
body,
|
||||
#main {
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
background: var(--bg);
|
||||
color: var(--text);
|
||||
font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial, Noto Sans, Apple Color Emoji, Segoe UI Emoji;
|
||||
}
|
||||
|
||||
.page {
|
||||
max-width: 1400px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
h1 {
|
||||
margin: 0 0 16px 0;
|
||||
font-size: 22px;
|
||||
}
|
||||
|
||||
.header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 12px;
|
||||
padding: 8px 12px;
|
||||
border-bottom: 1px solid var(--border);
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
position: sticky;
|
||||
top: 0;
|
||||
z-index: 5;
|
||||
}
|
||||
|
||||
.header .brand {
|
||||
font-weight: 700;
|
||||
letter-spacing: .5px;
|
||||
}
|
||||
|
||||
.header .user-hint {
|
||||
color: var(--muted);
|
||||
font-size: 12px;
|
||||
padding: 4px 8px;
|
||||
border: 1px dashed var(--border);
|
||||
border-radius: 8px;
|
||||
max-width: 420px;
|
||||
}
|
||||
|
||||
.header .spacer {
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
.header .label {
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
.header .level {
|
||||
padding: 6px 8px;
|
||||
border-radius: 8px;
|
||||
border: 1px solid var(--border);
|
||||
background: var(--card);
|
||||
color: var(--text);
|
||||
outline: none;
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
.page.login {
|
||||
max-width: 520px;
|
||||
margin-top: 32px;
|
||||
}
|
||||
|
||||
.form-row {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 6px;
|
||||
margin: 12px 0;
|
||||
}
|
||||
|
||||
input,
|
||||
select {
|
||||
padding: 10px 12px;
|
||||
border-radius: 8px;
|
||||
border: 1px solid var(--border);
|
||||
background: var(--card);
|
||||
color: var(--text);
|
||||
outline: none;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.input-error {
|
||||
color: var(--bad);
|
||||
font-size: 12px;
|
||||
}
|
||||
|
||||
button {
|
||||
border: 1px solid var(--border);
|
||||
background: var(--card);
|
||||
color: var(--text);
|
||||
padding: 10px 14px;
|
||||
border-radius: 10px;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
button.primary {
|
||||
background: var(--primary);
|
||||
border-color: var(--primary);
|
||||
color: white;
|
||||
}
|
||||
|
||||
button.primary:hover {
|
||||
background: var(--primary-2);
|
||||
}
|
||||
|
||||
button.secondary {
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
button.ghost {
|
||||
background: transparent;
|
||||
border-color: var(--border);
|
||||
color: var(--muted);
|
||||
}
|
||||
|
||||
button.icon {
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
border-radius: 8px;
|
||||
}
|
||||
|
||||
button.mini {
|
||||
padding: 4px 8px;
|
||||
border-radius: 8px;
|
||||
font-size: 12px;
|
||||
}
|
||||
|
||||
.alerts {
|
||||
position: fixed;
|
||||
top: 64px;
|
||||
right: 24px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 8px;
|
||||
z-index: 20;
|
||||
}
|
||||
|
||||
.alert {
|
||||
display: flex;
|
||||
align-items: flex-start;
|
||||
gap: 10px;
|
||||
min-width: 260px;
|
||||
max-width: 420px;
|
||||
padding: 12px 14px;
|
||||
border-radius: 10px;
|
||||
border: 1px solid var(--border);
|
||||
background: var(--card);
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.alert.error {
|
||||
border-color: rgba(255, 0, 92, 0.4);
|
||||
background: rgba(255, 0, 92, 0.1);
|
||||
color: var(--bad);
|
||||
}
|
||||
|
||||
.alert .message {
|
||||
flex: 1;
|
||||
font-size: 13px;
|
||||
line-height: 1.4;
|
||||
}
|
||||
|
||||
.member-picker {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.member-picker .helper {
|
||||
color: var(--muted);
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
.member-list {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 10px;
|
||||
max-height: 260px;
|
||||
overflow-y: auto;
|
||||
padding-right: 4px;
|
||||
}
|
||||
|
||||
.member-item {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: flex-start;
|
||||
gap: 10px;
|
||||
background: transparent;
|
||||
box-shadow: none;
|
||||
border: none;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.member-item .member-actions {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
gap: 10px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.member-item .member-id {
|
||||
font-size: 12px;
|
||||
font-weight: 400;
|
||||
letter-spacing: 0.02em;
|
||||
color: var(--text);
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
overflow-wrap: anywhere;
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
.member-item .member-choose {
|
||||
font-size: 11px;
|
||||
font-weight: 700;
|
||||
letter-spacing: 0.04em;
|
||||
text-transform: none;
|
||||
padding: 6px 12px;
|
||||
border-radius: 999px;
|
||||
border: none;
|
||||
background: var(--primary);
|
||||
color: white;
|
||||
cursor: pointer;
|
||||
box-shadow: 0 6px 18px rgba(0, 178, 255, 0.35);
|
||||
}
|
||||
|
||||
.member-item .member-choose:hover {
|
||||
background: var(--primary-2);
|
||||
box-shadow: 0 6px 18px rgba(0, 122, 217, 0.45);
|
||||
}
|
||||
|
||||
/* Home layout */
|
||||
.page.home {
|
||||
padding: 12px;
|
||||
}
|
||||
|
||||
.layout {
|
||||
display: grid;
|
||||
grid-template-columns: 280px 1fr 500px;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.mono {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
}
|
||||
|
||||
.ellipsis {
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
|
||||
.panel {
|
||||
background: var(--card);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 12px;
|
||||
padding: 12px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.panel h2 {
|
||||
margin: 0 0 6px 0;
|
||||
font-size: 18px;
|
||||
}
|
||||
|
||||
.hint {
|
||||
color: var(--muted);
|
||||
padding: 8px 0;
|
||||
}
|
||||
|
||||
.panel.groups .group-list {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
/* Group list a bit wider rows to align with long names */
|
||||
.group-row {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: 10px 12px;
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 10px;
|
||||
}
|
||||
|
||||
.group-row .title {
|
||||
font-weight: 600;
|
||||
max-width: 220px;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.panel.groups .footer {
|
||||
margin-top: auto;
|
||||
display: flex;
|
||||
justify-content: flex-end;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.panel.groups .footer .primary {
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
/* Chat */
|
||||
.panel.chat .messages {
|
||||
min-height: 360px;
|
||||
height: 58vh;
|
||||
overflow-y: auto;
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 12px;
|
||||
padding: 12px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.panel.chat .chat-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
}
|
||||
|
||||
.msg {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 4px;
|
||||
align-items: flex-start;
|
||||
}
|
||||
|
||||
.msg.me {
|
||||
align-items: flex-end;
|
||||
}
|
||||
|
||||
.msg.me .body {
|
||||
background: rgba(79, 140, 255, 0.15);
|
||||
border: 1px solid rgba(79, 140, 255, 0.35);
|
||||
padding: 8px 10px;
|
||||
border-radius: 10px;
|
||||
}
|
||||
|
||||
.msg.system {
|
||||
opacity: 0.9;
|
||||
}
|
||||
|
||||
.msg.system .body {
|
||||
font-style: italic;
|
||||
color: var(--muted);
|
||||
background: transparent;
|
||||
border: none;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.msg .from {
|
||||
color: var(--muted);
|
||||
font-size: 16px;
|
||||
}
|
||||
|
||||
.msg .body {
|
||||
color: var(--text);
|
||||
background: rgba(255, 255, 255, 0.05);
|
||||
border: 1px solid var(--border);
|
||||
padding: 8px 10px;
|
||||
border-radius: 10px;
|
||||
}
|
||||
|
||||
.composer {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.composer input {
|
||||
flex: 1;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.composer button {
|
||||
flex: 0 0 auto;
|
||||
}
|
||||
|
||||
/* Consensus panel */
|
||||
.panel.consensus .status {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.panel.consensus .status .good {
|
||||
color: var(--good);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.panel.consensus .status .bad {
|
||||
color: var(--bad);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.panel.consensus .proposal-item {
|
||||
display: grid;
|
||||
grid-template-columns: minmax(6rem, max-content) 1fr;
|
||||
align-items: start;
|
||||
gap: 8px;
|
||||
padding: 6px 8px;
|
||||
border-radius: 6px;
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.panel.consensus .proposal-item .action {
|
||||
color: var(--primary);
|
||||
font-size: 12px;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.panel.consensus .proposal-item .value {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
color: var(--text);
|
||||
font-size: 12px;
|
||||
overflow-wrap: anywhere;
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.panel.consensus .proposal-item.proposal-id {
|
||||
background: rgba(0, 178, 255, 0.08);
|
||||
border-color: rgba(0, 178, 255, 0.45);
|
||||
box-shadow: inset 0 0 0 1px rgba(0, 178, 255, 0.15);
|
||||
}
|
||||
|
||||
.panel.consensus .proposal-item.proposal-id .action {
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
|
||||
.panel.consensus .proposal-item.proposal-id .value {
|
||||
font-weight: 700;
|
||||
font-size: 13px;
|
||||
letter-spacing: 0.03em;
|
||||
}
|
||||
|
||||
/* Consensus sections */
|
||||
.panel.consensus {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.panel.consensus .status {
|
||||
flex-shrink: 0;
|
||||
margin-bottom: 16px;
|
||||
}
|
||||
|
||||
.panel.consensus .consensus-section {
|
||||
margin: 8px 0;
|
||||
padding: 12px;
|
||||
border-radius: 10px;
|
||||
background: rgba(255, 255, 255, 0.02);
|
||||
border: 1px solid var(--border);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.panel.consensus .consensus-section h3 {
|
||||
margin: 0 0 12px 0;
|
||||
font-size: 14px;
|
||||
color: var(--primary);
|
||||
border-bottom: 1px solid var(--border);
|
||||
padding-bottom: 8px;
|
||||
}
|
||||
|
||||
.panel.consensus .no-data {
|
||||
color: var(--muted);
|
||||
font-style: italic;
|
||||
text-align: center;
|
||||
padding: 20px;
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
.panel.consensus .proposals-window {
|
||||
overflow-y: auto;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.panel.consensus .vote-actions {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
justify-content: flex-end;
|
||||
margin-top: 12px;
|
||||
}
|
||||
|
||||
/* Consensus results window */
|
||||
.panel.consensus .results-window {
|
||||
overflow-y: auto;
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 8px;
|
||||
padding: 8px;
|
||||
background: rgba(255, 255, 255, 0.02);
|
||||
max-height: 200px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
.panel.consensus .result-item {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 6px 8px;
|
||||
border-radius: 6px;
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border: 1px solid var(--border);
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
.panel.consensus .result-item .proposal-id {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
color: var(--muted);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.panel.consensus .result-item .outcome {
|
||||
font-weight: 600;
|
||||
padding: 2px 6px;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.panel.consensus .result-item .outcome.accepted {
|
||||
color: var(--good);
|
||||
background: rgba(23, 201, 100, 0.1);
|
||||
}
|
||||
|
||||
.panel.consensus .result-item .outcome.rejected {
|
||||
color: var(--bad);
|
||||
background: rgba(243, 18, 96, 0.1);
|
||||
}
|
||||
|
||||
.panel.consensus .result-item .outcome.unspecified {
|
||||
color: var(--muted);
|
||||
background: rgba(163, 167, 179, 0.1);
|
||||
}
|
||||
|
||||
.panel.consensus .result-item .timestamp {
|
||||
color: var(--muted);
|
||||
font-size: 11px;
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
}
|
||||
|
||||
/* Modal */
|
||||
.modal-backdrop {
|
||||
position: fixed;
|
||||
inset: 0;
|
||||
background: rgba(0, 0, 0, .45);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.modal {
|
||||
width: 520px;
|
||||
max-width: calc(100vw - 32px);
|
||||
background: var(--card);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 14px;
|
||||
box-shadow: 0 0 6px var(--primary);
|
||||
}
|
||||
|
||||
.modal-head {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: 12px 14px;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.modal-body {
|
||||
padding: 14px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.actions {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
justify-content: flex-end;
|
||||
margin-top: 6px;
|
||||
}
|
||||
63
apps/de_mls_desktop_ui/src/logging.rs
Normal file
63
apps/de_mls_desktop_ui/src/logging.rs
Normal file
@@ -0,0 +1,63 @@
|
||||
use once_cell::sync::OnceCell;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use tracing_appender::rolling;
|
||||
use tracing_subscriber::{
|
||||
fmt,
|
||||
layer::SubscriberExt,
|
||||
reload::{Handle, Layer as ReloadLayer},
|
||||
util::SubscriberInitExt,
|
||||
EnvFilter, Registry,
|
||||
};
|
||||
|
||||
/// Global reload handle so the UI can change the filter at runtime.
|
||||
static RELOAD: OnceCell<Mutex<Handle<EnvFilter, Registry>>> = OnceCell::new();
|
||||
|
||||
/// Initialize logging: console + rolling daily file ("logs/de_mls_ui.log").
|
||||
/// Returns the initial level string actually applied.
|
||||
pub fn init_logging(default_level: &str) -> String {
|
||||
// Use env var if present, else the provided default
|
||||
let env_level = std::env::var("RUST_LOG").unwrap_or_else(|_| default_level.to_string());
|
||||
|
||||
// Build a reloadable EnvFilter
|
||||
let filter = EnvFilter::try_new(&env_level).unwrap_or_else(|_| EnvFilter::new("info"));
|
||||
let (reload_layer, handle) = ReloadLayer::new(filter);
|
||||
|
||||
// File sink (non-blocking)
|
||||
let file_appender = rolling::daily("logs", "de_mls_ui.log");
|
||||
let (file_writer, guard) = tracing_appender::non_blocking(file_appender);
|
||||
// Keep guard alive for the whole process to flush on drop
|
||||
Box::leak(Box::new(guard));
|
||||
|
||||
// Build the subscriber: registry + reloadable filter + console + file
|
||||
tracing_subscriber::registry()
|
||||
.with(reload_layer)
|
||||
.with(fmt::layer().with_writer(std::io::stdout)) // console
|
||||
.with(fmt::layer().with_writer(file_writer).with_ansi(false)) // file
|
||||
.init();
|
||||
|
||||
RELOAD.set(Mutex::new(handle)).ok();
|
||||
|
||||
// Return the level we consider “active” for the UI dropdown
|
||||
std::env::var("RUST_LOG").unwrap_or(env_level)
|
||||
}
|
||||
|
||||
/// Set the global log level dynamically, e.g. "error", "warn", "info", "debug", "trace",
|
||||
/// or a full filter string like "info,de_mls_gateway=debug".
|
||||
pub fn set_level(new_level: &str) -> Result<(), String> {
|
||||
let handle = RELOAD
|
||||
.get()
|
||||
.ok_or_else(|| "logger not initialized".to_string())?
|
||||
.lock()
|
||||
.map_err(|_| "reload handle poisoned".to_string())?;
|
||||
|
||||
let filter = EnvFilter::try_new(new_level)
|
||||
.map_err(|e| format!("invalid level/filter '{new_level}': {e}"))?;
|
||||
|
||||
// Replace the inner EnvFilter of the reloadable layer
|
||||
handle
|
||||
.modify(|inner: &mut EnvFilter| *inner = filter)
|
||||
.map_err(|e| format!("failed to apply filter: {e}"))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
971
apps/de_mls_desktop_ui/src/main.rs
Normal file
971
apps/de_mls_desktop_ui/src/main.rs
Normal file
@@ -0,0 +1,971 @@
|
||||
// apps/de_mls_desktop_ui/src/main.rs
|
||||
#![allow(non_snake_case)]
|
||||
use dioxus::prelude::*;
|
||||
use dioxus_desktop::{launch::launch as desktop_launch, Config, LogicalSize, WindowBuilder};
|
||||
use std::sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
use de_mls::{
|
||||
bootstrap_core_from_env,
|
||||
message::convert_group_requests_to_display,
|
||||
protos::{
|
||||
consensus::v1::{Outcome, ProposalResult, VotePayload},
|
||||
de_mls::messages::v1::ConversationMessage,
|
||||
},
|
||||
};
|
||||
use de_mls_gateway::GATEWAY;
|
||||
use de_mls_ui_protocol::v1::{AppCmd, AppEvent};
|
||||
use mls_crypto::normalize_wallet_address_str;
|
||||
|
||||
mod logging;
|
||||
|
||||
static CSS: Asset = asset!("/assets/main.css");
|
||||
static NEXT_ALERT_ID: AtomicU64 = AtomicU64::new(1);
|
||||
const MAX_VISIBLE_ALERTS: usize = 5;
|
||||
|
||||
// Helper function to format timestamps
|
||||
fn format_timestamp(timestamp_ms: u64) -> String {
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
// Convert to SystemTime and format
|
||||
let timestamp = UNIX_EPOCH + std::time::Duration::from_secs(timestamp_ms);
|
||||
let datetime: chrono::DateTime<chrono::Utc> = timestamp.into();
|
||||
datetime.format("%H:%M:%S").to_string()
|
||||
}
|
||||
|
||||
// ─────────────────────────── App state ───────────────────────────
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq)]
|
||||
struct SessionState {
|
||||
address: String,
|
||||
key: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq)]
|
||||
struct GroupsState {
|
||||
items: Vec<String>, // names only
|
||||
loaded: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq)]
|
||||
struct ChatState {
|
||||
opened_group: Option<String>, // which group is “Open” in the UI
|
||||
messages: Vec<ConversationMessage>, // all messages; filtered per view
|
||||
members: Vec<String>, // cached member addresses for opened group
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq)]
|
||||
struct ConsensusState {
|
||||
is_steward: bool,
|
||||
pending: Option<VotePayload>, // active/pending proposal for opened group
|
||||
// Store results with timestamps for better display
|
||||
latest_results: Vec<(u32, Outcome, u64)>, // (vote_id, result, timestamp_ms)
|
||||
// Store current epoch proposals for stewards
|
||||
current_epoch_proposals: Vec<(String, String)>, // (action, address) pairs
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
struct Alert {
|
||||
id: u64,
|
||||
message: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq)]
|
||||
struct AlertsState {
|
||||
errors: Vec<Alert>,
|
||||
}
|
||||
|
||||
fn record_error(alerts: &mut Signal<AlertsState>, message: impl Into<String>) {
|
||||
let raw = message.into();
|
||||
let summary = summarize_error(&raw);
|
||||
tracing::error!("ui error: {}", raw);
|
||||
let id = NEXT_ALERT_ID.fetch_add(1, Ordering::Relaxed);
|
||||
let mut state = alerts.write();
|
||||
state.errors.push(Alert {
|
||||
id,
|
||||
message: summary,
|
||||
});
|
||||
if state.errors.len() > MAX_VISIBLE_ALERTS {
|
||||
state.errors.remove(0);
|
||||
}
|
||||
}
|
||||
|
||||
fn dismiss_error(alerts: &mut Signal<AlertsState>, alert_id: u64) {
|
||||
alerts.write().errors.retain(|alert| alert.id != alert_id);
|
||||
}
|
||||
|
||||
fn summarize_error(raw: &str) -> String {
|
||||
let mut summary = raw
|
||||
.lines()
|
||||
.next()
|
||||
.map(|line| line.trim().to_string())
|
||||
.unwrap_or_else(|| raw.trim().to_string());
|
||||
const MAX_LEN: usize = 160;
|
||||
if summary.len() > MAX_LEN {
|
||||
summary.truncate(MAX_LEN.saturating_sub(1));
|
||||
summary.push('…');
|
||||
}
|
||||
if summary.is_empty() {
|
||||
"Unexpected error".to_string()
|
||||
} else {
|
||||
summary
|
||||
}
|
||||
}
|
||||
|
||||
// ─────────────────────────── Routing ───────────────────────────
|
||||
|
||||
#[derive(Routable, Clone, PartialEq)]
|
||||
enum Route {
|
||||
#[route("/")]
|
||||
Login,
|
||||
#[route("/home")]
|
||||
Home, // unified page
|
||||
}
|
||||
|
||||
// ─────────────────────────── Entry ───────────────────────────
|
||||
|
||||
fn main() {
|
||||
let initial_level = logging::init_logging("info");
|
||||
tracing::info!("🚀 DE-MLS Desktop UI starting… level={}", initial_level);
|
||||
|
||||
// Build a small RT to run the async bootstrap before the UI
|
||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("rt");
|
||||
|
||||
rt.block_on(async {
|
||||
let boot = bootstrap_core_from_env()
|
||||
.await
|
||||
.expect("bootstrap_core_from_env failed");
|
||||
// hand CoreCtx to the gateway via the UI bridge
|
||||
ui_bridge::start_ui_bridge(boot.core.clone());
|
||||
boot.core
|
||||
});
|
||||
|
||||
let config = Config::new().with_window(
|
||||
WindowBuilder::new()
|
||||
.with_title("DE-MLS Desktop UI")
|
||||
.with_inner_size(LogicalSize::new(1280, 820))
|
||||
.with_resizable(true),
|
||||
);
|
||||
|
||||
tracing::info!("Launching desktop application");
|
||||
desktop_launch(App, vec![], vec![Box::new(config)]);
|
||||
}
|
||||
|
||||
fn App() -> Element {
|
||||
use_context_provider(|| Signal::new(AlertsState::default()));
|
||||
use_context_provider(|| Signal::new(SessionState::default()));
|
||||
use_context_provider(|| Signal::new(GroupsState::default()));
|
||||
use_context_provider(|| Signal::new(ChatState::default()));
|
||||
use_context_provider(|| Signal::new(ConsensusState::default()));
|
||||
|
||||
rsx! {
|
||||
document::Stylesheet { href: CSS }
|
||||
HeaderBar {}
|
||||
AlertsCenter {}
|
||||
Router::<Route> {}
|
||||
}
|
||||
}
|
||||
|
||||
/// Fixed top bar: shows the app brand, the logged-in address (when any),
/// and a dropdown that changes the tracing log level at runtime.
fn HeaderBar() -> Element {
    // local signal to reflect current level in the select;
    // seeded from RUST_LOG so the dropdown matches the process env.
    let mut level = use_signal(|| std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()));
    let session = use_context::<Signal<SessionState>>();
    let my_addr = session.read().address.clone();

    // Apply the new level first; only reflect it in the UI if it took effect.
    let on_change = {
        move |evt: FormEvent| {
            let new_val = evt.value();
            if let Err(e) = crate::logging::set_level(&new_val) {
                tracing::warn!("failed to set log level: {}", e);
            } else {
                level.set(new_val);
            }
        }
    };

    rsx! {
        div { class: "header",
            div { class: "brand", "DE-MLS" }
            // Address is empty until login succeeds.
            if !my_addr.is_empty() {
                span { class: "user-hint mono ellipsis", title: "{my_addr}", "{my_addr}" }
            }
            div { class: "spacer" }
            label { class: "label", "Log level" }
            select {
                class: "level",
                value: "{level}",
                oninput: on_change,
                option { value: "error", "error" }
                option { value: "warn", "warn" }
                option { value: "info", "info" }
                option { value: "debug", "debug" }
                option { value: "trace", "trace" }
            }
        }
    }
}
|
||||
|
||||
// ─────────────────────────── Pages ───────────────────────────
|
||||
|
||||
/// Login page: collects a private key and sends `AppCmd::Login`.
///
/// Runs its own gateway event loop so that only this view consumes
/// `LoggedIn`; on success it stores the address in the session and
/// navigates to `Route::Home`, ending the loop.
fn Login() -> Element {
    let nav = use_navigator();
    let mut session = use_context::<Signal<SessionState>>();
    let mut key = use_signal(String::new);
    let mut alerts = use_context::<Signal<AlertsState>>();

    // Local single-consumer loop: only Login() steals LoggedIn events
    use_future({
        move || async move {
            loop {
                match GATEWAY.next_event().await {
                    Some(AppEvent::LoggedIn(name)) => {
                        session.write().address = name;
                        // replace (not push) so Back doesn't return to login
                        nav.replace(Route::Home);
                        break;
                    }
                    Some(AppEvent::Error(error)) => {
                        record_error(&mut alerts, error);
                    }
                    Some(other) => {
                        // Events for other views arriving pre-login are dropped.
                        tracing::debug!("login view ignored event: {:?}", other);
                    }
                    None => break, // gateway channel closed
                }
            }
        }
    });

    let oninput_key = { move |e: FormEvent| key.set(e.value()) };

    // Trim and reject empty input, remember the key, then fire the login
    // command asynchronously; the event loop above handles the outcome.
    let mut on_submit = move |_| {
        let k = key.read().trim().to_string();
        if k.is_empty() {
            return;
        }
        session.write().key = k.clone();
        spawn(async move {
            let _ = GATEWAY.send(AppCmd::Login { private_key: k }).await;
        });
    };

    rsx! {
        div { class: "page login",
            h1 { "DE-MLS — Login" }
            div { class: "form-row",
                label { "Private key" }
                input {
                    r#type: "password",
                    value: "{key}",
                    oninput: oninput_key,
                    placeholder: "0x...",
                }
            }
            button { class: "primary", onclick: move |_| { on_submit(()); }, "Enter" }
        }
    }
}
|
||||
|
||||
/// Unified main page. Hosts the three panels (groups / chat / consensus)
/// and runs the app-wide gateway event loop that routes `AppEvent`s into
/// the shared state signals.
///
/// Most arms only mutate state when the event's `group_id` matches the
/// currently opened group, so stale events for other groups are ignored.
fn Home() -> Element {
    let mut groups = use_context::<Signal<GroupsState>>();
    let mut chat = use_context::<Signal<ChatState>>();
    let mut cons = use_context::<Signal<ConsensusState>>();
    let mut alerts = use_context::<Signal<AlertsState>>();

    // Fetch the group list once per session.
    use_future({
        move || async move {
            if !groups.read().loaded {
                let _ = GATEWAY.send(AppCmd::ListGroups).await;
            }
        }
    });

    // Local event loop for handling events from the gateway
    use_future({
        move || async move {
            loop {
                match GATEWAY.next_event().await {
                    Some(AppEvent::StewardStatus {
                        group_id,
                        is_steward,
                    }) => {
                        // only update if it is the currently opened group
                        if chat.read().opened_group.as_deref() == Some(group_id.as_str()) {
                            cons.write().is_steward = is_steward;
                        }
                    }
                    Some(AppEvent::CurrentEpochProposals {
                        group_id,
                        proposals,
                    }) => {
                        // only update if it is the currently opened group
                        if chat.read().opened_group.as_deref() == Some(group_id.as_str()) {
                            cons.write().current_epoch_proposals = proposals;
                        }
                    }
                    Some(AppEvent::GroupMembers { group_id, members }) => {
                        if chat.read().opened_group.as_deref() == Some(group_id.as_str()) {
                            chat.write().members = members;
                        }
                    }
                    Some(AppEvent::ProposalAdded {
                        group_id,
                        action,
                        address,
                    }) => {
                        // only update if it is the currently opened group
                        if chat.read().opened_group.as_deref() == Some(group_id.as_str()) {
                            // Avoid duplicates: do not enqueue if the same
                            // (action, address) already exists. Address compare
                            // is case-insensitive (hex wallet addresses).
                            let exists = {
                                cons.read().current_epoch_proposals.iter().any(|(a, addr)| {
                                    a == &action && addr.eq_ignore_ascii_case(&address)
                                })
                            };
                            if !exists {
                                cons.write().current_epoch_proposals.push((action, address));
                            }
                        }
                    }
                    Some(AppEvent::CurrentEpochProposalsCleared { group_id }) => {
                        // only update if it is the currently opened group
                        if chat.read().opened_group.as_deref() == Some(group_id.as_str()) {
                            cons.write().current_epoch_proposals.clear();
                        }
                    }
                    Some(AppEvent::Groups(names)) => {
                        groups.write().items = names;
                        groups.write().loaded = true;
                    }
                    Some(AppEvent::ChatMessage(msg)) => {
                        // Messages for ALL groups are appended here; the chat
                        // view filters by group name when rendering.
                        chat.write().messages.push(msg);
                    }
                    Some(AppEvent::VoteRequested(vp)) => {
                        let opened = chat.read().opened_group.clone();
                        if opened.as_deref() == Some(vp.group_id.as_str()) {
                            cons.write().pending = Some(vp);
                        }
                    }
                    Some(AppEvent::ProposalDecided(ProposalResult {
                        group_id,
                        proposal_id,
                        outcome,
                        decided_at_ms,
                    })) => {
                        if chat.read().opened_group.as_deref() == Some(group_id.as_str()) {
                            cons.write().latest_results.push((
                                proposal_id,
                                // Unknown outcome codes degrade to Unspecified.
                                Outcome::try_from(outcome).unwrap_or(Outcome::Unspecified),
                                decided_at_ms,
                            ));
                        }
                        // Close the vote window regardless of which group decided.
                        cons.write().pending = None;
                    }
                    Some(AppEvent::GroupRemoved(name)) => {
                        let mut g = groups.write();
                        g.items.retain(|n| n != &name);
                        // If the removed group was open, reset the chat view.
                        if chat.read().opened_group.as_deref() == Some(name.as_str()) {
                            chat.write().opened_group = None;
                            chat.write().members.clear();
                        }
                    }
                    Some(AppEvent::Error(error)) => {
                        record_error(&mut alerts, error);
                    }
                    Some(_) => {}
                    None => break, // gateway channel closed
                }
            }
        }
    });

    rsx! {
        div { class: "page home",
            div { class: "layout",
                GroupListSection {}
                ChatSection {}
                ConsensusSection {}
            }
        }
    }
}
|
||||
|
||||
/// Renders the stack of dismissible error alerts from `AlertsState`.
/// Each alert is keyed by its stable id so dismissal removes the right one.
fn AlertsCenter() -> Element {
    let alerts = use_context::<Signal<AlertsState>>();
    // Snapshot the list so the read guard is not held during rendering.
    let items = alerts.read().errors.clone();
    rsx! {
        div { class: "alerts",
            for alert in items.iter() {
                AlertItem {
                    key: "{alert.id}",
                    alert_id: alert.id,
                    message: alert.message.clone(),
                }
            }
        }
    }
}
|
||||
|
||||
/// Props for a single alert row.
#[derive(Props, PartialEq, Clone)]
struct AlertItemProps {
    // Stable id used to dismiss this alert from AlertsState.
    alert_id: u64,
    // Human-readable error text shown to the user.
    message: String,
}
|
||||
|
||||
/// One dismissible alert row; the ✕ button removes it by id.
fn AlertItem(props: AlertItemProps) -> Element {
    let mut alerts = use_context::<Signal<AlertsState>>();
    let alert_id = props.alert_id;
    let message = props.message.clone();
    let dismiss = move |_| {
        dismiss_error(&mut alerts, alert_id);
    };

    rsx! {
        div { class: "alert error",
            span { class: "message", "{message}" }
            button { class: "ghost icon", onclick: dismiss, "✕" }
        }
    }
}
|
||||
|
||||
// ─────────────────────────── Sections ───────────────────────────
|
||||
|
||||
/// Left panel: the list of groups plus Create/Join actions.
///
/// Opening a group sets it as the chat's active group and fires the batch
/// of commands that hydrate its state (history, steward status, proposals,
/// members).
fn GroupListSection() -> Element {
    let groups_state = use_context::<Signal<GroupsState>>();
    let mut chat = use_context::<Signal<ChatState>>();
    let mut show_modal = use_signal(|| false);
    let mut new_name = use_signal(String::new);
    let mut create_mode = use_signal(|| true); // true=create, false=join

    // Snapshot state so no read guard is held across rendering.
    let items_snapshot: Vec<String> = groups_state.read().items.clone();
    let loaded = groups_state.read().loaded;

    // Mark the group as opened and request its data from the gateway.
    let mut open_group = {
        move |name: String| {
            chat.write().opened_group = Some(name.clone());
            chat.write().members.clear();
            let group_id = name.clone();
            spawn(async move {
                let _ = GATEWAY
                    .send(AppCmd::EnterGroup {
                        group_id: group_id.clone(),
                    })
                    .await;
                let _ = GATEWAY
                    .send(AppCmd::LoadHistory {
                        group_id: group_id.clone(),
                    })
                    .await;
                let _ = GATEWAY
                    .send(AppCmd::GetStewardStatus {
                        group_id: group_id.clone(),
                    })
                    .await;
                let _ = GATEWAY
                    .send(AppCmd::GetCurrentEpochProposals {
                        group_id: group_id.clone(),
                    })
                    .await;
                let _ = GATEWAY
                    .send(AppCmd::GetGroupMembers {
                        group_id: group_id.clone(),
                    })
                    .await;
            });
        }
    };

    // Confirm handler for the Create/Join modal.
    // NOTE(review): `open_group(name)` runs immediately after spawning the
    // create/join command, so the hydrate commands may race the group's
    // actual creation/join on the backend — confirm the gateway tolerates
    // commands for a not-yet-ready group.
    let mut modal_submit = {
        move |_| {
            let name = new_name.read().trim().to_string();
            if name.is_empty() {
                return;
            }
            let action_name = name.clone();
            if *create_mode.read() {
                spawn(async move {
                    let _ = GATEWAY
                        .send(AppCmd::CreateGroup {
                            name: action_name.clone(),
                        })
                        .await;
                    let _ = GATEWAY.send(AppCmd::ListGroups).await;
                });
            } else {
                spawn(async move {
                    let _ = GATEWAY
                        .send(AppCmd::JoinGroup {
                            name: action_name.clone(),
                        })
                        .await;
                    let _ = GATEWAY.send(AppCmd::ListGroups).await;
                });
            }
            open_group(name);
            new_name.set(String::new());
            show_modal.set(false);
        }
    };

    rsx! {
        div { class: "panel groups",
            h2 { "Groups" }

            if !loaded {
                div { class: "hint", "Loading groups…" }
            } else if items_snapshot.is_empty() {
                div { class: "hint", "No groups yet." }
            } else {
                ul { class: "group-list",
                    for name in items_snapshot.into_iter() {
                        li {
                            key: "{name}",
                            class: "group-row",
                            div { class: "title", "{name}" }
                            button {
                                class: "secondary",
                                onclick: move |_| { open_group(name.clone()); },
                                "Open"
                            }
                        }
                    }
                }
            }

            div { class: "footer",
                button { class: "primary", onclick: move |_| { create_mode.set(true); show_modal.set(true); }, "Create" }
                button { class: "primary", onclick: move |_| { create_mode.set(false); show_modal.set(true); }, "Join" }
            }

            if *show_modal.read() {
                Modal {
                    title: if *create_mode.read() { "Create Group".to_string() } else { "Join Group".to_string() },
                    on_close: move || { show_modal.set(false); },
                    div { class: "form-row",
                        label { "Group name" }
                        input {
                            r#type: "text",
                            value: "{new_name}",
                            oninput: move |e| new_name.set(e.value()),
                            placeholder: "mls-devs",
                        }
                    }

                    div { class: "actions",
                        button { class: "primary", onclick: move |_| { modal_submit(()); }, "Confirm" }
                        button { class: "ghost", onclick: move |_| { show_modal.set(false); }, "Cancel" }
                    }
                }
            }
        }
    }
}
|
||||
|
||||
/// Middle panel: message history and composer for the opened group, plus
/// the "Leave group" action (implemented as a self-ban request) and the
/// ban-request modal with a member picker.
fn ChatSection() -> Element {
    let chat = use_context::<Signal<ChatState>>();
    let session = use_context::<Signal<SessionState>>();
    let mut msg_input = use_signal(String::new);
    let mut show_ban_modal = use_signal(|| false);
    let mut ban_address = use_signal(String::new);
    let mut ban_error = use_signal(|| Option::<String>::None);

    // Send the composer text to the opened group; clears the input
    // optimistically before the command completes.
    let send_msg = {
        move |_| {
            let text = msg_input.read().trim().to_string();
            if text.is_empty() {
                return;
            }
            let Some(gid) = chat.read().opened_group.clone() else {
                return;
            };

            msg_input.set(String::new());
            spawn(async move {
                let _ = GATEWAY
                    .send(AppCmd::SendMessage {
                        group_id: gid,
                        body: text,
                    })
                    .await;
            });
        }
    };

    // Open the ban modal; also refresh the member list for the picker.
    let open_ban_modal = {
        move |_| {
            if let Some(gid) = chat.read().opened_group.clone() {
                spawn(async move {
                    let _ = GATEWAY
                        .send(AppCmd::GetGroupMembers {
                            group_id: gid.clone(),
                        })
                        .await;
                });
            }
            ban_error.set(None);
            show_ban_modal.set(true);
        }
    };

    // Validate the typed/picked address, then submit the ban request.
    // Validation errors are shown inline and keep the modal open.
    let submit_ban_request = {
        move |_| {
            let raw = ban_address.read().to_string();
            let target = match normalize_wallet_address_str(&raw) {
                Ok(addr) => addr,
                Err(err) => {
                    ban_error.set(Some(err.to_string()));
                    return;
                }
            };

            let opened = chat.read().opened_group.clone();
            let Some(group_id) = opened else {
                return;
            };

            ban_error.set(None);
            show_ban_modal.set(false);
            ban_address.set(String::new());

            let addr_to_ban = target.clone();
            spawn(async move {
                let _ = GATEWAY
                    .send(AppCmd::SendBanRequest {
                        group_id: group_id.clone(),
                        user_to_ban: addr_to_ban,
                    })
                    .await;
            });
        }
    };

    // Typing a new address clears any previous validation error.
    let oninput_ban_address = {
        move |e: FormEvent| {
            ban_error.set(None);
            ban_address.set(e.value())
        }
    };

    let close_ban_modal = {
        move || {
            ban_address.set(String::new());
            ban_error.set(None);
            show_ban_modal.set(false);
        }
    };

    let cancel_ban_modal = {
        move |_| {
            ban_address.set(String::new());
            ban_error.set(None);
            show_ban_modal.set(false);
        }
    };

    // Only messages belonging to the currently opened group are shown.
    let msgs_for_group = {
        let opened = chat.read().opened_group.clone();
        chat.read()
            .messages
            .iter()
            .filter(|m| Some(m.group_name.as_str()) == opened.as_deref())
            .cloned()
            .collect::<Vec<_>>()
    };

    // Arc lets the address be shared into the leave-group closure cheaply.
    let my_name = Arc::new(session.read().address.clone());
    let my_name_for_leave = my_name.clone();

    // Member picker excludes the current user (case-insensitive compare).
    let members_snapshot = chat.read().members.clone();
    let my_address = (*my_name).clone();
    let selectable_members: Vec<String> = members_snapshot
        .into_iter()
        .filter(|member| !member.eq_ignore_ascii_case(&my_address))
        .collect();

    // Returns a per-member click handler that fills the address input.
    let pick_member_handler = {
        move |member: String| {
            move |_| {
                ban_error.set(None);
                ban_address.set(member.clone());
            }
        }
    };

    rsx! {
        div { class: "panel chat",
            div { class: "chat-header",
                h2 { "Chat" }
                if let Some(gid) = chat.read().opened_group.clone() {
                    button {
                        class: "ghost mini",
                        onclick: move |_| {
                            let group_id = gid.clone();
                            let addr = my_name_for_leave.clone();
                            // Send a self-ban (leave) request: requester filled by backend
                            spawn(async move {
                                let _ = GATEWAY
                                    .send(AppCmd::SendBanRequest { group_id: group_id.clone(), user_to_ban: (*addr).clone() })
                                    .await;
                            });
                        },
                        "Leave group"
                    }
                    button {
                        class: "ghost mini",
                        onclick: open_ban_modal,
                        "Request ban"
                    }
                }
            }
            if chat.read().opened_group.is_none() {
                div { class: "hint", "Pick a group to chat." }
            } else {
                div { class: "messages",
                    for (i, m) in msgs_for_group.iter().enumerate() {
                        // Three render styles: own messages, system notices,
                        // and messages from other members.
                        if (*my_name).clone() == m.sender || m.sender.eq_ignore_ascii_case("me") {
                            div { key: "{i}", class: "msg me",
                                span { class: "from", "{m.sender}" }
                                span { class: "body", "{String::from_utf8_lossy(&m.message)}" }
                            }
                        } else if m.sender.eq_ignore_ascii_case("system") {
                            div { key: "{i}", class: "msg system",
                                span { class: "body", "{String::from_utf8_lossy(&m.message)}" }
                            }
                        } else {
                            div { key: "{i}", class: "msg",
                                span { class: "from", "{m.sender}" }
                                span { class: "body", "{String::from_utf8_lossy(&m.message)}" }
                            }
                        }
                    }
                }
                div { class: "composer",
                    input {
                        r#type: "text",
                        value: "{msg_input}",
                        oninput: move |e| msg_input.set(e.value()),
                        placeholder: "Type a message…",
                    }
                    button { class: "primary", onclick: send_msg, "Send" }
                }
            }
        }

        if *show_ban_modal.read() {
            Modal {
                title: "Request user ban".to_string(),
                on_close: close_ban_modal,
                div { class: "form-row",
                    label { "User address" }
                    input {
                        r#type: "text",
                        value: "{ban_address}",
                        oninput: oninput_ban_address,
                        placeholder: "0x...",
                    }
                    if let Some(error) = &*ban_error.read() {
                        span { class: "input-error", "{error}" }
                    }
                }
                if selectable_members.is_empty() {
                    div { class: "hint muted", "No members loaded yet." }
                } else {
                    div { class: "member-picker",
                        span { class: "helper", "Or pick a member:" }
                        div { class: "member-list",
                            for member in selectable_members.iter() {
                                div {
                                    key: "{member}",
                                    class: "member-item",
                                    div { class: "member-actions",
                                        span { class: "member-id mono", "{member}" }
                                        button {
                                            class: "member-choose",
                                            onclick: pick_member_handler(member.clone()),
                                            "Choose"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
                div { class: "actions",
                    button { class: "primary", onclick: submit_ban_request, "Submit" }
                    button {
                        class: "ghost",
                        onclick: cancel_ban_modal,
                        "Cancel"
                    }
                }
            }
        }
    }
}
|
||||
|
||||
/// Right panel: steward status, pending requests, the active vote (if any
/// for the opened group), and the list of recent decisions.
fn ConsensusSection() -> Element {
    let chat = use_context::<Signal<ChatState>>();
    let mut cons = use_context::<Signal<ConsensusState>>();

    // Cast a YES vote for the pending proposal and close the vote window
    // immediately (optimistically, before the command round-trips).
    let vote_yes = {
        move |_| {
            let pending_proposal = cons.read().pending.clone();
            if let Some(v) = pending_proposal {
                // Clear the pending proposal immediately to close the vote window
                cons.write().pending = None;
                spawn(async move {
                    let _ = GATEWAY
                        .send(AppCmd::Vote {
                            group_id: v.group_id.clone(),
                            proposal_id: v.proposal_id,
                            choice: true,
                        })
                        .await;
                });
            }
        }
    };
    // Same as vote_yes, but casts NO.
    let vote_no = {
        move |_| {
            let pending_proposal = cons.read().pending.clone();
            if let Some(v) = pending_proposal {
                // Clear the pending proposal immediately to close the vote window
                cons.write().pending = None;
                spawn(async move {
                    let _ = GATEWAY
                        .send(AppCmd::Vote {
                            group_id: v.group_id.clone(),
                            proposal_id: v.proposal_id,
                            choice: false,
                        })
                        .await;
                });
            }
        }
    };

    let opened = chat.read().opened_group.clone();
    // Only surface the pending vote if it belongs to the opened group.
    let pending = cons
        .read()
        .pending
        .clone()
        .filter(|p| Some(p.group_id.as_str()) == opened.as_deref());

    rsx! {
        div { class: "panel consensus",
            h2 { "Consensus" }

            if let Some(_group) = opened {
                // Steward status
                div { class: "status",
                    span { class: "muted", "You are " }
                    if cons.read().is_steward {
                        span { class: "good", "a steward" }
                    } else {
                        span { class: "bad", "not a steward" }
                    }
                }

                // Pending Requests section (visible content only for stewards)
                div { class: "consensus-section",
                    h3 { "Pending Requests" }
                    if cons.read().is_steward && !cons.read().current_epoch_proposals.is_empty() {
                        div { class: "proposals-window",
                            for (action, address) in &cons.read().current_epoch_proposals {
                                div { class: "proposal-item",
                                    span { class: "action", "{action}:" }
                                    span { class: "value", "{address}" }
                                }
                            }
                        }
                    } else {
                        div { class: "no-data", "No pending requests" }
                    }
                }

                // Proposal for Vote section
                div { class: "consensus-section",
                    h3 { "Proposal for Vote" }
                    if let Some(v) = pending {
                        div { class: "proposals-window",
                            div { class: "proposal-item proposal-id",
                                span { class: "action", "Proposal ID:" }
                                span { class: "value", "{v.proposal_id}" }
                            }
                            for (action, id) in convert_group_requests_to_display(&v.group_requests) {
                                div { class: "proposal-item",
                                    span { class: "action", "{action}:" }
                                    span { class: "value", "{id}" }
                                }
                            }
                        }
                        div { class: "vote-actions",
                            button { class: "primary", onclick: vote_yes, "YES" }
                            button { class: "ghost", onclick: vote_no, "NO" }
                        }
                    } else {
                        div { class: "no-data", "No proposal for vote" }
                    }
                }

                // Latest Decisions section (newest first via .rev())
                div { class: "consensus-section",
                    h3 { "Latest Decisions" }
                    if cons.read().latest_results.is_empty() {
                        div { class: "no-data", "No latest decisions" }
                    } else {
                        div { class: "results-window",
                            for (vid, res, timestamp_ms) in cons.read().latest_results.iter().rev() {
                                div { class: "result-item",
                                    span { class: "proposal-id", "{vid}" }
                                    span {
                                        class: match res {
                                            Outcome::Accepted => "outcome accepted",
                                            Outcome::Rejected => "outcome rejected",
                                            Outcome::Unspecified => "outcome unspecified",
                                        },
                                        match res {
                                            Outcome::Accepted => "Accepted",
                                            Outcome::Rejected => "Rejected",
                                            Outcome::Unspecified => "Unspecified",
                                        }
                                    }
                                    span { class: "timestamp",
                                        "{format_timestamp(*timestamp_ms)}"
                                    }
                                }
                            }
                        }
                    }
                }
            } else {
                div { class: "hint", "Open a group to see proposals & voting." }
            }
        }
    }
}
|
||||
|
||||
// ─────────────────────────── Modal ───────────────────────────
|
||||
|
||||
/// Props for the generic modal dialog.
#[derive(Props, PartialEq, Clone)]
struct ModalProps {
    // Heading shown in the modal header.
    title: String,
    // Arbitrary modal body content supplied by the caller.
    children: Element,
    // Invoked when the user clicks the backdrop or the ✕ button.
    on_close: EventHandler,
}
|
||||
/// Generic modal: backdrop click closes; clicks inside the dialog are
/// stopped from propagating so they don't trigger the backdrop handler.
fn Modal(props: ModalProps) -> Element {
    rsx! {
        div { class: "modal-backdrop", onclick: move |_| (props.on_close)(()),
            div { class: "modal", onclick: move |e| e.stop_propagation(),
                div { class: "modal-head",
                    h3 { "{props.title}" }
                    button { class: "icon", onclick: move |_| (props.on_close)(()), "✕" }
                }
                div { class: "modal-body", {props.children} }
            }
        }
    }
}
|
||||
11
build.rs
Normal file
11
build.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
fn main() -> Result<(), std::io::Error> {
|
||||
prost_build::compile_protos(
|
||||
&[
|
||||
"src/protos/messages/v1/consensus.proto",
|
||||
"src/protos/messages/v1/welcome.proto",
|
||||
"src/protos/messages/v1/application.proto",
|
||||
],
|
||||
&["src/protos/"],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
27
crates/de_mls_gateway/Cargo.toml
Normal file
27
crates/de_mls_gateway/Cargo.toml
Normal file
@@ -0,0 +1,27 @@
|
||||
# Crate manifest for the UI gateway: bridges the DE-MLS core to the
# desktop UI. Not published (workspace-internal).
[package]
name = "de_mls_gateway"
version = "0.1.0"
edition = "2021"
publish = false

[dependencies]
# Async runtime; only the features the gateway actually uses.
tokio = { version = "1.43.0", features = [
    "macros",
    "rt-multi-thread",
    "sync",
    "time",
] }
anyhow = "1.0.100"
# Actor framework used for the user actor.
kameo = "0.13.0"
futures = "0.3.31"
uuid = { version = "1.18.1", features = ["v4", "serde"] }

# Workspace-local crates.
de_mls_ui_protocol = { path = "../de_mls_ui_protocol" }
ds = { path = "../../ds" }
mls_crypto = { path = "../../mls_crypto" }
once_cell = "1.21.3"
parking_lot = "0.12.5"
de_mls = { path = "../../" }
tracing = "0.1.41"
tracing-subscriber = "0.3.20"
hex = "0.4"
||||
151
crates/de_mls_gateway/src/forwarder.rs
Normal file
151
crates/de_mls_gateway/src/forwarder.rs
Normal file
@@ -0,0 +1,151 @@
|
||||
use ds::DeliveryService;
|
||||
use kameo::actor::ActorRef;
|
||||
|
||||
use std::sync::{atomic::Ordering, Arc};
|
||||
use tracing::info;
|
||||
|
||||
use de_mls::{
|
||||
message::MessageType,
|
||||
protos::de_mls::messages::v1::{app_message, ConversationMessage},
|
||||
user::{User, UserAction},
|
||||
user_actor::LeaveGroupRequest,
|
||||
user_app_instance::CoreCtx,
|
||||
};
|
||||
use de_mls_ui_protocol::v1::AppEvent;
|
||||
|
||||
use crate::Gateway;
|
||||
|
||||
impl<DS: DeliveryService> Gateway<DS> {
|
||||
pub(crate) fn spawn_consensus_forwarder(&self, core: Arc<CoreCtx<DS>>) -> anyhow::Result<()> {
|
||||
let evt_tx = self.evt_tx.clone();
|
||||
let mut rx = core.consensus.subscribe_decisions();
|
||||
|
||||
tokio::spawn(async move {
|
||||
tracing::info!("gateway: consensus forwarder started");
|
||||
while let Ok(res) = rx.recv().await {
|
||||
let _ = evt_tx.unbounded_send(AppEvent::ProposalDecided(res));
|
||||
}
|
||||
tracing::info!("gateway: consensus forwarder ended");
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Spawn the pubsub forwarder once, after first successful login.
|
||||
pub(crate) fn spawn_waku_forwarder(&self, core: Arc<CoreCtx<DS>>, user: ActorRef<User>) {
|
||||
if self.started.swap(true, Ordering::SeqCst) {
|
||||
return;
|
||||
}
|
||||
|
||||
let evt_tx = self.evt_tx.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut rx = core.app_state.pubsub.subscribe();
|
||||
tracing::info!("gateway: pubsub forwarder started");
|
||||
|
||||
while let Ok(pkt) = rx.recv().await {
|
||||
// fast-topic filter
|
||||
if !core.topics.contains(&pkt.group_id, &pkt.subtopic).await {
|
||||
continue;
|
||||
}
|
||||
|
||||
// hand over to user actor to decide action
|
||||
let action = match user.ask(pkt).await {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
tracing::warn!("user.ask failed: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// route the action
|
||||
let res = match action {
|
||||
UserAction::Outbound(msg) => core
|
||||
.app_state
|
||||
.delivery
|
||||
.send(msg)
|
||||
.await
|
||||
.map(|_| ())
|
||||
.map_err(|e| anyhow::anyhow!("error sending outbound message: {e}")),
|
||||
|
||||
UserAction::SendToApp(app_msg) => {
|
||||
// voting
|
||||
let res = match &app_msg.payload {
|
||||
Some(app_message::Payload::VotePayload(vp)) => evt_tx
|
||||
.unbounded_send(AppEvent::VoteRequested(vp.clone()))
|
||||
.map_err(|e| {
|
||||
anyhow::anyhow!("error sending vote requested event: {e}")
|
||||
})
|
||||
.and_then(|_| {
|
||||
// Also clear current epoch proposals when voting starts
|
||||
evt_tx.unbounded_send(AppEvent::CurrentEpochProposalsCleared {
|
||||
group_id: vp.group_id.clone(),
|
||||
})
|
||||
.map_err(|e| {
|
||||
anyhow::anyhow!("error sending clear current epoch proposals event: {e}")
|
||||
})
|
||||
}),
|
||||
Some(app_message::Payload::ProposalAdded(pa)) => evt_tx
|
||||
.unbounded_send(AppEvent::from(pa.clone()))
|
||||
.map_err(|e| {
|
||||
anyhow::anyhow!("error sending proposal added event: {e}")
|
||||
}),
|
||||
Some(app_message::Payload::BanRequest(br)) => evt_tx
|
||||
.unbounded_send(AppEvent::from(br.clone()))
|
||||
.map_err(|e| {
|
||||
anyhow::anyhow!("error sending proposal added event (ban request): {e}")
|
||||
}),
|
||||
Some(app_message::Payload::ConversationMessage(cm)) => evt_tx
|
||||
.unbounded_send(AppEvent::ChatMessage(ConversationMessage {
|
||||
message: cm.message.clone(),
|
||||
sender: cm.sender.clone(),
|
||||
group_name: cm.group_name.clone(),
|
||||
}))
|
||||
.map_err(|e| anyhow::anyhow!("error sending chat message: {e}")),
|
||||
_ => {
|
||||
AppEvent::Error(format!("Invalid app message: {:?}", app_msg.payload.unwrap().message_type()));
|
||||
Ok::<(), anyhow::Error>(())
|
||||
}
|
||||
};
|
||||
match res {
|
||||
Ok(()) => Ok(()),
|
||||
Err(e) => Err(anyhow::anyhow!("error sending app message: {e}")),
|
||||
}
|
||||
}
|
||||
|
||||
UserAction::LeaveGroup(group_name) => {
|
||||
let _ = user
|
||||
.ask(LeaveGroupRequest {
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("error leaving group: {e}"));
|
||||
|
||||
core.topics.remove_many(&group_name).await;
|
||||
info!("Leave group: {:?}", &group_name);
|
||||
|
||||
let _ = evt_tx
|
||||
.unbounded_send(AppEvent::GroupRemoved(group_name.clone()))
|
||||
.map_err(|e| anyhow::anyhow!("error sending group removed event: {e}"));
|
||||
|
||||
let _ = evt_tx
|
||||
.unbounded_send(AppEvent::ChatMessage(ConversationMessage {
|
||||
message: format!("You're removed from the group {group_name}")
|
||||
.into_bytes(),
|
||||
sender: "system".to_string(),
|
||||
group_name: group_name.clone(),
|
||||
}))
|
||||
.map_err(|e| anyhow::anyhow!("error sending chat message: {e}"));
|
||||
Ok::<(), anyhow::Error>(())
|
||||
}
|
||||
UserAction::DoNothing => Ok(()),
|
||||
};
|
||||
|
||||
if let Err(e) = res {
|
||||
tracing::warn!("error handling waku action: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
tracing::info!("gateway: pubsub forwarder ended");
|
||||
});
|
||||
}
|
||||
}
|
||||
282
crates/de_mls_gateway/src/group.rs
Normal file
282
crates/de_mls_gateway/src/group.rs
Normal file
@@ -0,0 +1,282 @@
|
||||
use ds::DeliveryService;
|
||||
use std::time::Duration;
|
||||
use tracing::info;
|
||||
|
||||
use de_mls::{
|
||||
protos::de_mls::messages::v1::{app_message, BanRequest},
|
||||
steward,
|
||||
user::UserAction,
|
||||
user_actor::{
|
||||
CreateGroupRequest, GetCurrentEpochProposalsRequest, GetGroupMembersRequest,
|
||||
GetProposalsForStewardVotingRequest, IsStewardStatusRequest, SendGroupMessage,
|
||||
StartStewardEpochRequest, StewardMessageRequest, UserVoteRequest,
|
||||
},
|
||||
user_app_instance::STEWARD_EPOCH,
|
||||
};
|
||||
use de_mls_ui_protocol::v1::AppEvent;
|
||||
|
||||
use crate::Gateway;
|
||||
|
||||
impl<DS: DeliveryService> Gateway<DS> {
|
||||
    /// Create a new group, register its topics, and start this node's
    /// periodic steward loop for it.
    ///
    /// The spawned task ticks every `STEWARD_EPOCH` seconds and, per tick:
    /// 1. starts a steward epoch (returns the pending-proposal count),
    /// 2. publishes the new steward key message,
    /// 3. if proposals exist, fetches them and pushes a vote request
    ///    (plus a proposals-cleared event) to the UI.
    pub async fn create_group(&self, group_name: String) -> anyhow::Result<()> {
        let core = self.core();
        let user = self.user()?;
        user.ask(CreateGroupRequest {
            group_name: group_name.clone(),
            is_creation: true,
        })
        .await?;
        core.topics.add_many(&group_name).await;
        core.groups.insert(group_name.clone()).await;
        info!("User start sending steward message for group {group_name:?}");
        let user_clone = user.clone();
        let group_name_clone = group_name.clone();
        let evt_tx_clone = self.evt_tx.clone();
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(STEWARD_EPOCH));
            loop {
                interval.tick().await;
                // Step 1: Start steward epoch - check for proposals and start epoch if needed
                let proposals_count = match user_clone
                    .ask(StartStewardEpochRequest {
                        group_name: group_name.clone(),
                    })
                    .await
                {
                    Ok(count) => count,
                    Err(e) => {
                        tracing::warn!(
                            "start steward epoch request failed for group {group_name:?}: {e}"
                        );
                        continue; // transient failure: try again next tick
                    }
                };

                // Step 2: Send new steward key to the waku node for new epoch
                let msg = match user_clone
                    .ask(StewardMessageRequest {
                        group_name: group_name.clone(),
                    })
                    .await
                {
                    Ok(msg) => msg,
                    Err(e) => {
                        tracing::warn!(
                            "steward message request failed for group {group_name:?}: {e}"
                        );
                        continue;
                    }
                };
                if let Err(e) = core.app_state.delivery.send(msg).await {
                    tracing::warn!("failed to send steward message for group {group_name:?}: {e}");
                    continue;
                }

                if proposals_count == 0 {
                    info!("No proposals to vote on for group: {group_name}, completing epoch without voting");
                } else {
                    info!("Found {proposals_count} proposals to vote on for group: {group_name}");

                    // Step 3: Start voting process - steward gets proposals for voting
                    let action = match user_clone
                        .ask(GetProposalsForStewardVotingRequest {
                            group_name: group_name.clone(),
                        })
                        .await
                    {
                        Ok(action) => action,
                        Err(e) => {
                            tracing::warn!(
                                "get proposals for steward voting failed for group {group_name:?}: {e}"
                            );
                            continue;
                        }
                    };

                    // Step 4: Send proposals to ws to steward to vote or do nothing if no proposals
                    // After voting, steward sends vote and proposal to waku node and start consensus process
                    match action {
                        UserAction::SendToApp(app_msg) => {
                            if let Some(app_message::Payload::VotePayload(vp)) = &app_msg.payload {
                                if let Err(e) =
                                    evt_tx_clone.unbounded_send(AppEvent::VoteRequested(vp.clone()))
                                {
                                    tracing::warn!("failed to send vote requested event: {e}");
                                }

                                // Also clear current epoch proposals when voting starts
                                if let Err(e) = evt_tx_clone.unbounded_send(
                                    AppEvent::CurrentEpochProposalsCleared {
                                        group_id: group_name.clone(),
                                    },
                                ) {
                                    tracing::warn!("failed to send proposals cleared event: {e}");
                                }
                            }
                            if let Some(app_message::Payload::ProposalAdded(pa)) = &app_msg.payload
                            {
                                if let Err(e) =
                                    evt_tx_clone.unbounded_send(AppEvent::from(pa.clone()))
                                {
                                    tracing::warn!("failed to send proposal added event: {e}");
                                }
                            }
                        }
                        // NOTE(review): these `return`s end the steward loop for
                        // this group permanently, unlike the `continue`s used for
                        // transient errors above — confirm that is intended and
                        // not a stuck-epoch bug.
                        UserAction::DoNothing => {
                            info!("No action to take for group: {group_name}");
                            return Ok(());
                        }
                        _ => {
                            return Err(anyhow::anyhow!("Invalid user action: {action}"));
                        }
                    }
                }
            }
        });
        tracing::debug!("User started sending steward message for group {group_name_clone:?}");
        Ok(())
    }
|
||||
|
||||
pub async fn join_group(&self, group_name: String) -> anyhow::Result<()> {
|
||||
let core = self.core();
|
||||
let user = self.user()?;
|
||||
user.ask(CreateGroupRequest {
|
||||
group_name: group_name.clone(),
|
||||
is_creation: false,
|
||||
})
|
||||
.await?;
|
||||
core.topics.add_many(&group_name).await;
|
||||
core.groups.insert(group_name.clone()).await;
|
||||
tracing::debug!("User joined group {group_name}");
|
||||
tracing::debug!(
|
||||
"User have topic for group {:?}",
|
||||
core.topics.snapshot().await
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn send_message(&self, group_name: String, message: String) -> anyhow::Result<()> {
|
||||
let core = self.core();
|
||||
let user = self.user()?;
|
||||
let pmt = user
|
||||
.ask(SendGroupMessage {
|
||||
message: message.clone().into_bytes(),
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
core.app_state.delivery.send(pmt).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn send_ban_request(
|
||||
&self,
|
||||
group_name: String,
|
||||
user_to_ban: String,
|
||||
) -> anyhow::Result<()> {
|
||||
let core = self.core();
|
||||
let user = self.user()?;
|
||||
|
||||
let ban_request = BanRequest {
|
||||
user_to_ban: user_to_ban.clone(),
|
||||
requester: String::new(),
|
||||
group_name: group_name.clone(),
|
||||
};
|
||||
|
||||
let msg = user
|
||||
.ask(de_mls::user_actor::BuildBanMessage {
|
||||
ban_request,
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
match msg {
|
||||
UserAction::Outbound(msg) => {
|
||||
core.app_state.delivery.send(msg).await?;
|
||||
}
|
||||
UserAction::SendToApp(app_msg) => {
|
||||
let event = match app_msg.payload {
|
||||
Some(app_message::Payload::ProposalAdded(ref proposal)) => {
|
||||
AppEvent::from(proposal.clone())
|
||||
}
|
||||
Some(app_message::Payload::BanRequest(ref ban_request)) => {
|
||||
AppEvent::from(ban_request.clone())
|
||||
}
|
||||
_ => return Err(anyhow::anyhow!("Invalid user action")),
|
||||
};
|
||||
self.push_event(event);
|
||||
}
|
||||
_ => return Err(anyhow::anyhow!("Invalid user action")),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn process_user_vote(
|
||||
&self,
|
||||
group_name: String,
|
||||
proposal_id: u32,
|
||||
vote: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let user = self.user()?;
|
||||
|
||||
let user_vote_result = user
|
||||
.ask(UserVoteRequest {
|
||||
group_name: group_name.clone(),
|
||||
proposal_id,
|
||||
vote,
|
||||
})
|
||||
.await?;
|
||||
if let Some(waku_msg) = user_vote_result {
|
||||
self.core().app_state.delivery.send(waku_msg).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn group_list(&self) -> Vec<String> {
|
||||
let core = self.core();
|
||||
core.groups.all().await
|
||||
}
|
||||
|
||||
pub async fn get_steward_status(&self, group_name: String) -> anyhow::Result<bool> {
|
||||
let user = self.user()?;
|
||||
let is_steward = user.ask(IsStewardStatusRequest { group_name }).await?;
|
||||
Ok(is_steward)
|
||||
}
|
||||
|
||||
/// Get current epoch proposals for the given group
|
||||
pub async fn get_current_epoch_proposals(
|
||||
&self,
|
||||
group_name: String,
|
||||
) -> anyhow::Result<Vec<(String, String)>> {
|
||||
let user = self.user()?;
|
||||
|
||||
let proposals = user
|
||||
.ask(GetCurrentEpochProposalsRequest { group_name })
|
||||
.await?;
|
||||
let display_proposals: Vec<(String, String)> = proposals
|
||||
.iter()
|
||||
.map(|proposal| match proposal {
|
||||
steward::GroupUpdateRequest::AddMember(kp) => {
|
||||
let address = format!(
|
||||
"0x{}",
|
||||
hex::encode(kp.leaf_node().credential().serialized_content())
|
||||
);
|
||||
("Add Member".to_string(), address)
|
||||
}
|
||||
steward::GroupUpdateRequest::RemoveMember(id) => {
|
||||
("Remove Member".to_string(), id.clone())
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(display_proposals)
|
||||
}
|
||||
|
||||
pub async fn get_group_members(&self, group_name: String) -> anyhow::Result<Vec<String>> {
|
||||
let user = self.user()?;
|
||||
let members = user
|
||||
.ask(GetGroupMembersRequest {
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
Ok(members)
|
||||
}
|
||||
}
|
||||
138
crates/de_mls_gateway/src/lib.rs
Normal file
138
crates/de_mls_gateway/src/lib.rs
Normal file
@@ -0,0 +1,138 @@
|
||||
//! de_mls_gateway: a thin facade between UI (AppCmd/AppEvent) and the core runtime.
|
||||
//!
|
||||
//! Responsibilities:
|
||||
//! - Own a single event pipe UI <- gateway (`AppEvent`)
|
||||
//! - Provide a command entrypoint UI -> gateway (`send(AppCmd)`)
|
||||
//! - Hold references to the core context (`CoreCtx`) and current user actor
|
||||
//! - Offer small helper methods (login_with_private_key, etc.)
|
||||
use ds::{waku::WakuDeliveryService, DeliveryService};
|
||||
use futures::{
|
||||
channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender},
|
||||
StreamExt,
|
||||
};
|
||||
use kameo::actor::ActorRef;
|
||||
use once_cell::sync::Lazy;
|
||||
use parking_lot::RwLock;
|
||||
use std::sync::{atomic::AtomicBool, Arc};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use de_mls::{
|
||||
user::User,
|
||||
user_app_instance::{create_user_instance, CoreCtx},
|
||||
};
|
||||
use de_mls_ui_protocol::v1::{AppCmd, AppEvent};
|
||||
|
||||
mod forwarder;
|
||||
mod group;
|
||||
// Global, process-wide gateway instance
|
||||
pub static GATEWAY: Lazy<Gateway<WakuDeliveryService>> = Lazy::new(Gateway::new);
|
||||
|
||||
/// Helper to set the core context once during startup (called by ui_bridge).
///
/// Delegates to [`Gateway::set_core`] on the process-wide `GATEWAY` instance.
pub fn init_core(core: Arc<CoreCtx<WakuDeliveryService>>) {
    GATEWAY.set_core(core);
}
|
||||
|
||||
/// Facade between the UI (`AppCmd`/`AppEvent`) and the core runtime.
pub struct Gateway<DS: DeliveryService> {
    // UI events (gateway -> UI)
    // Sending half of the event pipe; `push_event` writes `AppEvent`s here.
    evt_tx: UnboundedSender<AppEvent>,
    // Receiving half of the event pipe; the UI drains it via `next_event`.
    evt_rx: Mutex<UnboundedReceiver<AppEvent>>,

    // UI commands (UI -> gateway)
    // A channel that sends AppCommands to the gateway (ui_bridge registers the sender here).
    // It gives the UI an async door to submit AppCmds back to the gateway (`Gateway::send(AppCmd)`).
    cmd_tx: RwLock<Option<UnboundedSender<AppCmd>>>,

    // It anchors the shared references to consensus, topics, app_state, etc.
    core: RwLock<Option<Arc<CoreCtx<DS>>>>, // set once during startup

    // Current logged-in user actor
    user: RwLock<Option<ActorRef<User>>>,
    // Flag that guards against spawning delivery service forwarder more than once.
    // It's initialized to false and set to true after the first successful login.
    // NOTE(review): not toggled anywhere in this file — confirm the forwarder module uses it.
    started: AtomicBool,
}
|
||||
|
||||
impl<DS: DeliveryService> Gateway<DS> {
|
||||
fn new() -> Self {
|
||||
let (evt_tx, evt_rx) = unbounded();
|
||||
Self {
|
||||
evt_tx,
|
||||
evt_rx: Mutex::new(evt_rx),
|
||||
cmd_tx: RwLock::new(None),
|
||||
core: RwLock::new(None),
|
||||
user: RwLock::new(None),
|
||||
started: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Called once by the bootstrap (ui_bridge) to provide the core context.
|
||||
pub fn set_core(&self, core: Arc<CoreCtx<DS>>) {
|
||||
*self.core.write() = Some(core);
|
||||
}
|
||||
|
||||
pub fn core(&self) -> Arc<CoreCtx<DS>> {
|
||||
self.core
|
||||
.read()
|
||||
.as_ref()
|
||||
.expect("Gateway core not initialized")
|
||||
.clone()
|
||||
}
|
||||
|
||||
/// ui_bridge registers its command sender so `send` can work.
|
||||
pub fn register_cmd_sink(&self, tx: UnboundedSender<AppCmd>) {
|
||||
*self.cmd_tx.write() = Some(tx);
|
||||
}
|
||||
|
||||
/// Push an event to the UI.
|
||||
pub fn push_event(&self, evt: AppEvent) {
|
||||
let _ = self.evt_tx.unbounded_send(evt);
|
||||
}
|
||||
|
||||
/// Await next event on the UI side.
|
||||
pub async fn next_event(&self) -> Option<AppEvent> {
|
||||
let mut rx = self.evt_rx.lock().await;
|
||||
rx.next().await
|
||||
}
|
||||
|
||||
/// UI convenience: enqueue a command (UI -> gateway).
|
||||
pub async fn send(&self, cmd: AppCmd) -> anyhow::Result<()> {
|
||||
if let Some(tx) = self.cmd_tx.read().clone() {
|
||||
tx.unbounded_send(cmd)
|
||||
.map_err(|e| anyhow::anyhow!("send cmd failed: {e}"))
|
||||
} else {
|
||||
Err(anyhow::anyhow!("cmd sink not registered"))
|
||||
}
|
||||
}
|
||||
|
||||
// ─────────────────────────── High-level helpers ───────────────────────────
|
||||
|
||||
/// Create the user actor with a private key (no group yet).
|
||||
/// Returns a derived display name (e.g., address string).
|
||||
pub async fn login_with_private_key(&self, private_key: String) -> anyhow::Result<String> {
|
||||
let core = self.core();
|
||||
let consensus_service = core.consensus.as_ref().clone();
|
||||
|
||||
let (user_ref, user_address) = create_user_instance(
|
||||
private_key.clone(),
|
||||
core.app_state.clone(),
|
||||
&consensus_service,
|
||||
)
|
||||
.await?;
|
||||
|
||||
*self.user.write() = Some(user_ref.clone());
|
||||
|
||||
self.spawn_waku_forwarder(core.clone(), user_ref.clone());
|
||||
self.spawn_consensus_forwarder(core.clone())?;
|
||||
Ok(user_address)
|
||||
}
|
||||
|
||||
/// Get a copy of the current user ref (if logged in).
|
||||
pub fn user(&self) -> anyhow::Result<ActorRef<User>> {
|
||||
self.user
|
||||
.read()
|
||||
.clone()
|
||||
.ok_or_else(|| anyhow::anyhow!("user not logged in"))
|
||||
}
|
||||
}
|
||||
15
crates/de_mls_ui_protocol/Cargo.toml
Normal file
15
crates/de_mls_ui_protocol/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "de_mls_ui_protocol"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
license = "MIT OR Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
serde_json = "1.0"
|
||||
serde = { version = "1.0.163", features = ["derive"] }
|
||||
uuid = { version = "1.18.1", features = ["v4", "serde"] }
|
||||
thiserror = "2.0.17"
|
||||
|
||||
de_mls = { path = "../../" }
|
||||
mls_crypto = { path = "../../mls_crypto" }
|
||||
127
crates/de_mls_ui_protocol/src/lib.rs
Normal file
127
crates/de_mls_ui_protocol/src/lib.rs
Normal file
@@ -0,0 +1,127 @@
|
||||
//! UI <-> Gateway protocol (PoC)
|
||||
// crates/de_mls_ui_protocol/src/lib.rs
|
||||
pub mod v1 {
|
||||
use de_mls::{
|
||||
message::MessageType,
|
||||
protos::{
|
||||
consensus::v1::{ProposalResult, VotePayload},
|
||||
de_mls::messages::v1::{BanRequest, ConversationMessage, ProposalAdded},
|
||||
},
|
||||
};
|
||||
use mls_crypto::identity::normalize_wallet_address;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Commands sent from the UI to the gateway; dispatched by ui_bridge's command loop.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum AppCmd {
    /// Create the user actor from a wallet private key.
    Login {
        private_key: String,
    },
    /// Request the list of known group names.
    ListGroups,
    /// Create a new group with the given name.
    CreateGroup {
        name: String,
    },
    /// Join an existing group with the given name.
    JoinGroup {
        name: String,
    },
    /// Switch the UI focus to a group (echoed back as `EnteredGroup`).
    EnterGroup {
        group_id: String,
    },
    /// Send a chat message to a group.
    SendMessage {
        group_id: String,
        body: String,
    },
    /// Load chat history for a group (currently stubbed in ui_bridge).
    LoadHistory {
        group_id: String,
    },
    /// Submit the user's vote on a proposal.
    Vote {
        group_id: String,
        proposal_id: u32,
        choice: bool,
    },
    /// Leave a group (echoed back as a `LeaveGroup` event).
    LeaveGroup {
        group_id: String,
    },
    /// Ask whether the current user is the steward of a group.
    GetStewardStatus {
        group_id: String,
    },
    /// Fetch the proposals of the current epoch for a group.
    GetCurrentEpochProposals {
        group_id: String,
    },
    /// Request that a user be banned from a group.
    SendBanRequest {
        group_id: String,
        user_to_ban: String,
    },
    /// Fetch the member list of a group.
    GetGroupMembers {
        group_id: String,
    },
}
|
||||
|
||||
/// Events pushed from the gateway to the UI.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum AppEvent {
    /// Login succeeded; payload is the derived display name (address string).
    LoggedIn(String),
    /// Current list of group names.
    Groups(Vec<String>),
    GroupCreated(String),
    GroupRemoved(String),
    /// UI should switch focus to this group.
    EnteredGroup {
        group_id: String,
    },
    /// A chat message to display.
    ChatMessage(ConversationMessage),
    /// UI should leave this group's view.
    LeaveGroup {
        group_id: String,
    },

    /// Whether the current user is the steward of `group_id`.
    StewardStatus {
        group_id: String,
        is_steward: bool,
    },

    /// The steward is asked to vote on pending proposals.
    VoteRequested(VotePayload),
    /// Outcome of consensus for a proposal — NOTE(review): emitter not visible
    /// in this file; confirm against the consensus forwarder.
    ProposalDecided(ProposalResult),
    /// Proposals of the current epoch as `(action_label, address)` display pairs.
    CurrentEpochProposals {
        group_id: String,
        proposals: Vec<(String, String)>,
    },
    /// A proposal was added; `action` is a display label, `address` the target wallet.
    ProposalAdded {
        group_id: String,
        action: String,
        address: String,
    },
    /// The current epoch's proposal list should be cleared in the UI.
    CurrentEpochProposalsCleared {
        group_id: String,
    },
    /// Member list of a group.
    GroupMembers {
        group_id: String,
        members: Vec<String>,
    },
    /// A user-visible error message.
    Error(String),
}
|
||||
|
||||
impl From<ProposalAdded> for AppEvent {
|
||||
fn from(proposal_added: ProposalAdded) -> Self {
|
||||
AppEvent::ProposalAdded {
|
||||
group_id: proposal_added.group_id.clone(),
|
||||
action: proposal_added
|
||||
.request
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.message_type()
|
||||
.to_string(),
|
||||
address: normalize_wallet_address(
|
||||
&proposal_added.request.as_ref().unwrap().wallet_address,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BanRequest> for AppEvent {
|
||||
fn from(ban_request: BanRequest) -> Self {
|
||||
AppEvent::ProposalAdded {
|
||||
group_id: ban_request.group_name.clone(),
|
||||
action: "Remove Member".to_string(),
|
||||
address: ban_request.user_to_ban.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
16
crates/ui_bridge/Cargo.toml
Normal file
16
crates/ui_bridge/Cargo.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "ui_bridge"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
futures = "0.3.31"
|
||||
anyhow = "1.0.100"
|
||||
uuid = { version = "1.18.1", features = ["v4", "serde"] }
|
||||
tracing = "0.1.41"
|
||||
tokio = { version = "1.47.1", features = ["macros", "rt-multi-thread"] }
|
||||
|
||||
de_mls_gateway = { path = "../de_mls_gateway" }
|
||||
de_mls_ui_protocol = { path = "../de_mls_ui_protocol" }
|
||||
ds = { path = "../../ds" }
|
||||
de_mls = { path = "../../" }
|
||||
202
crates/ui_bridge/src/lib.rs
Normal file
202
crates/ui_bridge/src/lib.rs
Normal file
@@ -0,0 +1,202 @@
|
||||
//! ui_bridge
|
||||
//!
|
||||
//! Owns the command loop translating `AppCmd` -> core calls
|
||||
//! and pushing `AppEvent` back to the UI via the Gateway.
|
||||
//!
|
||||
//! It ensures there is a Tokio runtime (desktop app may not have one yet).
|
||||
|
||||
// crates/ui_bridge/src/lib.rs
|
||||
use futures::channel::mpsc::{unbounded, UnboundedReceiver};
|
||||
use futures::StreamExt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use de_mls::protos::de_mls::messages::v1::ConversationMessage;
|
||||
use de_mls::user_app_instance::CoreCtx;
|
||||
use de_mls_gateway::{init_core, GATEWAY};
|
||||
use de_mls_ui_protocol::v1::{AppCmd, AppEvent};
|
||||
use ds::waku::WakuDeliveryService;
|
||||
|
||||
/// Call once during process startup (before launching the Dioxus UI).
|
||||
pub fn start_ui_bridge(core: Arc<CoreCtx<WakuDeliveryService>>) {
|
||||
// 1) Give the gateway access to the core context.
|
||||
init_core(core);
|
||||
|
||||
// 2) Create a command channel UI -> gateway and register the sender.
|
||||
let (cmd_tx, cmd_rx) = unbounded::<AppCmd>();
|
||||
GATEWAY.register_cmd_sink(cmd_tx);
|
||||
|
||||
// 3) Drive the dispatcher loop on a Tokio runtime
|
||||
if let Ok(handle) = tokio::runtime::Handle::try_current() {
|
||||
handle.spawn(async move {
|
||||
if let Err(e) = ui_loop(cmd_rx).await {
|
||||
tracing::error!("ui_loop crashed: {e}");
|
||||
}
|
||||
});
|
||||
} else {
|
||||
std::thread::Builder::new()
|
||||
.name("ui-bridge".into())
|
||||
.spawn(move || {
|
||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("tokio runtime");
|
||||
rt.block_on(async move {
|
||||
if let Err(e) = ui_loop(cmd_rx).await {
|
||||
eprintln!("ui_loop crashed: {e:?}");
|
||||
}
|
||||
});
|
||||
})
|
||||
.expect("spawn ui-bridge");
|
||||
}
|
||||
}
|
||||
|
||||
async fn ui_loop(mut cmd_rx: UnboundedReceiver<AppCmd>) -> anyhow::Result<()> {
|
||||
while let Some(cmd) = cmd_rx.next().await {
|
||||
match cmd {
|
||||
// ───────────── Authentication / session ─────────────
|
||||
AppCmd::Login { private_key } => {
|
||||
match GATEWAY.login_with_private_key(private_key).await {
|
||||
Ok(derived_name) => GATEWAY.push_event(AppEvent::LoggedIn(derived_name)),
|
||||
Err(e) => GATEWAY.push_event(AppEvent::Error(format!("Login failed: {e}"))),
|
||||
}
|
||||
}
|
||||
|
||||
// ───────────── Groups ─────────────
|
||||
AppCmd::ListGroups => {
|
||||
let groups = GATEWAY.group_list().await;
|
||||
GATEWAY.push_event(AppEvent::Groups(groups));
|
||||
}
|
||||
|
||||
AppCmd::CreateGroup { name } => {
|
||||
GATEWAY.create_group(name.clone()).await?;
|
||||
|
||||
let groups = GATEWAY.group_list().await;
|
||||
GATEWAY.push_event(AppEvent::Groups(groups));
|
||||
}
|
||||
|
||||
AppCmd::JoinGroup { name } => {
|
||||
GATEWAY.join_group(name.clone()).await?;
|
||||
|
||||
let groups = GATEWAY.group_list().await;
|
||||
GATEWAY.push_event(AppEvent::Groups(groups));
|
||||
}
|
||||
|
||||
AppCmd::EnterGroup { group_id } => {
|
||||
GATEWAY.push_event(AppEvent::EnteredGroup { group_id });
|
||||
}
|
||||
|
||||
AppCmd::LeaveGroup { group_id } => {
|
||||
GATEWAY.push_event(AppEvent::LeaveGroup { group_id });
|
||||
}
|
||||
|
||||
AppCmd::GetGroupMembers { group_id } => {
|
||||
match GATEWAY.get_group_members(group_id.clone()).await {
|
||||
Ok(members) => {
|
||||
GATEWAY.push_event(AppEvent::GroupMembers { group_id, members });
|
||||
}
|
||||
Err(e) => {
|
||||
GATEWAY
|
||||
.push_event(AppEvent::Error(format!("Get group members failed: {e}")));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AppCmd::SendBanRequest {
|
||||
group_id,
|
||||
user_to_ban,
|
||||
} => {
|
||||
if let Err(e) = GATEWAY
|
||||
.send_ban_request(group_id.clone(), user_to_ban.clone())
|
||||
.await
|
||||
{
|
||||
GATEWAY.push_event(AppEvent::Error(format!("Send ban request failed: {e}")));
|
||||
} else {
|
||||
GATEWAY.push_event(AppEvent::ChatMessage(ConversationMessage {
|
||||
message: "You requested to leave or ban user from the group"
|
||||
.to_string()
|
||||
.into_bytes(),
|
||||
sender: "system".to_string(),
|
||||
group_name: group_id.clone(),
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
// ───────────── Chat ─────────────
|
||||
AppCmd::SendMessage { group_id, body } => {
|
||||
GATEWAY.push_event(AppEvent::ChatMessage(ConversationMessage {
|
||||
message: body.as_bytes().to_vec(),
|
||||
sender: "me".to_string(),
|
||||
group_name: group_id.clone(),
|
||||
}));
|
||||
|
||||
GATEWAY.send_message(group_id, body).await?;
|
||||
}
|
||||
|
||||
AppCmd::LoadHistory { group_id } => {
|
||||
// TODO: load from storage; stub:
|
||||
GATEWAY.push_event(AppEvent::ChatMessage(ConversationMessage {
|
||||
message: "History loaded (stub)".as_bytes().to_vec(),
|
||||
sender: "system".to_string(),
|
||||
group_name: group_id.clone(),
|
||||
}));
|
||||
}
|
||||
|
||||
// ───────────── Consensus ─────────────
|
||||
AppCmd::Vote {
|
||||
group_id,
|
||||
proposal_id,
|
||||
choice,
|
||||
} => {
|
||||
// Process the user vote:
|
||||
// if it come from the user, send the vote result to Waku
|
||||
// if it come from the steward, just process it and return None
|
||||
GATEWAY
|
||||
.process_user_vote(group_id.clone(), proposal_id, choice)
|
||||
.await?;
|
||||
|
||||
GATEWAY.push_event(AppEvent::ChatMessage(ConversationMessage {
|
||||
message: format!(
|
||||
"Your vote ({}) has been submitted for proposal {proposal_id}",
|
||||
if choice { "YES" } else { "NO" }
|
||||
)
|
||||
.as_bytes()
|
||||
.to_vec(),
|
||||
sender: "system".to_string(),
|
||||
group_name: group_id.clone(),
|
||||
}));
|
||||
}
|
||||
|
||||
AppCmd::GetCurrentEpochProposals { group_id } => {
|
||||
match GATEWAY.get_current_epoch_proposals(group_id.clone()).await {
|
||||
Ok(proposals) => {
|
||||
GATEWAY.push_event(AppEvent::CurrentEpochProposals {
|
||||
group_id,
|
||||
proposals,
|
||||
});
|
||||
}
|
||||
Err(e) => GATEWAY.push_event(AppEvent::Error(format!(
|
||||
"Get current epoch proposals failed: {e}"
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
AppCmd::GetStewardStatus { group_id } => {
|
||||
match GATEWAY.get_steward_status(group_id.clone()).await {
|
||||
Ok(is_steward) => {
|
||||
GATEWAY.push_event(AppEvent::StewardStatus {
|
||||
group_id,
|
||||
is_steward,
|
||||
});
|
||||
}
|
||||
Err(e) => GATEWAY
|
||||
.push_event(AppEvent::Error(format!("Get steward status failed: {e}"))),
|
||||
}
|
||||
}
|
||||
|
||||
other => {
|
||||
tracing::warn!("unhandled AppCmd: {:?}", other);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
name: ${NAME}
|
||||
services:
|
||||
frontend:
|
||||
build:
|
||||
context: frontend
|
||||
dockerfile: Dockerfile
|
||||
ports:
|
||||
- ${FRONTEND_PORT}:5173
|
||||
environment:
|
||||
- PUBLIC_API_URL=http://127.0.0.1:${BACKEND_PORT}
|
||||
- PUBLIC_WEBSOCKET_URL=ws://127.0.0.1:${BACKEND_PORT}
|
||||
depends_on:
|
||||
- backend
|
||||
|
||||
backend:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
ports:
|
||||
- ${BACKEND_PORT}:3000
|
||||
environment:
|
||||
- RUST_LOG=info
|
||||
- NODE=${NODE}
|
||||
@@ -6,24 +6,25 @@ edition = "2021"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
waku-bindings = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "force-cluster-15", subdir = "waku-bindings" }
|
||||
waku-sys = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "force-cluster-15", subdir = "waku-sys" }
|
||||
waku-bindings = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "rln-fix-deps" }
|
||||
waku-sys = { git = "https://github.com/waku-org/waku-rust-bindings.git", branch = "rln-fix-deps" }
|
||||
|
||||
tokio = { version = "=1.38.0", features = ["full"] }
|
||||
kameo = "=0.13.0"
|
||||
tokio = { version = "1.43.0", features = ["full"] }
|
||||
kameo = "0.13.0"
|
||||
bounded-vec-deque = "0.1.1"
|
||||
|
||||
chrono = "=0.4.38"
|
||||
uuid = { version = "=1.11.0", features = [
|
||||
chrono = "0.4"
|
||||
uuid = { version = "1.11.0", features = [
|
||||
"v4",
|
||||
"fast-rng",
|
||||
"macro-diagnostics",
|
||||
] }
|
||||
|
||||
anyhow = "=1.0.81"
|
||||
thiserror = "=1.0.61"
|
||||
anyhow = "1.0.81"
|
||||
thiserror = "1.0.39"
|
||||
|
||||
serde_json = "=1.0"
|
||||
serde = "=1.0.204"
|
||||
serde_json = "1.0"
|
||||
serde = "1.0.163"
|
||||
|
||||
env_logger = "=0.11.5"
|
||||
log = "=0.4.22"
|
||||
tracing = "0.1.41"
|
||||
tracing-subscriber = "0.3.20"
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
use core::result::Result;
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
str::FromStr,
|
||||
sync::{Arc, Mutex as SyncMutex},
|
||||
thread,
|
||||
time::Duration,
|
||||
};
|
||||
use waku_bindings::*;
|
||||
|
||||
use crate::DeliveryServiceError;
|
||||
|
||||
pub const GROUP_VERSION: &str = "1";
|
||||
pub const APP_MSG_SUBTOPIC: &str = "app_msg";
|
||||
pub const COMMIT_MSG_SUBTOPIC: &str = "commit_msg";
|
||||
pub const WELCOME_SUBTOPIC: &str = "welcome";
|
||||
pub const SUBTOPICS: [&str; 3] = [APP_MSG_SUBTOPIC, COMMIT_MSG_SUBTOPIC, WELCOME_SUBTOPIC];
|
||||
|
||||
/// The pubsub topic for the Waku Node
|
||||
/// Fixed for now because nodes on the network would need to be subscribed to existing pubsub topics
|
||||
pub fn pubsub_topic() -> WakuPubSubTopic {
|
||||
"/waku/2/rs/15/0".to_string()
|
||||
}
|
||||
|
||||
/// Build the content topics for a group. Subtopics are fixed for de-mls group communication.
|
||||
///
|
||||
/// Input:
|
||||
/// - group_name: The name of the group
|
||||
/// - group_version: The version of the group
|
||||
///
|
||||
/// Returns:
|
||||
/// - content_topics: The content topics of the group
|
||||
pub fn build_content_topics(group_name: &str, group_version: &str) -> Vec<WakuContentTopic> {
|
||||
SUBTOPICS
|
||||
.iter()
|
||||
.map(|subtopic| build_content_topic(group_name, group_version, subtopic))
|
||||
.collect::<Vec<WakuContentTopic>>()
|
||||
}
|
||||
|
||||
/// Build the content topic for the given group and subtopic
|
||||
/// Input:
|
||||
/// - group_name: The name of the group
|
||||
/// - group_version: The version of the group
|
||||
/// - subtopic: The subtopic of the group
|
||||
///
|
||||
/// Returns:
|
||||
/// - content_topic: The content topic of the subtopic
|
||||
pub fn build_content_topic(
|
||||
group_name: &str,
|
||||
group_version: &str,
|
||||
subtopic: &str,
|
||||
) -> WakuContentTopic {
|
||||
WakuContentTopic {
|
||||
application_name: Cow::from(group_name.to_string()),
|
||||
version: Cow::from(group_version.to_string()),
|
||||
content_topic_name: Cow::from(subtopic.to_string()),
|
||||
encoding: Encoding::Proto,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build the content filter for the given pubsub topic and content topics
|
||||
/// Input:
|
||||
/// - pubsub_topic: The pubsub topic of the Waku Node
|
||||
/// - content_topics: The content topics of the group
|
||||
///
|
||||
/// Returns:
|
||||
/// - content_filter: The content filter of the group
|
||||
pub fn content_filter(
|
||||
pubsub_topic: &WakuPubSubTopic,
|
||||
content_topics: &[WakuContentTopic],
|
||||
) -> ContentFilter {
|
||||
ContentFilter::new(Some(pubsub_topic.to_string()), content_topics.to_vec())
|
||||
}
|
||||
|
||||
/// Setup the Waku Node Handle
/// Input:
/// - nodes_addresses: The addresses of the nodes to connect to
///
/// Returns:
/// - node_handle: The running Waku Node Handle, subscribed to the fixed
///   pubsub topic and connected to each given peer
#[allow(clippy::field_reassign_with_default)]
pub fn setup_node_handle(
    nodes_addresses: Vec<String>,
) -> Result<WakuNodeHandle<Running>, DeliveryServiceError> {
    let mut config = WakuNodeConfig::default();
    // Set the port to 0 to let the system choose a random port
    config.port = Some(0);
    config.log_level = Some(WakuLogLevel::Panic);
    let node_handle = waku_new(Some(config))
        .map_err(|e| DeliveryServiceError::WakuNodeAlreadyInitialized(e.to_string()))?;
    let node_handle = node_handle
        .start()
        .map_err(|e| DeliveryServiceError::WakuNodeAlreadyInitialized(e.to_string()))?;
    // Subscribe the relay to the fixed pubsub topic with no content-topic restriction.
    let content_filter = ContentFilter::new(Some(pubsub_topic()), vec![]);
    node_handle
        .relay_subscribe(&content_filter)
        .map_err(|e| DeliveryServiceError::WakuSubscribeToContentFilterError(e.to_string()))?;
    // Parse, register and connect each bootstrap peer over the relay protocol.
    for address in nodes_addresses
        .iter()
        .map(|a| Multiaddr::from_str(a.as_str()))
    {
        let address =
            address.map_err(|e| DeliveryServiceError::FailedToParseMultiaddr(e.to_string()))?;
        let peerid = node_handle
            .add_peer(&address, ProtocolId::Relay)
            .map_err(|e| DeliveryServiceError::WakuAddPeerError(e.to_string()))?;
        node_handle
            .connect_peer_with_id(&peerid, None)
            .map_err(|e| DeliveryServiceError::WakuConnectPeerError(e.to_string()))?;
        // Blocking pause between peer connections — NOTE(review): presumably
        // gives the connection time to establish; confirm whether still needed.
        thread::sleep(Duration::from_secs(2));
    }

    Ok(node_handle)
}
|
||||
|
||||
/// Check if a content topic exists in a list of topics or if the list is empty
|
||||
pub fn match_content_topic(
|
||||
content_topics: &Arc<SyncMutex<Vec<WakuContentTopic>>>,
|
||||
topic: &WakuContentTopic,
|
||||
) -> bool {
|
||||
let locked_topics = content_topics.lock().unwrap();
|
||||
locked_topics.is_empty() || locked_topics.iter().any(|t| t == topic)
|
||||
}
|
||||
16
ds/src/error.rs
Normal file
16
ds/src/error.rs
Normal file
@@ -0,0 +1,16 @@
|
||||
/// Errors produced by the delivery-service (Waku) layer.
#[derive(Debug, thiserror::Error)]
pub enum DeliveryServiceError {
    /// Publishing a message to the network failed.
    #[error("Waku publish message error: {0}")]
    WakuPublishMessageError(String),
    /// Subscribing the relay to a pubsub topic failed.
    #[error("Waku subscribe to pubsub topic error: {0}")]
    WakuSubscribeToPubsubTopicError(String),
    /// Creating or starting the Waku node failed.
    #[error("Waku node already initialized: {0}")]
    WakuNodeAlreadyInitialized(String),
    /// Connecting to a peer failed.
    #[error("Waku connect peer error: {0}")]
    WakuConnectPeerError(String),
    /// Querying the node's listen addresses failed.
    #[error("Waku get listen addresses error: {0}")]
    WakuGetListenAddressesError(String),

    /// Catch-all wrapper for errors not covered above.
    #[error("An unknown error occurred: {0}")]
    Other(anyhow::Error),
}
|
||||
@@ -1,28 +1,11 @@
|
||||
pub mod ds_waku;
|
||||
pub mod waku_actor;
|
||||
pub mod error;
|
||||
pub mod topic_filter;
|
||||
pub mod transport;
|
||||
pub mod waku;
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum DeliveryServiceError {
|
||||
#[error("Waku publish message error: {0}")]
|
||||
WakuPublishMessageError(String),
|
||||
#[error("Waku relay topics error: {0}")]
|
||||
WakuRelayTopicsError(String),
|
||||
#[error("Waku subscribe to group error: {0}")]
|
||||
WakuSubscribeToGroupError(String),
|
||||
#[error("Waku receive message error: {0}")]
|
||||
WakuReceiveMessageError(String),
|
||||
#[error("Waku node already initialized: {0}")]
|
||||
WakuNodeAlreadyInitialized(String),
|
||||
#[error("Waku subscribe to content filter error: {0}")]
|
||||
WakuSubscribeToContentFilterError(String),
|
||||
#[error("Waku add peer error: {0}")]
|
||||
WakuAddPeerError(String),
|
||||
#[error("Waku connect peer error: {0}")]
|
||||
WakuConnectPeerError(String),
|
||||
|
||||
#[error("Failed to parse multiaddr: {0}")]
|
||||
FailedToParseMultiaddr(String),
|
||||
|
||||
#[error("An unknown error occurred: {0}")]
|
||||
Other(anyhow::Error),
|
||||
}
|
||||
pub use error::DeliveryServiceError;
|
||||
pub use transport::{DeliveryService, InboundPacket, OutboundPacket};
|
||||
pub use waku::{
|
||||
build_content_topic, build_content_topics, pubsub_topic, APP_MSG_SUBTOPIC, GROUP_VERSION,
|
||||
SUBTOPICS, WELCOME_SUBTOPIC,
|
||||
};
|
||||
|
||||
21
ds/src/net.rs
Normal file
21
ds/src/net.rs
Normal file
@@ -0,0 +1,21 @@
|
||||
//! Transport-agnostic network envelope.
|
||||
//!
|
||||
/// A transport-agnostic packet that should be sent to the network.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OutboundPacket {
    pub payload: Vec<u8>,
    pub subtopic: String,
    pub group_id: String,
    pub app_id: Vec<u8>,
}

impl OutboundPacket {
    /// Build a packet, taking ownership of the payload and copying the
    /// borrowed subtopic/group/app identifiers.
    pub fn new(payload: Vec<u8>, subtopic: &str, group_id: &str, app_id: &[u8]) -> Self {
        let subtopic = subtopic.to_owned();
        let group_id = group_id.to_owned();
        let app_id = app_id.to_owned();
        Self {
            payload,
            subtopic,
            group_id,
            app_id,
        }
    }
}
|
||||
62
ds/src/topic_filter.rs
Normal file
62
ds/src/topic_filter.rs
Normal file
@@ -0,0 +1,62 @@
|
||||
//! Transport-agnostic topic filter used by the app as a fast allowlist.
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::SUBTOPICS;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct TopicKey {
|
||||
pub group_id: String,
|
||||
pub subtopic: String,
|
||||
}
|
||||
|
||||
impl TopicKey {
|
||||
pub fn new(group_id: &str, subtopic: &str) -> Self {
|
||||
Self {
|
||||
group_id: group_id.to_string(),
|
||||
subtopic: subtopic.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Fast allowlist for inbound routing.
|
||||
/// Internally uses a Vec and dedupes on insert.
|
||||
#[derive(Default, Debug)]
|
||||
pub struct TopicFilter {
|
||||
list: RwLock<Vec<TopicKey>>,
|
||||
}
|
||||
|
||||
impl TopicFilter {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Add all subtopics for a group.
|
||||
pub async fn add_many(&self, group_name: &str) {
|
||||
let mut w = self.list.write().await;
|
||||
for sub in SUBTOPICS {
|
||||
let k = TopicKey::new(group_name, sub);
|
||||
if !w.iter().any(|x| x == &k) {
|
||||
w.push(k);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove all subtopics for a group.
|
||||
pub async fn remove_many(&self, group_name: &str) {
|
||||
self.list.write().await.retain(|x| x.group_id != group_name);
|
||||
}
|
||||
|
||||
/// Membership test (first-stage filter).
|
||||
#[inline]
|
||||
pub async fn contains(&self, group_id: &str, subtopic: &str) -> bool {
|
||||
self.list
|
||||
.read()
|
||||
.await
|
||||
.iter()
|
||||
.any(|x| x.group_id == group_id && x.subtopic == subtopic)
|
||||
}
|
||||
|
||||
pub async fn snapshot(&self) -> Vec<TopicKey> {
|
||||
self.list.read().await.clone()
|
||||
}
|
||||
}
|
||||
67
ds/src/transport.rs
Normal file
67
ds/src/transport.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
//! Transport-agnostic envelopes + delivery service interface.
|
||||
|
||||
use std::future::Future;
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
use crate::DeliveryServiceError;
|
||||
|
||||
/// A transport-agnostic packet that should be sent to the network.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct OutboundPacket {
|
||||
pub payload: Vec<u8>,
|
||||
pub subtopic: String,
|
||||
pub group_id: String,
|
||||
pub app_id: Vec<u8>,
|
||||
}
|
||||
|
||||
impl OutboundPacket {
|
||||
pub fn new(payload: Vec<u8>, subtopic: &str, group_id: &str, app_id: &[u8]) -> Self {
|
||||
Self {
|
||||
payload,
|
||||
subtopic: subtopic.to_string(),
|
||||
group_id: group_id.to_string(),
|
||||
app_id: app_id.to_vec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A transport-agnostic packet delivered from the network into the application.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct InboundPacket {
|
||||
pub payload: Vec<u8>,
|
||||
pub subtopic: String,
|
||||
pub group_id: String,
|
||||
/// Transport-provided app instance id / metadata (used for self-message filtering).
|
||||
pub app_id: Vec<u8>,
|
||||
/// Optional transport timestamp (for logging/diagnostics).
|
||||
pub timestamp: Option<i64>,
|
||||
}
|
||||
|
||||
impl InboundPacket {
|
||||
pub fn new(
|
||||
payload: Vec<u8>,
|
||||
subtopic: &str,
|
||||
group_id: &str,
|
||||
app_id: Vec<u8>,
|
||||
timestamp: Option<i64>,
|
||||
) -> Self {
|
||||
Self {
|
||||
payload,
|
||||
subtopic: subtopic.to_string(),
|
||||
group_id: group_id.to_string(),
|
||||
app_id,
|
||||
timestamp,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait DeliveryService: Send + Sync + 'static {
|
||||
/// Send a packet to the network and return a transport message id (if available).
|
||||
fn send(
|
||||
&self,
|
||||
pkt: OutboundPacket,
|
||||
) -> impl Future<Output = Result<String, DeliveryServiceError>> + Send;
|
||||
|
||||
/// Subscribe to inbound packets.
|
||||
fn subscribe(&self) -> broadcast::Receiver<InboundPacket>;
|
||||
}
|
||||
362
ds/src/waku.rs
Normal file
362
ds/src/waku.rs
Normal file
@@ -0,0 +1,362 @@
|
||||
//! Waku transport implementation and Waku-backed `DeliveryService`.
|
||||
|
||||
use std::{borrow::Cow, thread::sleep, time::Duration};
|
||||
use tokio::sync::{
|
||||
broadcast,
|
||||
mpsc::{self, Receiver, Sender},
|
||||
oneshot,
|
||||
};
|
||||
use tracing::{debug, error, info};
|
||||
use waku_bindings::{
|
||||
node::PubsubTopic,
|
||||
node::{WakuNodeConfig, WakuNodeHandle},
|
||||
waku_new, Encoding, Initialized, LibwakuResponse, Multiaddr, Running, WakuContentTopic,
|
||||
WakuEvent, WakuMessage,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
transport::{DeliveryService, InboundPacket, OutboundPacket},
|
||||
DeliveryServiceError,
|
||||
};
|
||||
|
||||
pub const GROUP_VERSION: &str = "1";
|
||||
pub const APP_MSG_SUBTOPIC: &str = "app_msg";
|
||||
pub const WELCOME_SUBTOPIC: &str = "welcome";
|
||||
pub const SUBTOPICS: [&str; 2] = [APP_MSG_SUBTOPIC, WELCOME_SUBTOPIC];
|
||||
|
||||
/// The pubsub topic for the Waku Node.
|
||||
/// Fixed for now because nodes on the network would need to be subscribed to existing pubsub topics.
|
||||
pub fn pubsub_topic() -> PubsubTopic {
|
||||
PubsubTopic::new("/waku/2/rs/15/1")
|
||||
}
|
||||
|
||||
/// Build the content topics for a group. Subtopics are fixed for de-mls group communication.
|
||||
pub fn build_content_topics(group_name: &str) -> Vec<WakuContentTopic> {
|
||||
SUBTOPICS
|
||||
.iter()
|
||||
.map(|subtopic| build_content_topic(group_name, GROUP_VERSION, subtopic))
|
||||
.collect::<Vec<WakuContentTopic>>()
|
||||
}
|
||||
|
||||
/// Build the content topic for the given group and subtopic.
|
||||
pub fn build_content_topic(
|
||||
group_name: &str,
|
||||
group_version: &str,
|
||||
subtopic: &str,
|
||||
) -> WakuContentTopic {
|
||||
WakuContentTopic {
|
||||
application_name: Cow::from(group_name.to_string()),
|
||||
version: Cow::from(group_version.to_string()),
|
||||
content_topic_name: Cow::from(subtopic.to_string()),
|
||||
encoding: Encoding::Proto,
|
||||
}
|
||||
}
|
||||
|
||||
impl From<WakuMessage> for InboundPacket {
|
||||
fn from(msg: WakuMessage) -> Self {
|
||||
InboundPacket {
|
||||
payload: msg.payload().to_vec(),
|
||||
subtopic: msg.content_topic.content_topic_name.to_string(),
|
||||
group_id: msg.content_topic.application_name.to_string(),
|
||||
app_id: msg.meta.clone(),
|
||||
timestamp: Some(msg.timestamp as i64),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OutboundPacket> for WakuMessage {
|
||||
fn from(value: OutboundPacket) -> Self {
|
||||
WakuMessage::new(
|
||||
value.payload,
|
||||
build_content_topic(&value.group_id, GROUP_VERSION, &value.subtopic),
|
||||
2,
|
||||
value.app_id,
|
||||
true,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WakuNode<State> {
|
||||
node: WakuNodeHandle<State>,
|
||||
}
|
||||
|
||||
impl WakuNode<Initialized> {
|
||||
/// Create a new WakuNode (initialized but not started).
|
||||
pub async fn new(port: usize) -> Result<WakuNode<Initialized>, DeliveryServiceError> {
|
||||
info!("Initializing waku node inside ");
|
||||
// Note: here we are auto-subscribing to the pubsub topic /waku/2/rs/15/1
|
||||
let waku = waku_new(Some(WakuNodeConfig {
|
||||
tcp_port: Some(port),
|
||||
cluster_id: Some(15),
|
||||
shards: vec![1],
|
||||
log_level: Some("FATAL"), // Supported: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL
|
||||
..Default::default()
|
||||
}))
|
||||
.await
|
||||
.map_err(|e| DeliveryServiceError::WakuNodeAlreadyInitialized(e.to_string()))?;
|
||||
info!("Waku node initialized");
|
||||
Ok(WakuNode { node: waku })
|
||||
}
|
||||
|
||||
pub async fn start_with_handler<F>(
|
||||
self,
|
||||
on_message: F,
|
||||
) -> Result<WakuNode<Running>, DeliveryServiceError>
|
||||
where
|
||||
F: Fn(WakuMessage) + Send + Sync + 'static,
|
||||
{
|
||||
let closure = move |response| {
|
||||
if let LibwakuResponse::Success(v) = response {
|
||||
let event: WakuEvent =
|
||||
serde_json::from_str(v.unwrap().as_str()).expect("Parsing event to succeed");
|
||||
match event {
|
||||
WakuEvent::WakuMessage(evt) => {
|
||||
debug!("WakuMessage event received: {:?}", evt.message_hash);
|
||||
on_message(evt.waku_message.clone());
|
||||
}
|
||||
WakuEvent::RelayTopicHealthChange(evt) => {
|
||||
debug!("Relay topic change evt: {evt:?}");
|
||||
}
|
||||
WakuEvent::ConnectionChange(evt) => {
|
||||
debug!("Conn change evt: {evt:?}");
|
||||
}
|
||||
WakuEvent::Unrecognized(e) => panic!("Unrecognized waku event: {e:?}"),
|
||||
_ => panic!("event case not expected"),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
self.node
|
||||
.set_event_callback(closure)
|
||||
.expect("set event call back working");
|
||||
|
||||
let waku = self.node.start().await.map_err(|e| {
|
||||
debug!("Failed to start the Waku Node: {e:?}");
|
||||
DeliveryServiceError::WakuNodeAlreadyInitialized(e.to_string())
|
||||
})?;
|
||||
|
||||
sleep(Duration::from_secs(2));
|
||||
|
||||
// Note: we are not subscribing to the pubsub topic here because we are already subscribed to it
|
||||
// and from waku side we can't check if we are subscribed to it or not
|
||||
// issue - https://github.com/waku-org/nwaku/issues/3246
|
||||
// waku.relay_subscribe(&pubsub_topic()).await.map_err(|e| {
|
||||
// debug!("Failed to subscribe to the Waku Node: {:?}", e);
|
||||
// DeliveryServiceError::WakuSubscribeToPubsubTopicError(e)
|
||||
// })?;
|
||||
|
||||
Ok(WakuNode { node: waku })
|
||||
}
|
||||
|
||||
pub async fn start(
|
||||
self,
|
||||
waku_sender: Sender<WakuMessage>,
|
||||
) -> Result<WakuNode<Running>, DeliveryServiceError> {
|
||||
self.start_with_handler(move |msg| {
|
||||
waku_sender
|
||||
.blocking_send(msg)
|
||||
.expect("Failed to send message to waku");
|
||||
})
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl WakuNode<Running> {
|
||||
pub async fn send_message(&self, msg: OutboundPacket) -> Result<String, DeliveryServiceError> {
|
||||
let waku_message = msg.into();
|
||||
let msg_id = self
|
||||
.node
|
||||
.relay_publish_message(&waku_message, &pubsub_topic(), None)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!("Failed to relay publish the message: {e:?}");
|
||||
DeliveryServiceError::WakuPublishMessageError(e)
|
||||
})?;
|
||||
|
||||
Ok(msg_id.to_string())
|
||||
}
|
||||
|
||||
pub async fn connect_to_peers(
|
||||
&self,
|
||||
peer_addresses: Vec<Multiaddr>,
|
||||
) -> Result<(), DeliveryServiceError> {
|
||||
for peer_address in peer_addresses {
|
||||
info!("Connecting to peer: {peer_address:?}");
|
||||
self.node
|
||||
.connect(&peer_address, Some(Duration::from_secs(10)))
|
||||
.await
|
||||
.map_err(|e| DeliveryServiceError::WakuConnectPeerError(e.to_string()))?;
|
||||
info!("Connected to peer: {peer_address:?}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn listen_addresses(&self) -> Result<Vec<Multiaddr>, DeliveryServiceError> {
|
||||
let addresses = self.node.listen_addresses().await.map_err(|e| {
|
||||
debug!("Failed to get the listen addresses: {e:?}");
|
||||
DeliveryServiceError::WakuGetListenAddressesError(e)
|
||||
})?;
|
||||
|
||||
Ok(addresses)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_waku_node(
|
||||
node_port: String,
|
||||
peer_addresses: Option<Vec<Multiaddr>>,
|
||||
waku_sender: Sender<WakuMessage>,
|
||||
receiver: &mut Receiver<OutboundPacket>,
|
||||
) -> Result<(), DeliveryServiceError> {
|
||||
info!("Initializing waku node");
|
||||
let waku_node_init = WakuNode::new(
|
||||
node_port
|
||||
.parse::<usize>()
|
||||
.expect("Failed to parse node port"),
|
||||
)
|
||||
.await?;
|
||||
let waku_node = waku_node_init.start(waku_sender).await?;
|
||||
info!("Waku node started");
|
||||
|
||||
if let Some(peer_addresses) = peer_addresses {
|
||||
waku_node.connect_to_peers(peer_addresses).await?;
|
||||
info!("Connected to all peers");
|
||||
}
|
||||
|
||||
info!("Waiting for message to send to waku");
|
||||
while let Some(msg) = receiver.recv().await {
|
||||
debug!("Received message to send to waku");
|
||||
let id = waku_node.send_message(msg).await?;
|
||||
debug!("Successfully publish message with id: {id:?}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct OutboundCommand {
|
||||
pkt: OutboundPacket,
|
||||
reply: oneshot::Sender<Result<String, DeliveryServiceError>>,
|
||||
}
|
||||
|
||||
/// Waku-backed delivery service.
|
||||
///
|
||||
/// The service starts an embedded Waku node on its own dedicated thread/runtime.
|
||||
#[derive(Clone)]
|
||||
pub struct WakuDeliveryService {
|
||||
outbound: mpsc::Sender<OutboundCommand>,
|
||||
inbound: broadcast::Sender<InboundPacket>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WakuConfig {
|
||||
pub node_port: u16,
|
||||
/// Peer multiaddrs as strings.
|
||||
pub peers: Vec<String>,
|
||||
}
|
||||
|
||||
impl WakuDeliveryService {
|
||||
fn parse_peers(peers: Vec<String>) -> Result<Vec<Multiaddr>, DeliveryServiceError> {
|
||||
peers
|
||||
.into_iter()
|
||||
.filter(|s| !s.trim().is_empty())
|
||||
.map(|s| {
|
||||
s.parse::<Multiaddr>()
|
||||
.map_err(|e| DeliveryServiceError::Other(anyhow::anyhow!(e)))
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub async fn start(cfg: WakuConfig) -> Result<Self, DeliveryServiceError> {
|
||||
let (inbound_tx, _) = broadcast::channel::<InboundPacket>(256);
|
||||
let (out_tx, mut out_rx) = mpsc::channel::<OutboundCommand>(256);
|
||||
|
||||
// Ensure callers can await startup success/failure.
|
||||
let (ready_tx, ready_rx) = oneshot::channel::<Result<(), DeliveryServiceError>>();
|
||||
|
||||
let inbound_tx_thread = inbound_tx.clone();
|
||||
let peers = Self::parse_peers(cfg.peers)?;
|
||||
let node_port = cfg.node_port;
|
||||
|
||||
std::thread::Builder::new()
|
||||
.name("waku-node".into())
|
||||
.spawn(move || {
|
||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("waku tokio runtime");
|
||||
|
||||
rt.block_on(async move {
|
||||
let started = async {
|
||||
info!("Initializing waku node");
|
||||
let waku_node_init = WakuNode::new(node_port as usize).await?;
|
||||
|
||||
let inbound_tx_cb = inbound_tx_thread.clone();
|
||||
let waku_node = waku_node_init
|
||||
.start_with_handler(move |waku_msg| {
|
||||
let _ = inbound_tx_cb.send(waku_msg.into());
|
||||
})
|
||||
.await?;
|
||||
info!("Waku node started");
|
||||
|
||||
if !peers.is_empty() {
|
||||
waku_node.connect_to_peers(peers).await?;
|
||||
info!("Connected to all peers");
|
||||
}
|
||||
|
||||
Ok::<WakuNode<waku_bindings::Running>, DeliveryServiceError>(waku_node)
|
||||
}
|
||||
.await;
|
||||
|
||||
let waku_node = match started {
|
||||
Ok(node) => {
|
||||
let _ = ready_tx.send(Ok(()));
|
||||
node
|
||||
}
|
||||
Err(e) => {
|
||||
let _ = ready_tx.send(Err(e));
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
while let Some(cmd) = out_rx.recv().await {
|
||||
let res = waku_node.send_message(cmd.pkt).await;
|
||||
let _ = cmd.reply.send(res);
|
||||
}
|
||||
|
||||
info!("Waku outbound loop finished");
|
||||
});
|
||||
})
|
||||
.map_err(|e| DeliveryServiceError::Other(anyhow::anyhow!(e)))?;
|
||||
|
||||
// Wait for the node to either start or fail.
|
||||
ready_rx
|
||||
.await
|
||||
.map_err(|e| DeliveryServiceError::Other(anyhow::anyhow!(e)))??;
|
||||
|
||||
Ok(Self {
|
||||
outbound: out_tx,
|
||||
inbound: inbound_tx,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DeliveryService for WakuDeliveryService {
|
||||
async fn send(&self, pkt: OutboundPacket) -> Result<String, DeliveryServiceError> {
|
||||
let (reply_tx, reply_rx) = oneshot::channel();
|
||||
self.outbound
|
||||
.send(OutboundCommand {
|
||||
pkt,
|
||||
reply: reply_tx,
|
||||
})
|
||||
.await
|
||||
.map_err(|e| DeliveryServiceError::Other(anyhow::anyhow!(e)))?;
|
||||
|
||||
reply_rx
|
||||
.await
|
||||
.map_err(|e| DeliveryServiceError::Other(anyhow::anyhow!(e)))?
|
||||
}
|
||||
|
||||
fn subscribe(&self) -> broadcast::Receiver<InboundPacket> {
|
||||
self.inbound.subscribe()
|
||||
}
|
||||
}
|
||||
@@ -1,151 +0,0 @@
|
||||
use chrono::Utc;
|
||||
use core::result::Result;
|
||||
use kameo::{
|
||||
message::{Context, Message},
|
||||
Actor,
|
||||
};
|
||||
use log::debug;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use waku_bindings::{Running, WakuContentTopic, WakuMessage, WakuNodeHandle};
|
||||
|
||||
use crate::ds_waku::{pubsub_topic, GROUP_VERSION};
|
||||
use crate::{
|
||||
ds_waku::{build_content_topic, build_content_topics, content_filter},
|
||||
DeliveryServiceError,
|
||||
};
|
||||
|
||||
/// WakuActor is the actor that handles the Waku Node
|
||||
#[derive(Actor)]
|
||||
pub struct WakuActor {
|
||||
node: Arc<WakuNodeHandle<Running>>,
|
||||
}
|
||||
|
||||
impl WakuActor {
|
||||
/// Create a new WakuActor
|
||||
/// Input:
|
||||
/// - node: The Waku Node to handle. Waku Node is already running
|
||||
pub fn new(node: Arc<WakuNodeHandle<Running>>) -> Self {
|
||||
Self { node }
|
||||
}
|
||||
}
|
||||
|
||||
/// Message to send to the Waku Node
|
||||
/// This message is used to send a message to the Waku Node
|
||||
/// Input:
|
||||
/// - msg: The message to send
|
||||
/// - subtopic: The subtopic to send the message to
|
||||
/// - group_id: The group to send the message to
|
||||
/// - app_id: The app is unique identifier for the application that is sending the message for filtering own messages
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct ProcessMessageToSend {
|
||||
pub msg: Vec<u8>,
|
||||
pub subtopic: String,
|
||||
pub group_id: String,
|
||||
pub app_id: Vec<u8>,
|
||||
}
|
||||
|
||||
impl ProcessMessageToSend {
|
||||
/// Build a WakuMessage from the message to send
|
||||
/// Input:
|
||||
/// - msg: The message to send
|
||||
///
|
||||
/// Returns:
|
||||
/// - WakuMessage: The WakuMessage to send
|
||||
pub fn build_waku_message(&self) -> Result<WakuMessage, DeliveryServiceError> {
|
||||
let content_topic = build_content_topic(&self.group_id, GROUP_VERSION, &self.subtopic);
|
||||
Ok(WakuMessage::new(
|
||||
self.msg.clone(),
|
||||
content_topic,
|
||||
2,
|
||||
Utc::now().timestamp() as usize,
|
||||
self.app_id.clone(),
|
||||
true,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle the message to send to the Waku Node
|
||||
/// Input:
|
||||
/// - msg: The message to send
|
||||
///
|
||||
/// Returns:
|
||||
/// - msg_id: The message id of the message sent to the Waku Node
|
||||
impl Message<ProcessMessageToSend> for WakuActor {
|
||||
type Reply = Result<String, DeliveryServiceError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessMessageToSend,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let waku_message = msg.build_waku_message()?;
|
||||
let msg_id = self
|
||||
.node
|
||||
.relay_publish_message(&waku_message, Some(pubsub_topic()), None)
|
||||
.map_err(|e| {
|
||||
debug!("Failed to relay publish the message: {:?}", e);
|
||||
DeliveryServiceError::WakuPublishMessageError(e)
|
||||
})?;
|
||||
Ok(msg_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// Message for actor to subscribe to a group
|
||||
/// It contains the group name to subscribe to
|
||||
pub struct ProcessSubscribeToGroup {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
/// Handle the message for actor to subscribe to a group
|
||||
/// Input:
|
||||
/// - group_name: The group to subscribe to
|
||||
///
|
||||
/// Returns:
|
||||
/// - content_topics: The content topics of the group
|
||||
impl Message<ProcessSubscribeToGroup> for WakuActor {
|
||||
type Reply = Result<Vec<WakuContentTopic>, DeliveryServiceError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessSubscribeToGroup,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let content_topics = build_content_topics(&msg.group_name, GROUP_VERSION);
|
||||
let content_filter = content_filter(&pubsub_topic(), &content_topics);
|
||||
self.node.relay_subscribe(&content_filter).map_err(|e| {
|
||||
debug!("Failed to relay subscribe to the group: {:?}", e);
|
||||
DeliveryServiceError::WakuSubscribeToGroupError(e)
|
||||
})?;
|
||||
Ok(content_topics)
|
||||
}
|
||||
}
|
||||
|
||||
/// Message for actor to unsubscribe from a group
|
||||
/// It contains the group name to unsubscribe from
|
||||
pub struct ProcessUnsubscribeFromGroup {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
/// Handle the message for actor to unsubscribe from a group
|
||||
/// Input:
|
||||
/// - group_name: The group to unsubscribe from
|
||||
///
|
||||
/// Returns:
|
||||
/// - ()
|
||||
impl Message<ProcessUnsubscribeFromGroup> for WakuActor {
|
||||
type Reply = Result<(), DeliveryServiceError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessUnsubscribeFromGroup,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let content_topics = build_content_topics(&msg.group_name, GROUP_VERSION);
|
||||
let content_filter = content_filter(&pubsub_topic(), &content_topics);
|
||||
self.node
|
||||
.relay_unsubscribe(&content_filter)
|
||||
.map_err(|e| DeliveryServiceError::WakuRelayTopicsError(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,132 +1,110 @@
|
||||
use ds::{
|
||||
transport::{InboundPacket, OutboundPacket},
|
||||
waku::WakuNode,
|
||||
DeliveryServiceError, APP_MSG_SUBTOPIC,
|
||||
};
|
||||
use kameo::{
|
||||
actor::pubsub::PubSub,
|
||||
message::{Context, Message},
|
||||
Actor,
|
||||
};
|
||||
use log::{error, info};
|
||||
use std::{
|
||||
sync::{Arc, Mutex},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::sync::mpsc::channel;
|
||||
use tracing::info;
|
||||
use waku_bindings::WakuMessage;
|
||||
|
||||
use ds::{
|
||||
ds_waku::{
|
||||
build_content_topics, match_content_topic, setup_node_handle, APP_MSG_SUBTOPIC,
|
||||
GROUP_VERSION,
|
||||
},
|
||||
waku_actor::{ProcessMessageToSend, ProcessSubscribeToGroup, WakuActor},
|
||||
DeliveryServiceError,
|
||||
};
|
||||
use waku_bindings::waku_set_event_callback;
|
||||
|
||||
#[derive(Debug, Clone, Actor)]
|
||||
pub struct ActorA {
|
||||
pub struct Application {
|
||||
pub app_id: String,
|
||||
}
|
||||
|
||||
impl ActorA {
|
||||
impl Application {
|
||||
pub fn new() -> Self {
|
||||
let app_id = uuid::Uuid::new_v4().to_string();
|
||||
Self { app_id }
|
||||
}
|
||||
}
|
||||
|
||||
impl Message<WakuMessage> for ActorA {
|
||||
type Reply = Result<WakuMessage, DeliveryServiceError>;
|
||||
impl Default for Application {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Message<InboundPacket> for Application {
|
||||
type Reply = Result<InboundPacket, DeliveryServiceError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: WakuMessage,
|
||||
msg: InboundPacket,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
println!("ActorA received message: {:?}", msg.timestamp());
|
||||
info!("Application received message: {:?}", msg.timestamp);
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_waku_client() {
|
||||
let group_name = "new_group".to_string();
|
||||
let mut pubsub = PubSub::<WakuMessage>::new();
|
||||
let (sender_alice, mut receiver_alice) = channel(100);
|
||||
// TODO: get node from env
|
||||
let res = setup_node_handle(vec![]);
|
||||
assert!(res.is_ok());
|
||||
let node = res.unwrap();
|
||||
tracing_subscriber::fmt::init();
|
||||
let group_name = "new_group";
|
||||
let mut pubsub = PubSub::<InboundPacket>::new();
|
||||
|
||||
let (sender, _) = channel::<WakuMessage>(100);
|
||||
let waku_node_default = WakuNode::new(60002)
|
||||
.await
|
||||
.expect("Failed to create WakuNode");
|
||||
|
||||
let (sender_alice, mut receiver_alice) = channel::<WakuMessage>(100);
|
||||
let waku_node_init = WakuNode::new(60001)
|
||||
.await
|
||||
.expect("Failed to create WakuNode");
|
||||
|
||||
let uuid = uuid::Uuid::new_v4().as_bytes().to_vec();
|
||||
let waku_actor = WakuActor::new(Arc::new(node));
|
||||
let actor_ref = kameo::spawn(waku_actor);
|
||||
|
||||
let actor_a = ActorA::new();
|
||||
let actor_a = Application::new();
|
||||
let actor_a_ref = kameo::spawn(actor_a);
|
||||
|
||||
pubsub.subscribe(actor_a_ref);
|
||||
|
||||
let content_topics = Arc::new(Mutex::new(build_content_topics(&group_name, GROUP_VERSION)));
|
||||
// let content_topics = Arc::new(Mutex::new(build_content_topics(&group_name)));
|
||||
|
||||
assert!(actor_ref
|
||||
.ask(ProcessSubscribeToGroup {
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
let waku_node_default = waku_node_default
|
||||
.start(sender)
|
||||
.await
|
||||
.is_ok());
|
||||
.expect("Failed to start WakuNode");
|
||||
|
||||
waku_set_event_callback(move |signal| {
|
||||
match signal.event() {
|
||||
waku_bindings::Event::WakuMessage(event) => {
|
||||
let content_topic = event.waku_message().content_topic();
|
||||
// Check if message belongs to a relevant topic
|
||||
assert!(match_content_topic(&content_topics, content_topic));
|
||||
let msg = event.waku_message().clone();
|
||||
info!("msg: {:?}", msg.timestamp());
|
||||
assert!(sender_alice.blocking_send(msg).is_ok());
|
||||
}
|
||||
let node_name = waku_node_default
|
||||
.listen_addresses()
|
||||
.await
|
||||
.expect("Failed to get listen addresses");
|
||||
let waku_node = waku_node_init
|
||||
.start(sender_alice)
|
||||
.await
|
||||
.expect("Failed to start WakuNode");
|
||||
|
||||
waku_bindings::Event::Unrecognized(data) => {
|
||||
error!("Unrecognized event!\n {data:?}");
|
||||
}
|
||||
_ => {
|
||||
error!(
|
||||
"Unrecognized signal!\n {:?}",
|
||||
serde_json::to_string(&signal)
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
waku_node
|
||||
.connect_to_peers(node_name)
|
||||
.await
|
||||
.expect("Failed to connect to peers");
|
||||
|
||||
let sender = tokio::spawn(async move {
|
||||
for _ in 0..10 {
|
||||
assert!(actor_ref
|
||||
.ask(ProcessMessageToSend {
|
||||
msg: format!("test_message").as_bytes().to_vec(),
|
||||
subtopic: APP_MSG_SUBTOPIC.to_string(),
|
||||
group_id: group_name.clone(),
|
||||
app_id: uuid.clone(),
|
||||
})
|
||||
.await
|
||||
.is_ok());
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
info!("sender handle is finished");
|
||||
});
|
||||
|
||||
let receiver = tokio::spawn(async move {
|
||||
tokio::spawn(async move {
|
||||
while let Some(msg) = receiver_alice.recv().await {
|
||||
info!("msg received: {:?}", msg.timestamp());
|
||||
pubsub.publish(msg).await;
|
||||
info!("msg received from receiver_alice: {:?}", msg.timestamp);
|
||||
pubsub.publish(msg.into()).await;
|
||||
}
|
||||
info!("receiver handle is finished");
|
||||
});
|
||||
|
||||
tokio::select! {
|
||||
x = sender => {
|
||||
info!("get from sender: {:?}", x);
|
||||
}
|
||||
w = receiver => {
|
||||
info!("get from receiver: {:?}", w);
|
||||
}
|
||||
}
|
||||
tokio::task::block_in_place(move || {
|
||||
tokio::runtime::Handle::current().block_on(async move {
|
||||
let res = waku_node
|
||||
.send_message(OutboundPacket::new(
|
||||
"test_message_1".as_bytes().to_vec(),
|
||||
APP_MSG_SUBTOPIC,
|
||||
group_name,
|
||||
&uuid,
|
||||
))
|
||||
.await;
|
||||
info!("res: {:?}", res);
|
||||
info!("sender handle is finished");
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
10
frontend/.gitignore
vendored
10
frontend/.gitignore
vendored
@@ -1,10 +0,0 @@
|
||||
.DS_Store
|
||||
node_modules
|
||||
/build
|
||||
/.svelte-kit
|
||||
/package
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
vite.config.js.timestamp-*
|
||||
vite.config.ts.timestamp-*
|
||||
@@ -1 +0,0 @@
|
||||
engine-strict=true
|
||||
@@ -1,11 +0,0 @@
|
||||
FROM node:18-alpine
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY package.json package-lock.json ./
|
||||
RUN npm install
|
||||
|
||||
COPY . .
|
||||
|
||||
EXPOSE 5173
|
||||
CMD [ "npm", "run", "dev", "--", "--host", "0.0.0.0", "--port", "5173"]
|
||||
2656
frontend/package-lock.json
generated
2656
frontend/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,33 +0,0 @@
|
||||
{
|
||||
"name": "client",
|
||||
"version": "0.0.1",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "vite dev",
|
||||
"build": "vite build",
|
||||
"preview": "vite preview",
|
||||
"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
|
||||
"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@sveltejs/adapter-auto": "^2.0.0",
|
||||
"@sveltejs/adapter-netlify": "^1.0.0-next.88",
|
||||
"@sveltejs/kit": "^1.15.2",
|
||||
"@tailwindcss/typography": "^0.5.9",
|
||||
"autoprefixer": "^10.4.14",
|
||||
"daisyui": "^2.51.3",
|
||||
"postcss": "^8.4.21",
|
||||
"svelte": "^3.54.0",
|
||||
"svelte-check": "^3.0.1",
|
||||
"tailwindcss": "^3.2.7",
|
||||
"tslib": "^2.4.1",
|
||||
"typescript": "^4.9.3",
|
||||
"vite": "^4.5.2"
|
||||
},
|
||||
"type": "module",
|
||||
"dependencies": {
|
||||
"@sveltejs/adapter-static": "^2.0.1",
|
||||
"svelte-french-toast": "^1.0.4-beta.0",
|
||||
"svelte-preprocess": "^5.0.1"
|
||||
}
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
module.exports = {
|
||||
plugins: {
|
||||
tailwindcss: {},
|
||||
autoprefixer: {},
|
||||
},
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
12
frontend/src/app.d.ts
vendored
12
frontend/src/app.d.ts
vendored
@@ -1,12 +0,0 @@
|
||||
// See https://kit.svelte.dev/docs/types#app
|
||||
// for information about these interfaces
|
||||
declare global {
|
||||
namespace App {
|
||||
// interface Error {}
|
||||
// interface Locals {}
|
||||
// interface PageData {}
|
||||
// interface Platform {}
|
||||
}
|
||||
}
|
||||
|
||||
export {};
|
||||
@@ -1,12 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<link rel="icon" href="%sveltekit.assets%/favicon.png" />
|
||||
<meta name="viewport" content="width=device-width" />
|
||||
%sveltekit.head%
|
||||
</head>
|
||||
<body data-sveltekit-preload-data="hover">
|
||||
<div style="display: contents">%sveltekit.body%</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,7 +0,0 @@
|
||||
import { writable } from "svelte/store";
|
||||
|
||||
export const user = writable("");
|
||||
export const channel = writable("");
|
||||
export const eth_private_key = writable("");
|
||||
export const group = writable("");
|
||||
export const createNewRoom = writable(false);
|
||||
@@ -1,11 +0,0 @@
|
||||
<script>
|
||||
import "../app.css"
|
||||
import {Toaster} from 'svelte-french-toast';
|
||||
</script>
|
||||
|
||||
<Toaster/>
|
||||
<div class="flex flex-col h-screen sm:justify-center sm:items-center">
|
||||
<div class="mx-auto px-3 md:px-0 sm:w-full md:w-4/5 lg:w-3/5 pt-24 sm:pt-0">
|
||||
<slot/>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1,99 +0,0 @@
|
||||
<script lang="ts">
|
||||
import {user, channel, eth_private_key, group, createNewRoom} from "$lib/stores/user"
|
||||
import {goto, invalidate} from '$app/navigation';
|
||||
import { env } from '$env/dynamic/public'
|
||||
import toast from 'svelte-french-toast';
|
||||
|
||||
let status, rooms;
|
||||
export let data;
|
||||
$:({status, rooms} = data);
|
||||
|
||||
let eth_pk = "";
|
||||
let room = "";
|
||||
let create_new_room = false;
|
||||
const join_room = () => {
|
||||
eth_private_key.set(eth_pk);
|
||||
group.set(room);
|
||||
createNewRoom.set(create_new_room);
|
||||
goto("/chat");
|
||||
}
|
||||
const select_room = (selected_room: string) => {
|
||||
room = selected_room;
|
||||
};
|
||||
const filled_in = () => {
|
||||
return !(eth_pk.length > 0 && room.length > 0);
|
||||
};
|
||||
|
||||
const reload = () => {
|
||||
toast.success("Reloaded rooms")
|
||||
let url = `${env.PUBLIC_API_URL}`;
|
||||
if (url.endsWith("/")) {
|
||||
url = url.slice(0, -1);
|
||||
}
|
||||
invalidate(`${url}/rooms`);
|
||||
}
|
||||
</script>
|
||||
|
||||
<div class="flex flex-col justify-center">
|
||||
<div class="title">
|
||||
<h1 class="text-3xl font-bold text-center">Chatr: a Websocket chatroom</h1>
|
||||
</div>
|
||||
<div class="join self-center">
|
||||
</div>
|
||||
<div class="rooms self-center my-5">
|
||||
<div class="flex justify-between py-2">
|
||||
<h2 class="text-xl font-bold ">
|
||||
List of active chatroom's
|
||||
</h2>
|
||||
<button class="btn btn-square btn-sm btn-accent" on:click={reload}>↻</button>
|
||||
</div>
|
||||
{#if status && rooms.length < 1}
|
||||
<div class="card bg-base-300 w-96 shadow-xl text-center">
|
||||
<div class="card-body">
|
||||
<h3 class="card-title ">{status}</h3>
|
||||
</div>
|
||||
</div>
|
||||
{/if}
|
||||
{#if rooms}
|
||||
{#each rooms as room}
|
||||
<div class="card bg-base-300 w-96 shadow-xl my-3" on:click={select_room(room)}>
|
||||
<div class="card-body">
|
||||
<div class="flex justify-between">
|
||||
<h2 class="card-title">{room}</h2>
|
||||
<button class="btn btn-primary btn-md">Select Room</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{/each}
|
||||
{/if}
|
||||
</div>
|
||||
<div class="create self-center my-5 w-[40rem]">
|
||||
<div>
|
||||
<label class="label" for="eth-private-key">
|
||||
<span class="label-text">Eth Private Key</span>
|
||||
</label>
|
||||
<input id="eth-private-key" placeholder="Eth Private Key" bind:value={eth_pk}
|
||||
class="input input-bordered input-primary w-full bg-base-200 mb-4 mr-3">
|
||||
</div>
|
||||
<div>
|
||||
<label class="label" for="room-name">
|
||||
<span class="label-text">Room name</span>
|
||||
</label>
|
||||
<input id="room-name" placeholder="Room Name" bind:value={room}
|
||||
class="input input-bordered input-primary w-full bg-base-200 mb-4 mr-3">
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer">
|
||||
<span class="label-text">Create Room</span>
|
||||
<input type="checkbox" class="checkbox checkbox-primary" bind:checked={create_new_room} />
|
||||
</label>
|
||||
</div>
|
||||
<button class="btn btn-primary" disabled="{filled_in(eth_pk, room, create_new_room)}" on:click={join_room}>Join Room.</button>
|
||||
</div>
|
||||
<div class="github self-center">
|
||||
<p>
|
||||
Check out <a class="link link-accent" href="https://github.com/0xLaurens/chatr" target="_blank"
|
||||
rel="noreferrer">Chatr</a>, to view the source code!
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1,18 +0,0 @@
|
||||
import type {PageLoad} from './$types';
|
||||
import {env} from "$env/dynamic/public";
|
||||
|
||||
export const load: PageLoad = async ({fetch}) => {
|
||||
try {
|
||||
let url = `${env.PUBLIC_API_URL}`;
|
||||
if (url.endsWith("/")) {
|
||||
url = url.slice(0, -1);
|
||||
}
|
||||
const res = await fetch(`${url}/rooms`);
|
||||
return await res.json();
|
||||
} catch (e) {
|
||||
return {
|
||||
status: "API offline (try again in a min)",
|
||||
rooms: []
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
<script lang="ts">
|
||||
import {onMount, onDestroy} from "svelte";
|
||||
import {user, channel, eth_private_key, group, createNewRoom} from "../../lib/stores/user";
|
||||
import {goto} from '$app/navigation';
|
||||
import {env} from '$env/dynamic/public'
|
||||
import toast from "svelte-french-toast";
|
||||
import { json } from "@sveltejs/kit";
|
||||
|
||||
let status = "🔴";
|
||||
let statusTip = "Disconnected";
|
||||
let message = "";
|
||||
let messages: any[] = [];
|
||||
let socket: WebSocket;
|
||||
let interval: number;
|
||||
let delay = 2000;
|
||||
let timeout = false;
|
||||
$: {
|
||||
if (interval || (!timeout && interval)) {
|
||||
clearInterval(interval);
|
||||
}
|
||||
|
||||
if (timeout == true) {
|
||||
interval = setInterval(() => {
|
||||
if (delay < 30_000) delay = delay * 2;
|
||||
console.log("reconnecting in:", delay)
|
||||
connect();
|
||||
}, delay)
|
||||
}
|
||||
}
|
||||
|
||||
function connect() {
|
||||
socket = new WebSocket(`${env.PUBLIC_WEBSOCKET_URL}/ws`)
|
||||
socket.addEventListener("open", () => {
|
||||
status = "🟢"
|
||||
statusTip = "Connected";
|
||||
timeout = false;
|
||||
socket.send(JSON.stringify({
|
||||
eth_private_key: $eth_private_key,
|
||||
group_id: $group,
|
||||
should_create: $createNewRoom,
|
||||
}));
|
||||
})
|
||||
|
||||
socket.addEventListener("close", () => {
|
||||
status = "🔴";
|
||||
statusTip = "Disconnected";
|
||||
if (timeout == false) {
|
||||
delay = 2000;
|
||||
timeout = true;
|
||||
}
|
||||
})
|
||||
|
||||
socket.addEventListener('message', function (event) {
|
||||
if (event.data == "Username already taken.") {
|
||||
toast.error(event.data)
|
||||
goto("/");
|
||||
} else {
|
||||
messages = [...messages, event.data]
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
onMount(() => {
|
||||
if ($eth_private_key.length < 1 || $group.length < 1 ) {
|
||||
toast.error("Something went wrong!")
|
||||
goto("/");
|
||||
} else {
|
||||
connect()
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
onDestroy(() => {
|
||||
if (socket) {
|
||||
socket.close()
|
||||
}
|
||||
if (interval) {
|
||||
clearInterval(interval)
|
||||
}
|
||||
timeout = false
|
||||
})
|
||||
|
||||
const sendMessage = () => {
|
||||
socket.send(JSON.stringify({
|
||||
message: message,
|
||||
group_id: $group,
|
||||
}));
|
||||
message = "";
|
||||
};
|
||||
const clear_messages = () => {
|
||||
messages = [];
|
||||
};
|
||||
|
||||
|
||||
</script>
|
||||
<div class="title flex justify-between">
|
||||
<h1 class="text-3xl font-bold cursor-default">Chat Room <span class="tooltip" data-tip="{statusTip}">{status}</span>
|
||||
</h1>
|
||||
<button class="btn btn-accent" on:click={clear_messages}>clear</button>
|
||||
</div>
|
||||
<div class="card h-96 flex-grow bg-base-300 shadow-xl my-10">
|
||||
<div class="card-body">
|
||||
<div class="flex flex-col overflow-y-auto max-h-80 scroll-smooth">
|
||||
{#each messages as msg}
|
||||
<div class="my-2">{msg}</div>
|
||||
{/each}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="message-box flex justify-end">
|
||||
<form on:submit|preventDefault={sendMessage}>
|
||||
<input placeholder="Message" class="input input-bordered input-primary w-[51rem] bg-base-200 mb-2"
|
||||
bind:value={message}>
|
||||
<button class="btn btn-primary w-full sm:w-auto btn-wide">Send</button>
|
||||
</form>
|
||||
</div>
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 1.5 KiB |
@@ -1,16 +0,0 @@
|
||||
import adapter from '@sveltejs/adapter-netlify';
|
||||
import preprocess from 'svelte-preprocess';
|
||||
|
||||
/** @type {import('@sveltejs/kit').Config} */
|
||||
const config = {
|
||||
preprocess: preprocess({
|
||||
postcss: true
|
||||
}),
|
||||
|
||||
kit: {
|
||||
adapter: adapter()
|
||||
}
|
||||
};
|
||||
|
||||
export default config;
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
/** @type {import('tailwindcss').Config} */
|
||||
module.exports = {
|
||||
content: ['./src/**/*.{html,js,svelte,ts}'],
|
||||
theme: {
|
||||
extend: {}
|
||||
},
|
||||
plugins: [require('@tailwindcss/typography'), require('daisyui')],
|
||||
};
|
||||
@@ -1,21 +0,0 @@
|
||||
{
|
||||
"extends": "./.svelte-kit/tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"allowJs": true,
|
||||
"checkJs": true,
|
||||
"esModuleInterop": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"resolveJsonModule": true,
|
||||
"skipLibCheck": true,
|
||||
"sourceMap": true,
|
||||
"strict": true,
|
||||
"paths": {
|
||||
"$lib": ["src/lib"],
|
||||
"$lib/*": ["src/lib/*"]
|
||||
}
|
||||
}
|
||||
// Path aliases are handled by https://kit.svelte.dev/docs/configuration#alias
|
||||
//
|
||||
// If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes
|
||||
// from the referenced tsconfig.json - TypeScript does not merge them in
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
import {sveltekit} from '@sveltejs/kit/vite';
|
||||
import {defineConfig} from 'vite';
|
||||
|
||||
export default defineConfig({
|
||||
plugins: [sveltekit()],
|
||||
});
|
||||
@@ -1,12 +1,24 @@
|
||||
[package]
|
||||
name = "mls_crypto"
|
||||
version = "0.1.0"
|
||||
version = "1.0.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
openmls = { version = "=0.5.0", features = ["test-utils"] }
|
||||
openmls_basic_credential = "=0.2.0"
|
||||
openmls_rust_crypto = "=0.2.0"
|
||||
openmls_traits = "=0.2.0"
|
||||
openmls = { version = "0.6.0"}
|
||||
openmls_basic_credential = "0.3.0"
|
||||
openmls_rust_crypto = "0.3.0"
|
||||
openmls_traits = "0.3.0"
|
||||
|
||||
anyhow = "1.0.81"
|
||||
thiserror = "1.0.39"
|
||||
|
||||
alloy = { version = "1.0.37", features = [
|
||||
"providers",
|
||||
"node-bindings",
|
||||
"network",
|
||||
"transports",
|
||||
"k256",
|
||||
"signer-local",
|
||||
] }
|
||||
|
||||
22
mls_crypto/src/error.rs
Normal file
22
mls_crypto/src/error.rs
Normal file
@@ -0,0 +1,22 @@
|
||||
use openmls::{
|
||||
error::LibraryError,
|
||||
prelude::{CredentialError, KeyPackageNewError},
|
||||
};
|
||||
use openmls_rust_crypto::MemoryStorageError;
|
||||
use openmls_traits::types::CryptoError;
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum IdentityError {
|
||||
#[error(transparent)]
|
||||
UnableToCreateKeyPackage(#[from] KeyPackageNewError),
|
||||
#[error("Invalid hash reference: {0}")]
|
||||
InvalidHashRef(#[from] LibraryError),
|
||||
#[error("Unable to create new signer: {0}")]
|
||||
UnableToCreateSigner(#[from] CryptoError),
|
||||
#[error("Unable to save signature key: {0}")]
|
||||
UnableToSaveSignatureKey(#[from] MemoryStorageError),
|
||||
#[error("Unable to create credential: {0}")]
|
||||
UnableToCreateCredential(#[from] CredentialError),
|
||||
#[error("Invalid wallet address: {0}")]
|
||||
InvalidWalletAddress(String),
|
||||
}
|
||||
206
mls_crypto/src/identity.rs
Normal file
206
mls_crypto/src/identity.rs
Normal file
@@ -0,0 +1,206 @@
|
||||
use alloy::{hex, primitives::Address, signers::local::PrivateKeySigner};
|
||||
use openmls::{credentials::CredentialWithKey, key_packages::KeyPackage, prelude::BasicCredential};
|
||||
use openmls_basic_credential::SignatureKeyPair;
|
||||
use openmls_traits::{types::Ciphersuite, OpenMlsProvider};
|
||||
use std::{collections::HashMap, fmt::Display, str::FromStr};
|
||||
|
||||
use crate::error::IdentityError;
|
||||
use crate::openmls_provider::{MlsProvider, CIPHERSUITE};
|
||||
|
||||
pub struct Identity {
|
||||
pub(crate) kp: HashMap<Vec<u8>, KeyPackage>,
|
||||
pub(crate) credential_with_key: CredentialWithKey,
|
||||
pub(crate) signer: SignatureKeyPair,
|
||||
}
|
||||
|
||||
impl Identity {
|
||||
pub fn new(
|
||||
ciphersuite: Ciphersuite,
|
||||
provider: &MlsProvider,
|
||||
user_wallet_address: &[u8],
|
||||
) -> Result<Identity, IdentityError> {
|
||||
let credential = BasicCredential::new(user_wallet_address.to_vec());
|
||||
let signer = SignatureKeyPair::new(ciphersuite.signature_algorithm())?;
|
||||
let credential_with_key = CredentialWithKey {
|
||||
credential: credential.into(),
|
||||
signature_key: signer.to_public_vec().into(),
|
||||
};
|
||||
signer.store(provider.storage())?;
|
||||
|
||||
let mut kps = HashMap::new();
|
||||
let key_package_bundle = KeyPackage::builder().build(
|
||||
CIPHERSUITE,
|
||||
provider,
|
||||
&signer,
|
||||
credential_with_key.clone(),
|
||||
)?;
|
||||
let key_package = key_package_bundle.key_package();
|
||||
let kp = key_package.hash_ref(provider.crypto())?;
|
||||
kps.insert(kp.as_slice().to_vec(), key_package.clone());
|
||||
|
||||
Ok(Identity {
|
||||
kp: kps,
|
||||
credential_with_key,
|
||||
signer,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create an additional key package using the credential_with_key/signer bound to this identity
|
||||
pub fn generate_key_package(
|
||||
&mut self,
|
||||
crypto: &MlsProvider,
|
||||
) -> Result<KeyPackage, IdentityError> {
|
||||
let key_package_bundle = KeyPackage::builder().build(
|
||||
CIPHERSUITE,
|
||||
crypto,
|
||||
&self.signer,
|
||||
self.credential_with_key.clone(),
|
||||
)?;
|
||||
let key_package = key_package_bundle.key_package();
|
||||
let kp = key_package.hash_ref(crypto.crypto())?;
|
||||
self.kp.insert(kp.as_slice().to_vec(), key_package.clone());
|
||||
Ok(key_package.clone())
|
||||
}
|
||||
|
||||
/// Get the plain identity as byte vector.
|
||||
pub fn identity(&self) -> &[u8] {
|
||||
self.credential_with_key.credential.serialized_content()
|
||||
}
|
||||
|
||||
pub fn identity_string(&self) -> String {
|
||||
Address::from_slice(self.credential_with_key.credential.serialized_content()).to_string()
|
||||
}
|
||||
|
||||
pub fn signer(&self) -> &SignatureKeyPair {
|
||||
&self.signer
|
||||
}
|
||||
|
||||
pub fn credential_with_key(&self) -> CredentialWithKey {
|
||||
self.credential_with_key.clone()
|
||||
}
|
||||
|
||||
pub fn signature_key(&self) -> Vec<u8> {
|
||||
self.credential_with_key.signature_key.as_slice().to_vec()
|
||||
}
|
||||
|
||||
pub fn is_key_package_exists(&self, kp_hash_ref: &[u8]) -> bool {
|
||||
self.kp.contains_key(kp_hash_ref)
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Identity {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.identity_string())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn random_identity() -> Result<Identity, IdentityError> {
|
||||
let signer = PrivateKeySigner::random();
|
||||
let user_address = signer.address();
|
||||
|
||||
let crypto = MlsProvider::default();
|
||||
let id = Identity::new(CIPHERSUITE, &crypto, user_address.as_slice())?;
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
/// Validates and normalizes Ethereum-style wallet addresses.
|
||||
///
|
||||
/// Accepts either `0x`-prefixed or raw 40-character hex strings, returning a lowercase,
|
||||
/// `0x`-prefixed representation on success.
|
||||
pub fn normalize_wallet_address_str(address: &str) -> Result<String, IdentityError> {
|
||||
parse_wallet_address(address).map(|addr| addr.to_string())
|
||||
}
|
||||
|
||||
/// Parses an Ethereum wallet address into an [`Address`] after validation.
|
||||
///
|
||||
/// This ensures the address is 20 bytes / 40 hex chars and contains only hexadecimal digits.
|
||||
pub fn parse_wallet_address(address: &str) -> Result<Address, IdentityError> {
|
||||
let trimmed = address.trim();
|
||||
if trimmed.is_empty() {
|
||||
return Err(IdentityError::InvalidWalletAddress(address.to_string()));
|
||||
}
|
||||
|
||||
let hex_part = trimmed
|
||||
.strip_prefix("0x")
|
||||
.or_else(|| trimmed.strip_prefix("0X"))
|
||||
.unwrap_or(trimmed);
|
||||
|
||||
if hex_part.len() != 40 || !hex_part.chars().all(|c| c.is_ascii_hexdigit()) {
|
||||
return Err(IdentityError::InvalidWalletAddress(trimmed.to_string()));
|
||||
}
|
||||
|
||||
let normalized = format!("0x{}", hex_part.to_ascii_lowercase());
|
||||
Address::from_str(&normalized)
|
||||
.map_err(|_| IdentityError::InvalidWalletAddress(trimmed.to_string()))
|
||||
}
|
||||
|
||||
fn is_prefixed_hex(input: &str) -> bool {
|
||||
let rest = input
|
||||
.strip_prefix("0x")
|
||||
.or_else(|| input.strip_prefix("0X"));
|
||||
match rest {
|
||||
Some(hex_part) if !hex_part.is_empty() => hex_part.chars().all(|c| c.is_ascii_hexdigit()),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_raw_hex(input: &str) -> bool {
|
||||
!input.is_empty() && input.chars().all(|c| c.is_ascii_hexdigit())
|
||||
}
|
||||
|
||||
pub fn normalize_wallet_address(raw: &[u8]) -> String {
|
||||
let as_utf8 = std::str::from_utf8(raw)
|
||||
.map(|s| s.trim())
|
||||
.unwrap_or_default();
|
||||
|
||||
if is_prefixed_hex(as_utf8) {
|
||||
return as_utf8.to_string();
|
||||
}
|
||||
|
||||
if is_raw_hex(as_utf8) {
|
||||
return format!("0x{}", as_utf8);
|
||||
}
|
||||
|
||||
if raw.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
format!("0x{}", hex::encode(raw))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{is_prefixed_hex, normalize_wallet_address};
|
||||
|
||||
#[test]
|
||||
fn keeps_prefixed_hex() {
|
||||
let addr = normalize_wallet_address(b"0xAbCd1234");
|
||||
assert_eq!(addr, "0xAbCd1234");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prefixes_raw_hex() {
|
||||
let addr = normalize_wallet_address(b"ABCD1234");
|
||||
assert_eq!(addr, "0xABCD1234");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn encodes_binary_bytes() {
|
||||
let addr = normalize_wallet_address(&[0x11, 0x22, 0x33]);
|
||||
assert_eq!(addr, "0x112233");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trims_ascii_input() {
|
||||
let addr = normalize_wallet_address(b" 0x1F ");
|
||||
assert_eq!(addr, "0x1F");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prefixed_hex_helper() {
|
||||
assert!(is_prefixed_hex("0xabc"));
|
||||
assert!(is_prefixed_hex("0XABC"));
|
||||
assert!(!is_prefixed_hex("abc"));
|
||||
assert!(!is_prefixed_hex("0x"));
|
||||
}
|
||||
}
|
||||
@@ -1 +1,5 @@
|
||||
pub mod error;
|
||||
pub mod identity;
|
||||
pub mod openmls_provider;
|
||||
|
||||
pub use identity::{normalize_wallet_address_str, parse_wallet_address};
|
||||
|
||||
@@ -1,20 +1,19 @@
|
||||
use openmls::prelude::*;
|
||||
use openmls_rust_crypto::MemoryKeyStore;
|
||||
use openmls_rust_crypto::RustCrypto;
|
||||
use openmls_traits::OpenMlsCryptoProvider;
|
||||
use openmls::prelude::Ciphersuite;
|
||||
use openmls_rust_crypto::{MemoryStorage, RustCrypto};
|
||||
use openmls_traits::OpenMlsProvider;
|
||||
|
||||
pub const CIPHERSUITE: Ciphersuite = Ciphersuite::MLS_128_DHKEMX25519_AES128GCM_SHA256_Ed25519;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MlsCryptoProvider {
|
||||
pub struct MlsProvider {
|
||||
crypto: RustCrypto,
|
||||
key_storage: MemoryKeyStore,
|
||||
storage: MemoryStorage,
|
||||
}
|
||||
|
||||
impl OpenMlsCryptoProvider for MlsCryptoProvider {
|
||||
impl OpenMlsProvider for MlsProvider {
|
||||
type CryptoProvider = RustCrypto;
|
||||
type RandProvider = RustCrypto;
|
||||
type KeyStoreProvider = MemoryKeyStore;
|
||||
type StorageProvider = MemoryStorage;
|
||||
|
||||
fn crypto(&self) -> &Self::CryptoProvider {
|
||||
&self.crypto
|
||||
@@ -24,7 +23,7 @@ impl OpenMlsCryptoProvider for MlsCryptoProvider {
|
||||
&self.crypto
|
||||
}
|
||||
|
||||
fn key_store(&self) -> &Self::KeyStoreProvider {
|
||||
&self.key_storage
|
||||
fn storage(&self) -> &Self::StorageProvider {
|
||||
&self.storage
|
||||
}
|
||||
}
|
||||
|
||||
95
src/bootstrap.rs
Normal file
95
src/bootstrap.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
// de_mls/src/bootstrap.rs
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::broadcast;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::info;
|
||||
|
||||
use ds::{
|
||||
transport::{DeliveryService, InboundPacket},
|
||||
waku::{WakuConfig, WakuDeliveryService},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
error::BootstrapError,
|
||||
user_app_instance::{AppState, CoreCtx},
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct BootstrapConfig {
|
||||
/// TCP/UDP port for the embedded Waku node
|
||||
pub node_port: u16,
|
||||
/// Peer multiaddrs as strings (parsed by the transport impl).
|
||||
pub peers: Vec<String>,
|
||||
}
|
||||
|
||||
pub struct Bootstrap<DS: DeliveryService> {
|
||||
pub core: Arc<CoreCtx<DS>>,
|
||||
/// Cancels the Waku→broadcast forwarder task
|
||||
pub cancel: CancellationToken,
|
||||
}
|
||||
|
||||
pub async fn bootstrap_core(
|
||||
cfg: BootstrapConfig,
|
||||
) -> Result<Bootstrap<WakuDeliveryService>, BootstrapError> {
|
||||
let delivery = WakuDeliveryService::start(WakuConfig {
|
||||
node_port: cfg.node_port,
|
||||
peers: cfg.peers,
|
||||
})
|
||||
.await?;
|
||||
|
||||
// Broadcast inbound packets inside the app
|
||||
let (pubsub_tx, _) = broadcast::channel::<InboundPacket>(100);
|
||||
|
||||
// Subscribe before moving delivery into AppState.
|
||||
let mut rx = delivery.subscribe();
|
||||
|
||||
let app_state = Arc::new(AppState {
|
||||
delivery,
|
||||
pubsub: pubsub_tx.clone(),
|
||||
});
|
||||
|
||||
let core = Arc::new(CoreCtx::new(app_state.clone()));
|
||||
|
||||
// Forward delivery-service packets into broadcast
|
||||
let forward_cancel = CancellationToken::new();
|
||||
{
|
||||
let forward_cancel = forward_cancel.clone();
|
||||
tokio::spawn(async move {
|
||||
info!("Forwarding delivery → broadcast started");
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = forward_cancel.cancelled() => break,
|
||||
res = rx.recv() => {
|
||||
match res {
|
||||
Ok(pkt) => { let _ = pubsub_tx.send(pkt); }
|
||||
Err(broadcast::error::RecvError::Lagged(_)) => continue,
|
||||
Err(broadcast::error::RecvError::Closed) => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("Forwarding delivery → broadcast stopped");
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Bootstrap {
|
||||
core,
|
||||
cancel: forward_cancel,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn bootstrap_core_from_env() -> Result<Bootstrap<WakuDeliveryService>, BootstrapError> {
|
||||
let node_port = std::env::var("NODE_PORT")
|
||||
.map_err(|e| BootstrapError::EnvVar("NODE_PORT", e))?
|
||||
.parse::<u16>()?;
|
||||
|
||||
let peer_addresses =
|
||||
std::env::var("PEER_ADDRESSES").map_err(|e| BootstrapError::EnvVar("PEER_ADDRESSES", e))?;
|
||||
let peers = peer_addresses
|
||||
.split(',')
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
bootstrap_core(BootstrapConfig { node_port, peers }).await
|
||||
}
|
||||
308
src/consensus/mod.rs
Normal file
308
src/consensus/mod.rs
Normal file
@@ -0,0 +1,308 @@
|
||||
//! Consensus module implementing HashGraph-like consensus for distributed voting
|
||||
//!
|
||||
//! This module implements the consensus protocol described in the [RFC](https://github.com/vacp2p/rfc-index/blob/consensus-hashgraph-like/vac/raw/consensus-hashgraphlike.md)
|
||||
//!
|
||||
//! The consensus is designed to work with GossipSub-like networks and provides:
|
||||
//! - Proposal management
|
||||
//! - Vote collection and validation
|
||||
//! - Consensus reached detection
|
||||
use prost::Message;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::collections::HashMap;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::sync::broadcast;
|
||||
use tracing::info;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::error::ConsensusError;
|
||||
use crate::protos::consensus::v1::{Outcome, Proposal, ProposalResult, Vote};
|
||||
use crate::LocalSigner;
|
||||
|
||||
pub mod service;
|
||||
pub use service::ConsensusService;
|
||||
|
||||
/// Consensus events emitted when consensus state changes
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ConsensusEvent {
|
||||
/// Consensus has been reached for a proposal
|
||||
ConsensusReached { proposal_id: u32, result: bool },
|
||||
/// Consensus failed due to timeout or other reasons
|
||||
ConsensusFailed { proposal_id: u32, reason: String },
|
||||
}
|
||||
|
||||
/// Consensus configuration
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ConsensusConfig {
|
||||
/// Minimum number of votes required for consensus (as percentage of expected voters)
|
||||
pub consensus_threshold: f64,
|
||||
/// Timeout for consensus rounds in seconds
|
||||
pub consensus_timeout: u64,
|
||||
/// Maximum number of rounds before consensus is considered failed
|
||||
pub max_rounds: u32,
|
||||
/// Whether to use liveness criteria for silent peers
|
||||
pub liveness_criteria: bool,
|
||||
}
|
||||
|
||||
impl Default for ConsensusConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
consensus_threshold: 0.67, // 67% supermajority
|
||||
consensus_timeout: 10, // 10 seconds
|
||||
max_rounds: 3, // Maximum 3 rounds
|
||||
liveness_criteria: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Consensus state for a proposal
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ConsensusState {
|
||||
/// Proposal is active and accepting votes
|
||||
Active,
|
||||
/// Consensus has been reached
|
||||
ConsensusReached(bool), // true for yes, false for no
|
||||
/// Consensus failed (timeout or insufficient votes)
|
||||
Failed,
|
||||
/// Proposal has expired
|
||||
Expired,
|
||||
}
|
||||
|
||||
/// Consensus session for a specific proposal
|
||||
#[derive(Debug)]
|
||||
pub struct ConsensusSession {
|
||||
pub proposal: Proposal,
|
||||
pub state: ConsensusState,
|
||||
pub votes: HashMap<Vec<u8>, Vote>, // vote_owner -> Vote
|
||||
pub created_at: u64,
|
||||
pub config: ConsensusConfig,
|
||||
pub event_sender: broadcast::Sender<(String, ConsensusEvent)>,
|
||||
pub decisions_tx: broadcast::Sender<ProposalResult>,
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl ConsensusSession {
|
||||
pub fn new(
|
||||
proposal: Proposal,
|
||||
config: ConsensusConfig,
|
||||
event_sender: broadcast::Sender<(String, ConsensusEvent)>,
|
||||
decisions_tx: broadcast::Sender<ProposalResult>,
|
||||
group_name: &str,
|
||||
) -> Self {
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Failed to get current time")
|
||||
.as_secs();
|
||||
|
||||
Self {
|
||||
proposal,
|
||||
state: ConsensusState::Active,
|
||||
votes: HashMap::new(),
|
||||
created_at: now,
|
||||
config,
|
||||
event_sender,
|
||||
decisions_tx,
|
||||
group_name: group_name.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_consensus_threshold(&mut self, consensus_threshold: f64) {
|
||||
self.config.consensus_threshold = consensus_threshold
|
||||
}
|
||||
|
||||
/// Add a vote to the session
|
||||
pub fn add_vote(&mut self, vote: Vote) -> Result<(), ConsensusError> {
|
||||
match self.state {
|
||||
ConsensusState::Active => {
|
||||
// Check if voter already voted
|
||||
if self.votes.contains_key(&vote.vote_owner) {
|
||||
return Err(ConsensusError::DuplicateVote);
|
||||
}
|
||||
|
||||
// Add vote into the session and proposal
|
||||
self.votes.insert(vote.vote_owner.clone(), vote.clone());
|
||||
self.proposal.votes.push(vote.clone());
|
||||
|
||||
// Check if consensus can be reached after adding the vote
|
||||
self.check_consensus();
|
||||
Ok(())
|
||||
}
|
||||
ConsensusState::ConsensusReached(_) => {
|
||||
info!(
|
||||
"[mod::add_vote]: Consensus already reached for proposal {}, skipping vote",
|
||||
self.proposal.proposal_id
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(ConsensusError::SessionNotActive),
|
||||
}
|
||||
}
|
||||
|
||||
/// Count the number of required votes to reach consensus
|
||||
fn count_required_votes(&self) -> usize {
|
||||
let expected_voters = self.proposal.expected_voters_count as usize;
|
||||
if expected_voters <= 2 {
|
||||
expected_voters
|
||||
} else {
|
||||
((expected_voters as f64) * self.config.consensus_threshold) as usize
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if consensus has been reached
|
||||
///
|
||||
/// - `ConsensusReached(true)` if yes votes > no votes
|
||||
/// - `ConsensusReached(false)`
|
||||
/// - if no votes > yes votes
|
||||
/// - if no votes == yes votes and we have all votes
|
||||
/// - `Active`
|
||||
/// - if no votes == yes votes and we don't have all votes
|
||||
/// - if total votes < required votes (we wait for more votes)
|
||||
fn check_consensus(&mut self) {
|
||||
let total_votes = self.votes.len();
|
||||
let yes_votes = self.votes.values().filter(|v| v.vote).count();
|
||||
let no_votes = total_votes - yes_votes;
|
||||
|
||||
// Check if we have all expected votes (only calculate consensus immediately if ALL votes received)
|
||||
let expected_voters = self.proposal.expected_voters_count as usize;
|
||||
let required_votes = self.count_required_votes();
|
||||
// For <= 2 voters, we require all votes to reach consensus
|
||||
if total_votes >= required_votes {
|
||||
// All votes received - calculate consensus immediately
|
||||
if yes_votes > no_votes {
|
||||
self.state = ConsensusState::ConsensusReached(true);
|
||||
info!(
|
||||
"[mod::check_consensus]: Enough votes received {yes_votes}-{no_votes} - consensus reached: YES"
|
||||
);
|
||||
self.emit_consensus_event(ConsensusEvent::ConsensusReached {
|
||||
proposal_id: self.proposal.proposal_id,
|
||||
result: true,
|
||||
});
|
||||
} else if no_votes > yes_votes {
|
||||
self.state = ConsensusState::ConsensusReached(false);
|
||||
info!(
|
||||
"[mod::check_consensus]: Enough votes received {yes_votes}-{no_votes} - consensus reached: NO"
|
||||
);
|
||||
self.emit_consensus_event(ConsensusEvent::ConsensusReached {
|
||||
proposal_id: self.proposal.proposal_id,
|
||||
result: false,
|
||||
});
|
||||
} else {
|
||||
// Tie - if it's all votes, we reject the proposal
|
||||
if total_votes == expected_voters {
|
||||
self.state = ConsensusState::ConsensusReached(false);
|
||||
info!(
|
||||
"[mod::check_consensus]: All votes received and tie - consensus not reached"
|
||||
);
|
||||
self.emit_consensus_event(ConsensusEvent::ConsensusReached {
|
||||
proposal_id: self.proposal.proposal_id,
|
||||
result: false,
|
||||
});
|
||||
} else {
|
||||
// Tie - if it's not all votes, we wait for more votes
|
||||
self.state = ConsensusState::Active;
|
||||
info!(
|
||||
"[mod::check_consensus]: Not enough votes received - consensus not reached"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Emit a consensus event
|
||||
fn emit_consensus_event(&self, event: ConsensusEvent) {
|
||||
info!("[mod::emit_consensus_event]: Emitting consensus event: {event:?}");
|
||||
let _ = self
|
||||
.event_sender
|
||||
.send((self.group_name.clone(), event.clone()));
|
||||
let _ = self.decisions_tx.send(ProposalResult {
|
||||
group_id: self.group_name.clone(),
|
||||
proposal_id: self.proposal.proposal_id,
|
||||
outcome: Outcome::from(event) as i32,
|
||||
decided_at_ms: SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Failed to get current time")
|
||||
.as_secs(),
|
||||
});
|
||||
}
|
||||
|
||||
/// Check if the session is still active
|
||||
pub fn is_active(&self) -> bool {
|
||||
matches!(self.state, ConsensusState::Active)
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the hash of a vote
|
||||
pub fn compute_vote_hash(vote: &Vote) -> Vec<u8> {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(vote.vote_id.to_le_bytes());
|
||||
hasher.update(&vote.vote_owner);
|
||||
hasher.update(vote.proposal_id.to_le_bytes());
|
||||
hasher.update(vote.timestamp.to_le_bytes());
|
||||
hasher.update([vote.vote as u8]);
|
||||
hasher.update(&vote.parent_hash);
|
||||
hasher.update(&vote.received_hash);
|
||||
hasher.finalize().to_vec()
|
||||
}
|
||||
|
||||
/// Create a vote for an incoming proposal based on the user's vote choice.
///
/// Links the new vote into the proposal's hash chain: if the latest existing
/// vote was cast by the same signer, the new vote's `parent_hash` points at
/// it; otherwise the new vote's `received_hash` points at it. The very first
/// vote has both hashes empty.
///
/// Ordering matters: `vote_hash` is computed first, then the encoding that
/// carries the hash (with an empty `signature` field) is signed — this is the
/// same layout `validate_vote` verifies against.
///
/// # Errors
/// Returns [`ConsensusError::FailedToGetCurrentTime`] if the system clock is
/// before the UNIX epoch, and [`ConsensusError::InvalidSignature`] if signing
/// fails.
async fn create_vote_for_proposal<S: LocalSigner>(
    proposal: &Proposal,
    user_vote: bool,
    signer: S,
) -> Result<Vote, ConsensusError> {
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)?
        .as_secs();

    // Get the latest vote as parent and received hash
    let (parent_hash, received_hash) = if let Some(latest_vote) = proposal.votes.last() {
        // Check if we already voted (same voter)
        let is_same_voter = latest_vote.vote_owner == signer.address_bytes();
        if is_same_voter {
            // Same voter: parent_hash is the hash of our previous vote
            (latest_vote.vote_hash.clone(), Vec::new())
        } else {
            // Different voter: parent_hash is empty, received_hash is the hash of the latest vote
            (Vec::new(), latest_vote.vote_hash.clone())
        }
    } else {
        // First vote in the chain: no parent and nothing received.
        (Vec::new(), Vec::new())
    };

    // Create our vote with user's choice
    let mut vote = Vote {
        // NOTE(review): truncating a v4 UUID to u32 makes collisions more
        // likely than the UUID itself — confirm this is acceptable.
        vote_id: Uuid::new_v4().as_u128() as u32,
        vote_owner: signer.address_bytes(),
        proposal_id: proposal.proposal_id,
        timestamp: now,
        vote: user_vote, // Use the user's actual vote choice
        parent_hash,
        received_hash,
        vote_hash: Vec::new(), // Will be computed below
        signature: Vec::new(), // Will be signed below
    };

    // Compute vote hash first, then sign the hash-bearing encoding.
    vote.vote_hash = compute_vote_hash(&vote);
    let vote_bytes = vote.encode_to_vec();
    vote.signature = signer
        .local_sign_message(&vote_bytes)
        .await
        .map_err(|e| ConsensusError::InvalidSignature(e.to_string()))?;

    Ok(vote)
}
|
||||
|
||||
/// Statistics about consensus sessions
#[derive(Debug, Clone)]
pub struct ConsensusStats {
    /// Total number of sessions counted (for one group or across all groups,
    /// depending on which query produced the stats).
    pub total_sessions: usize,
    /// Sessions still in the active state (accepting votes).
    pub active_sessions: usize,
    /// Sessions that finished with a `ConsensusReached` result.
    pub consensus_reached: usize,
    /// Remaining sessions: neither active nor reached consensus.
    pub failed_sessions: usize,
}
|
||||
|
||||
impl Default for ConsensusService {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
703
src/consensus/service.rs
Normal file
703
src/consensus/service.rs
Normal file
@@ -0,0 +1,703 @@
|
||||
//! Consensus service for managing consensus sessions and HashGraph integration
|
||||
use prost::Message;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::sync::{broadcast, RwLock};
|
||||
use tracing::info;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::consensus::{
|
||||
compute_vote_hash, create_vote_for_proposal, ConsensusConfig, ConsensusEvent, ConsensusSession,
|
||||
ConsensusState, ConsensusStats,
|
||||
};
|
||||
use crate::error::ConsensusError;
|
||||
use crate::protos::consensus::v1::{Proposal, ProposalResult, UpdateRequest, Vote};
|
||||
use crate::{verify_vote_hash, LocalSigner};
|
||||
|
||||
/// Consensus service that manages multiple consensus sessions for multiple groups
///
/// Cloning is cheap and intentional: clones share the same session map
/// (behind `Arc<RwLock>`) and the same broadcast channels.
#[derive(Clone, Debug)]
pub struct ConsensusService {
    /// Active consensus sessions organized by group: group_name -> proposal_id -> session
    sessions: Arc<RwLock<HashMap<String, HashMap<u32, ConsensusSession>>>>,
    /// Maximum number of voting sessions to keep per group
    max_sessions_per_group: usize,
    /// Event sender for consensus events, published as `(group_name, event)` pairs
    event_sender: broadcast::Sender<(String, ConsensusEvent)>,
    /// Event sender for consensus results for UI
    decisions_tx: broadcast::Sender<ProposalResult>,
}
|
||||
|
||||
impl ConsensusService {
|
||||
/// Create a new consensus service
|
||||
pub fn new() -> Self {
|
||||
let (event_sender, _) = broadcast::channel(1000);
|
||||
let (decisions_tx, _) = broadcast::channel(128);
|
||||
Self {
|
||||
sessions: Arc::new(RwLock::new(HashMap::new())),
|
||||
max_sessions_per_group: 10,
|
||||
event_sender,
|
||||
decisions_tx,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new consensus service with custom max sessions per group
|
||||
pub fn new_with_max_sessions(max_sessions_per_group: usize) -> Self {
|
||||
let (event_sender, _) = broadcast::channel(1000);
|
||||
let (decisions_tx, _) = broadcast::channel(128);
|
||||
Self {
|
||||
sessions: Arc::new(RwLock::new(HashMap::new())),
|
||||
max_sessions_per_group,
|
||||
event_sender,
|
||||
decisions_tx,
|
||||
}
|
||||
}
|
||||
|
||||
/// Subscribe to consensus events
///
/// Each call returns an independent `tokio::sync::broadcast` receiver;
/// events are tagged with the group name they belong to. Slow receivers can
/// lag and drop events per broadcast-channel semantics.
pub fn subscribe_to_events(&self) -> broadcast::Receiver<(String, ConsensusEvent)> {
    self.event_sender.subscribe()
}
|
||||
|
||||
/// Subscribe to consensus decisions
///
/// Returns a fresh broadcast receiver for final [`ProposalResult`]s intended
/// for the UI; every subscriber gets its own receiver.
pub fn subscribe_decisions(&self) -> broadcast::Receiver<ProposalResult> {
    self.decisions_tx.subscribe()
}
|
||||
|
||||
// /// Send consensus decision to UI
|
||||
// pub fn send_decision(&self, res: ProposalResult) {
|
||||
// let _ = self.decisions_tx.send(res);
|
||||
// }
|
||||
|
||||
pub async fn set_consensus_threshold_for_group_session(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
proposal_id: u32,
|
||||
consensus_threshold: f64,
|
||||
) -> Result<(), ConsensusError> {
|
||||
let mut sessions = self.sessions.write().await;
|
||||
let group_sessions = sessions
|
||||
.entry(group_name.to_string())
|
||||
.or_insert_with(HashMap::new);
|
||||
|
||||
let session = group_sessions
|
||||
.get_mut(&proposal_id)
|
||||
.ok_or(ConsensusError::SessionNotFound)?;
|
||||
|
||||
session.set_consensus_threshold(consensus_threshold);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a new proposal and register a consensus session for it.
///
/// The returned [`Proposal`] starts with an empty vote list; the steward's
/// own vote is attached afterwards via [`Self::vote_on_proposal`]. A
/// background task is spawned that sleeps for the session config's
/// `consensus_timeout` and then applies timeout consensus unless a result
/// already exists by then.
///
/// `expiration_time` is a relative duration in seconds; it is added to the
/// current UNIX time to form the absolute expiration stored on the proposal.
///
/// # Errors
/// Returns [`ConsensusError::FailedToGetCurrentTime`] if the system clock is
/// before the UNIX epoch.
#[allow(clippy::too_many_arguments)]
pub async fn create_proposal(
    &self,
    group_name: &str,
    name: String,
    group_requests: Vec<UpdateRequest>,
    proposal_owner: Vec<u8>,
    expected_voters_count: u32,
    expiration_time: u64,
    liveness_criteria_yes: bool,
) -> Result<Proposal, ConsensusError> {
    // NOTE(review): truncating a v4 UUID to u32 raises collision odds —
    // confirm this id width is intentional.
    let proposal_id = Uuid::new_v4().as_u128() as u32;
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)?
        .as_secs();
    let config = ConsensusConfig::default();

    // Build the proposal; votes start empty (the steward votes separately).
    let proposal = Proposal {
        name,
        group_requests,
        proposal_id,
        proposal_owner,
        votes: vec![],
        expected_voters_count,
        round: 1,
        timestamp: now,
        expiration_time: now + expiration_time,
        liveness_criteria_yes,
    };

    // Create the consensus session that tracks voting on this proposal.

    let session = ConsensusSession::new(
        proposal.clone(),
        config.clone(),
        self.event_sender.clone(),
        self.decisions_tx.clone(),
        group_name,
    );

    // Get timeout from session config before adding to sessions
    let timeout_seconds = config.consensus_timeout;

    // Add session to group and handle cleanup in a single lock operation;
    // the inner scope releases the write lock before spawning the task.
    {
        let mut sessions = self.sessions.write().await;
        let group_sessions = sessions
            .entry(group_name.to_string())
            .or_insert_with(HashMap::new);
        self.insert_session(group_sessions, proposal_id, session);
    }

    // Start automatic timeout handling for this proposal using session config
    let self_clone = self.clone();
    let group_name_owned = group_name.to_string();
    tokio::spawn(async move {
        let timeout_duration = std::time::Duration::from_secs(timeout_seconds);
        tokio::time::sleep(timeout_duration).await;

        // A result may already exist if voting finished before the timeout.
        if self_clone
            .get_consensus_result(&group_name_owned, proposal_id)
            .await
            .is_some()
        {
            info!(
                "[create_proposal]:Consensus result already exists for proposal {proposal_id}, skipping timeout"
            );
            return;
        }

        // Apply timeout consensus if still active
        if self_clone
            .handle_consensus_timeout(&group_name_owned, proposal_id)
            .await
            .is_ok()
        {
            info!(
                "[create_proposal]: Automatic timeout applied for proposal {proposal_id} after {timeout_seconds}s"
            );
        }
    });

    Ok(proposal)
}
|
||||
|
||||
/// Create a new proposal with steward's vote attached
|
||||
pub async fn vote_on_proposal<S: LocalSigner>(
|
||||
&self,
|
||||
group_name: &str,
|
||||
proposal_id: u32,
|
||||
steward_vote: bool,
|
||||
signer: S,
|
||||
) -> Result<Proposal, ConsensusError> {
|
||||
let vote_id = Uuid::new_v4().as_u128() as u32;
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)?
|
||||
.as_secs();
|
||||
|
||||
// Create steward's vote first
|
||||
let steward_vote_obj = Vote {
|
||||
vote_id,
|
||||
vote_owner: signer.address_bytes(),
|
||||
proposal_id,
|
||||
timestamp: now,
|
||||
vote: steward_vote,
|
||||
parent_hash: Vec::new(), // First vote, no parent
|
||||
received_hash: Vec::new(), // First vote, no received
|
||||
vote_hash: Vec::new(), // Will be computed below
|
||||
signature: Vec::new(), // Will be signed below
|
||||
};
|
||||
|
||||
// Compute vote hash and signature for steward's vote
|
||||
let mut steward_vote_obj = steward_vote_obj;
|
||||
steward_vote_obj.vote_hash = compute_vote_hash(&steward_vote_obj);
|
||||
let vote_bytes = steward_vote_obj.encode_to_vec();
|
||||
steward_vote_obj.signature = signer
|
||||
.local_sign_message(&vote_bytes)
|
||||
.await
|
||||
.map_err(|e| ConsensusError::InvalidSignature(e.to_string()))?;
|
||||
|
||||
let mut sessions = self.sessions.write().await;
|
||||
let group_sessions = sessions
|
||||
.entry(group_name.to_string())
|
||||
.or_insert_with(HashMap::new);
|
||||
let session = group_sessions
|
||||
.get_mut(&proposal_id)
|
||||
.ok_or(ConsensusError::SessionNotFound)?;
|
||||
|
||||
session.add_vote(steward_vote_obj.clone())?;
|
||||
|
||||
Ok(session.proposal.clone())
|
||||
}
|
||||
|
||||
/// 1. Check the signatures of the each votes in proposal, in particular for proposal P_1,
|
||||
/// verify the signature of V_1 where V_1 = P_1.votes\[0\] with V_1.signature and V_1.vote_owner
|
||||
/// 2. Do parent_hash check: If there are repeated votes from the same sender,
|
||||
/// check that the hash of the former vote is equal to the parent_hash of the later vote.
|
||||
/// 3. Do received_hash check: If there are multiple votes in a proposal,
|
||||
/// check that the hash of a vote is equal to the received_hash of the next one.
|
||||
pub fn validate_proposal(&self, proposal: &Proposal) -> Result<(), ConsensusError> {
|
||||
// Validate each vote individually first
|
||||
for vote in proposal.votes.iter() {
|
||||
self.validate_vote(vote, proposal.expiration_time)?;
|
||||
}
|
||||
|
||||
// Validate vote chain integrity according to RFC
|
||||
self.validate_vote_chain(&proposal.votes)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate a single vote: required fields, hash integrity, signature, and
/// timestamp bounds.
///
/// `expiration_time` is the maximum allowed age (in seconds) of the vote
/// relative to the current time. Check order matters: cheap field checks
/// run before the hash recomputation and signature verification.
///
/// # Errors
/// Returns the specific [`ConsensusError`] variant for the first failed
/// check.
fn validate_vote(&self, vote: &Vote, expiration_time: u64) -> Result<(), ConsensusError> {
    if vote.vote_owner.is_empty() {
        return Err(ConsensusError::EmptyVoteOwner);
    }

    if vote.vote_hash.is_empty() {
        return Err(ConsensusError::EmptyVoteHash);
    }

    if vote.signature.is_empty() {
        return Err(ConsensusError::EmptySignature);
    }

    // The stored hash must match a recomputation over the vote's fields.
    let expected_hash = compute_vote_hash(vote);
    if vote.vote_hash != expected_hash {
        return Err(ConsensusError::InvalidVoteHash);
    }

    // Encode vote without signature to verify signature — this mirrors the
    // layout the signer used (signature field empty at signing time).
    let mut vote_copy = vote.clone();
    vote_copy.signature = Vec::new();
    let vote_copy_bytes = vote_copy.encode_to_vec();

    // Validate signature
    let verified = verify_vote_hash(&vote.signature, &vote.vote_owner, &vote_copy_bytes)?;

    if !verified {
        return Err(ConsensusError::InvalidVoteSignature);
    }

    let now = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs();

    // Check that vote timestamp is not in the future
    if vote.timestamp > now {
        return Err(ConsensusError::InvalidVoteTimestamp);
    }

    // Check that vote timestamp is within expiration threshold.
    // The subtraction cannot underflow: the future-timestamp check above
    // guarantees vote.timestamp <= now here.
    if now - vote.timestamp > expiration_time {
        return Err(ConsensusError::VoteExpired);
    }

    Ok(())
}
|
||||
|
||||
/// Validate vote chain integrity according to RFC specification
|
||||
fn validate_vote_chain(&self, votes: &[Vote]) -> Result<(), ConsensusError> {
|
||||
if votes.len() <= 1 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
for i in 0..votes.len() - 1 {
|
||||
let current_vote = &votes[i];
|
||||
let next_vote = &votes[i + 1];
|
||||
|
||||
// RFC requirement: received_hash of next vote should equal hash of current vote
|
||||
if current_vote.vote_hash != next_vote.received_hash {
|
||||
return Err(ConsensusError::ReceivedHashMismatch);
|
||||
}
|
||||
|
||||
// RFC requirement: if same voter, parent_hash should equal hash of previous vote
|
||||
if current_vote.vote_owner == next_vote.vote_owner
|
||||
&& current_vote.vote_hash != next_vote.parent_hash
|
||||
{
|
||||
return Err(ConsensusError::ParentHashMismatch);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Insert `session` under `proposal_id` and enforce the per-group retention
/// cap immediately afterwards.
///
/// Caller must already hold the write lock on `self.sessions` and pass the
/// group's inner map.
fn insert_session(
    &self,
    group_sessions: &mut HashMap<u32, ConsensusSession>,
    proposal_id: u32,
    session: ConsensusSession,
) {
    group_sessions.insert(proposal_id, session);
    self.prune_sessions(group_sessions);
}
|
||||
|
||||
/// Drop the oldest sessions so at most `max_sessions_per_group` remain.
///
/// No-op while the map is within the cap. Otherwise the entries are pulled
/// out, ordered newest-first by `created_at`, and only the newest
/// `max_sessions_per_group` are put back.
fn prune_sessions(&self, group_sessions: &mut HashMap<u32, ConsensusSession>) {
    if group_sessions.len() <= self.max_sessions_per_group {
        return;
    }

    let mut entries: Vec<(u32, ConsensusSession)> = group_sessions.drain().collect();
    entries.sort_by(|left, right| right.1.created_at.cmp(&left.1.created_at));
    entries.truncate(self.max_sessions_per_group);
    group_sessions.extend(entries);
}
|
||||
|
||||
/// Process incoming proposal message
|
||||
pub async fn process_incoming_proposal(
|
||||
&self,
|
||||
group_name: &str,
|
||||
proposal: Proposal,
|
||||
) -> Result<(), ConsensusError> {
|
||||
info!(
|
||||
"[service::process_incoming_proposal]: Processing incoming proposal for group {group_name}"
|
||||
);
|
||||
let mut sessions = self.sessions.write().await;
|
||||
let group_sessions = sessions
|
||||
.entry(group_name.to_string())
|
||||
.or_insert_with(HashMap::new);
|
||||
|
||||
// Check if proposal already exists
|
||||
if group_sessions.contains_key(&proposal.proposal_id) {
|
||||
return Err(ConsensusError::ProposalAlreadyExist);
|
||||
}
|
||||
|
||||
// Validate proposal including vote chain integrity
|
||||
self.validate_proposal(&proposal)?;
|
||||
|
||||
// Create new session without our vote - user will vote later
|
||||
let mut session = ConsensusSession::new(
|
||||
proposal.clone(),
|
||||
ConsensusConfig::default(),
|
||||
self.event_sender.clone(),
|
||||
self.decisions_tx.clone(),
|
||||
group_name,
|
||||
);
|
||||
|
||||
session.add_vote(proposal.votes[0].clone())?;
|
||||
self.insert_session(group_sessions, proposal.proposal_id, session);
|
||||
|
||||
info!("[service::process_incoming_proposal]: Proposal stored, waiting for user vote");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process user vote for a proposal
///
/// Rejects double-voting by the same address, then builds, signs, and adds
/// a chain-linked vote via `create_vote_for_proposal`. Returns the created
/// vote so the caller can broadcast it.
///
/// # Errors
/// [`ConsensusError::GroupNotFound`] / [`ConsensusError::SessionNotFound`]
/// when the session does not exist; [`ConsensusError::UserAlreadyVoted`]
/// when this signer already has a vote in the session.
pub async fn process_user_vote<S: LocalSigner>(
    &self,
    group_name: &str,
    proposal_id: u32,
    user_vote: bool,
    signer: S,
) -> Result<Vote, ConsensusError> {
    let mut sessions = self.sessions.write().await;
    let group_sessions = sessions
        .get_mut(group_name)
        .ok_or(ConsensusError::GroupNotFound)?;

    let session = group_sessions
        .get_mut(&proposal_id)
        .ok_or(ConsensusError::SessionNotFound)?;

    // Check if user already voted
    let user_address = signer.address_bytes();
    if session.votes.values().any(|v| v.vote_owner == user_address) {
        return Err(ConsensusError::UserAlreadyVoted);
    }

    // Create our vote based on the user's choice
    let our_vote = create_vote_for_proposal(&session.proposal, user_vote, signer).await?;

    session.add_vote(our_vote.clone())?;

    Ok(our_vote)
}
|
||||
|
||||
/// Process incoming vote
|
||||
pub async fn process_incoming_vote(
|
||||
&self,
|
||||
group_name: &str,
|
||||
vote: Vote,
|
||||
) -> Result<(), ConsensusError> {
|
||||
info!("[service::process_incoming_vote]: Processing incoming vote for group {group_name}");
|
||||
let mut sessions = self.sessions.write().await;
|
||||
let group_sessions = sessions
|
||||
.get_mut(group_name)
|
||||
.ok_or(ConsensusError::GroupNotFound)?;
|
||||
|
||||
let session = group_sessions
|
||||
.get_mut(&vote.proposal_id)
|
||||
.ok_or(ConsensusError::SessionNotFound)?;
|
||||
|
||||
self.validate_vote(&vote, session.proposal.expiration_time)?;
|
||||
|
||||
// Add vote to session
|
||||
session.add_vote(vote.clone())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get liveness criteria for a proposal
|
||||
pub async fn get_proposal_liveness_criteria(
|
||||
&self,
|
||||
group_name: &str,
|
||||
proposal_id: u32,
|
||||
) -> Option<bool> {
|
||||
let sessions = self.sessions.read().await;
|
||||
if let Some(group_sessions) = sessions.get(group_name) {
|
||||
if let Some(session) = group_sessions.get(&proposal_id) {
|
||||
return Some(session.proposal.liveness_criteria_yes);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Get consensus result for a proposal
|
||||
pub async fn get_consensus_result(&self, group_name: &str, proposal_id: u32) -> Option<bool> {
|
||||
let sessions = self.sessions.read().await;
|
||||
if let Some(group_sessions) = sessions.get(group_name) {
|
||||
if let Some(session) = group_sessions.get(&proposal_id) {
|
||||
match session.state {
|
||||
ConsensusState::ConsensusReached(result) => Some(result),
|
||||
_ => None,
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Get active proposals for a specific group
|
||||
pub async fn get_active_proposals(&self, group_name: &str) -> Vec<Proposal> {
|
||||
let sessions = self.sessions.read().await;
|
||||
if let Some(group_sessions) = sessions.get(group_name) {
|
||||
group_sessions
|
||||
.values()
|
||||
.filter(|session| session.is_active())
|
||||
.map(|session| session.proposal.clone())
|
||||
.collect()
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Clean up expired sessions for all groups
|
||||
pub async fn cleanup_expired_sessions(&self) {
|
||||
let mut sessions = self.sessions.write().await;
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("Failed to get current time")
|
||||
.as_secs();
|
||||
|
||||
let group_names: Vec<String> = sessions.keys().cloned().collect();
|
||||
|
||||
for group_name in group_names {
|
||||
if let Some(group_sessions) = sessions.get_mut(&group_name) {
|
||||
group_sessions.retain(|_, session| {
|
||||
now <= session.proposal.expiration_time && session.is_active()
|
||||
});
|
||||
|
||||
// Clean up old sessions if we exceed the limit
|
||||
if group_sessions.len() > self.max_sessions_per_group {
|
||||
// Sort sessions by creation time and keep the most recent ones
|
||||
let mut session_entries: Vec<_> = group_sessions.drain().collect();
|
||||
session_entries.sort_by(|a, b| b.1.created_at.cmp(&a.1.created_at));
|
||||
|
||||
// Keep only the most recent sessions
|
||||
for (proposal_id, session) in session_entries
|
||||
.into_iter()
|
||||
.take(self.max_sessions_per_group)
|
||||
{
|
||||
group_sessions.insert(proposal_id, session);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get session statistics for a specific group
|
||||
pub async fn get_group_stats(&self, group_name: &str) -> ConsensusStats {
|
||||
let sessions = self.sessions.read().await;
|
||||
if let Some(group_sessions) = sessions.get(group_name) {
|
||||
let total_sessions = group_sessions.len();
|
||||
let active_sessions = group_sessions.values().filter(|s| s.is_active()).count();
|
||||
let consensus_reached = group_sessions
|
||||
.values()
|
||||
.filter(|s| matches!(s.state, ConsensusState::ConsensusReached(_)))
|
||||
.count();
|
||||
|
||||
ConsensusStats {
|
||||
total_sessions,
|
||||
active_sessions,
|
||||
consensus_reached,
|
||||
failed_sessions: total_sessions - active_sessions - consensus_reached,
|
||||
}
|
||||
} else {
|
||||
ConsensusStats {
|
||||
total_sessions: 0,
|
||||
active_sessions: 0,
|
||||
consensus_reached: 0,
|
||||
failed_sessions: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get overall session statistics across all groups
|
||||
pub async fn get_overall_stats(&self) -> ConsensusStats {
|
||||
let sessions = self.sessions.read().await;
|
||||
let mut total_sessions = 0;
|
||||
let mut active_sessions = 0;
|
||||
let mut consensus_reached = 0;
|
||||
|
||||
for group_sessions in sessions.values() {
|
||||
total_sessions += group_sessions.len();
|
||||
active_sessions += group_sessions.values().filter(|s| s.is_active()).count();
|
||||
consensus_reached += group_sessions
|
||||
.values()
|
||||
.filter(|s| matches!(s.state, ConsensusState::ConsensusReached(_)))
|
||||
.count();
|
||||
}
|
||||
|
||||
ConsensusStats {
|
||||
total_sessions,
|
||||
active_sessions,
|
||||
consensus_reached,
|
||||
failed_sessions: total_sessions - active_sessions - consensus_reached,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get all group names that have active sessions
|
||||
pub async fn get_active_groups(&self) -> Vec<String> {
|
||||
let sessions = self.sessions.read().await;
|
||||
sessions
|
||||
.iter()
|
||||
.filter(|(_, group_sessions)| {
|
||||
group_sessions.values().any(|session| session.is_active())
|
||||
})
|
||||
.map(|(group_name, _)| group_name.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Remove all sessions for a specific group
///
/// Drops the group's entire session map, including sessions that are still
/// active; no events are emitted for the dropped sessions.
pub async fn remove_group_sessions(&self, group_name: &str) {
    let mut sessions = self.sessions.write().await;
    sessions.remove(group_name);
}
|
||||
|
||||
/// Check if we have enough votes for consensus (2n/3 threshold)
|
||||
pub async fn has_sufficient_votes(&self, group_name: &str, proposal_id: u32) -> bool {
|
||||
let sessions = self.sessions.read().await;
|
||||
|
||||
if let Some(group_sessions) = sessions.get(group_name) {
|
||||
if let Some(session) = group_sessions.get(&proposal_id) {
|
||||
let total_votes = session.votes.len() as u32;
|
||||
let expected_voters = session.proposal.expected_voters_count;
|
||||
self.check_sufficient_votes(
|
||||
total_votes,
|
||||
expected_voters,
|
||||
session.config.consensus_threshold,
|
||||
)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle consensus when timeout is reached
///
/// If consensus was already reached the existing result is returned
/// unchanged. Otherwise: with sufficient votes the result is computed from
/// the votes cast so far; with insufficient votes the proposal's liveness
/// criteria decides. The session is then marked `ConsensusReached` and a
/// `ConsensusReached` event is emitted.
///
/// # Errors
/// Returns [`ConsensusError::SessionNotFound`] when the group or the
/// proposal session does not exist.
pub async fn handle_consensus_timeout(
    &self,
    group_name: &str,
    proposal_id: u32,
) -> Result<bool, ConsensusError> {
    // First, check if consensus was already reached to avoid unnecessary work
    let mut sessions = self.sessions.write().await;
    if let Some(group_sessions) = sessions.get_mut(group_name) {
        if let Some(session) = group_sessions.get_mut(&proposal_id) {
            // Check if consensus was already reached
            match session.state {
                crate::consensus::ConsensusState::ConsensusReached(result) => {
                    info!("[handle_consensus_timeout]: Consensus already reached for proposal {proposal_id}, skipping timeout");
                    Ok(result)
                }
                _ => {
                    // Calculate consensus result
                    let total_votes = session.votes.len() as u32;
                    let expected_voters = session.proposal.expected_voters_count;
                    let result = if self.check_sufficient_votes(
                        total_votes,
                        expected_voters,
                        session.config.consensus_threshold,
                    ) {
                        // We have sufficient votes (2n/3) - calculate result based on votes
                        self.calculate_consensus_result(
                            &session.votes,
                            session.proposal.liveness_criteria_yes,
                        )
                    } else {
                        // Insufficient votes - apply liveness criteria
                        session.proposal.liveness_criteria_yes
                    };

                    // Apply timeout consensus: transition the session to its
                    // terminal state before emitting the event.
                    session.state = crate::consensus::ConsensusState::ConsensusReached(result);
                    info!("[handle_consensus_timeout]: Timeout consensus applied for proposal {proposal_id}: {result} (liveness criteria)");

                    // Emit consensus event so subscribers (UI, steward) see
                    // the timeout-driven decision.
                    session.emit_consensus_event(
                        crate::consensus::ConsensusEvent::ConsensusReached {
                            proposal_id,
                            result,
                        },
                    );

                    Ok(result)
                }
            }
        } else {
            Err(ConsensusError::SessionNotFound)
        }
    } else {
        Err(ConsensusError::SessionNotFound)
    }
}
|
||||
|
||||
/// Helper method to calculate required votes for consensus
///
/// With one or two expected voters everyone must vote; otherwise the
/// requirement is `expected_voters * consensus_threshold`.
///
/// NOTE(review): the `as u32` cast truncates toward zero, so e.g. 3 voters
/// with a 0.66 threshold require only 1 vote — confirm whether a ceiling
/// (strict 2n/3) was intended here.
fn calculate_required_votes(&self, expected_voters: u32, consensus_threshold: f64) -> u32 {
    if expected_voters == 1 || expected_voters == 2 {
        expected_voters
    } else {
        ((expected_voters as f64) * consensus_threshold) as u32
    }
}
|
||||
|
||||
/// Helper method to check if sufficient votes exist for consensus
|
||||
fn check_sufficient_votes(
|
||||
&self,
|
||||
total_votes: u32,
|
||||
expected_voters: u32,
|
||||
consensus_threshold: f64,
|
||||
) -> bool {
|
||||
let required_votes = self.calculate_required_votes(expected_voters, consensus_threshold);
|
||||
println!(
|
||||
"[service::check_sufficient_votes]: Total votes: {total_votes}, Expected voters: {expected_voters}, Consensus threshold: {consensus_threshold}, Required votes: {required_votes}"
|
||||
);
|
||||
total_votes >= required_votes
|
||||
}
|
||||
|
||||
/// Helper method to calculate consensus result based on votes
|
||||
fn calculate_consensus_result(
|
||||
&self,
|
||||
votes: &HashMap<Vec<u8>, Vote>,
|
||||
liveness_criteria_yes: bool,
|
||||
) -> bool {
|
||||
let total_votes = votes.len() as u32;
|
||||
let yes_votes = votes.values().filter(|v| v.vote).count() as u32;
|
||||
let no_votes = total_votes - yes_votes;
|
||||
|
||||
if yes_votes > no_votes {
|
||||
true
|
||||
} else if no_votes > yes_votes {
|
||||
false
|
||||
} else {
|
||||
// Tie - apply liveness criteria
|
||||
liveness_criteria_yes
|
||||
}
|
||||
}
|
||||
}
|
||||
189
src/error.rs
Normal file
189
src/error.rs
Normal file
@@ -0,0 +1,189 @@
|
||||
use alloy::signers::local::LocalSignerError;
|
||||
use openmls::group::WelcomeError;
|
||||
use openmls::{
|
||||
framing::errors::MlsMessageError,
|
||||
group::ProposeRemoveMemberError,
|
||||
prelude::{
|
||||
CommitToPendingProposalsError, CreateMessageError, MergeCommitError,
|
||||
MergePendingCommitError, NewGroupError, ProcessMessageError, ProposeAddMemberError,
|
||||
},
|
||||
};
|
||||
use openmls_rust_crypto::MemoryStorageError;
|
||||
use std::env::VarError;
|
||||
use std::num::ParseIntError;
|
||||
use std::string::FromUtf8Error;
|
||||
|
||||
use ds::DeliveryServiceError;
|
||||
use mls_crypto::error::IdentityError;
|
||||
|
||||
/// Errors raised by the consensus subsystem: vote validation, session and
/// group lookup, and proposal lifecycle management.
#[derive(Debug, thiserror::Error)]
pub enum ConsensusError {
    #[error(transparent)]
    MessageError(#[from] MessageError),

    // Vote-validation failures (see `ConsensusService::validate_vote` and
    // `validate_vote_chain`).
    #[error("Verification failed")]
    InvalidVoteSignature,
    #[error("Duplicate vote")]
    DuplicateVote,
    #[error("Empty vote owner")]
    EmptyVoteOwner,
    #[error("Vote expired")]
    VoteExpired,
    #[error("Invalid vote hash")]
    InvalidVoteHash,
    #[error("Empty vote hash")]
    EmptyVoteHash,
    #[error("Received hash mismatch")]
    ReceivedHashMismatch,
    #[error("Parent hash mismatch")]
    ParentHashMismatch,
    #[error("Invalid vote timestamp")]
    InvalidVoteTimestamp,

    // Session / group lookup failures.
    #[error("Session not active")]
    SessionNotActive,
    #[error("Group not found")]
    GroupNotFound,
    #[error("Session not found")]
    SessionNotFound,

    #[error("User already voted")]
    UserAlreadyVoted,

    #[error("Proposal already exist in consensus service")]
    ProposalAlreadyExist,

    // Signing failures.
    #[error("Empty signature")]
    EmptySignature,
    #[error("Invalid signature: {0}")]
    InvalidSignature(String),

    // System clock reported a time before the UNIX epoch.
    #[error("Failed to get current time")]
    FailedToGetCurrentTime(#[from] std::time::SystemTimeError),
}
|
||||
|
||||
/// Errors produced while encoding, decoding, or verifying messages
/// (signatures, JSON, MLS framing).
#[derive(Debug, thiserror::Error)]
pub enum MessageError {
    #[error("Failed to verify signature: {0}")]
    InvalidSignature(#[from] libsecp256k1::Error),
    #[error("JSON processing error: {0}")]
    InvalidJson(#[from] serde_json::Error),
    #[error("Failed to serialize or deserialize MLS message: {0}")]
    InvalidMlsMessage(#[from] MlsMessageError),
    #[error("Invalid alloy signature: {0}")]
    InvalidAlloySignature(#[from] alloy::primitives::SignatureError),
    // Raised when a byte payload has a different length than expected.
    #[error("Mismatched length: expected {expect}, got {actual}")]
    MismatchedLength { expect: usize, actual: usize },
}
|
||||
|
||||
/// Errors produced by group management: group state transitions and the
/// underlying OpenMLS group operations.
#[derive(Debug, thiserror::Error)]
pub enum GroupError {
    #[error(transparent)]
    MessageError(#[from] MessageError),
    #[error(transparent)]
    IdentityError(#[from] IdentityError),

    // Group state / lifecycle failures.
    #[error("Steward not set")]
    StewardNotSet,
    #[error("MLS group not initialized")]
    MlsGroupNotSet,
    #[error("Group still active")]
    GroupStillActive,
    #[error("Invalid state transition from {from} to {to}")]
    InvalidStateTransition { from: String, to: String },
    #[error("Empty proposals for current epoch")]
    EmptyProposals,
    #[error("Invalid state [{state}] to send message [{message_type}]")]
    InvalidStateToMessageSend { state: String, message_type: String },

    // Wrapped OpenMLS / encoding failures.
    #[error("Failed to decode hex address: {0}")]
    HexDecodeError(#[from] alloy::hex::FromHexError),
    #[error("Unable to create MLS group: {0}")]
    UnableToCreateGroup(#[from] NewGroupError<MemoryStorageError>),
    #[error("Unable to merge pending commit in MLS group: {0}")]
    UnableToMergePendingCommit(#[from] MergePendingCommitError<MemoryStorageError>),
    #[error("Unable to merge staged commit in MLS group: {0}")]
    UnableToMergeStagedCommit(#[from] MergeCommitError<MemoryStorageError>),
    #[error("Unable to process message: {0}")]
    InvalidProcessMessage(#[from] ProcessMessageError),
    #[error("Unable to encrypt MLS message: {0}")]
    UnableToEncryptMlsMessage(#[from] CreateMessageError),
    #[error("Unable to create proposal to add members: {0}")]
    UnableToCreateProposal(#[from] ProposeAddMemberError<MemoryStorageError>),
    #[error("Unable to create proposal to remove members: {0}")]
    UnableToCreateProposalToRemoveMembers(#[from] ProposeRemoveMemberError<MemoryStorageError>),
    #[error("Unable to revert commit to pending proposals: {0}")]
    UnableToRevertCommitToPendingProposals(
        #[from] CommitToPendingProposalsError<MemoryStorageError>,
    ),
    #[error("Unable to store pending proposal: {0}")]
    UnableToStorePendingProposal(#[from] MemoryStorageError),
    #[error("Failed to serialize mls message: {0}")]
    MlsMessageError(#[from] MlsMessageError),
    #[error("Failed to decode app message: {0}")]
    AppMessageDecodeError(#[from] prost::DecodeError),
}
|
||||
|
||||
/// Top-level user-facing error type; aggregates the subsystem errors
/// (delivery service, identity, group, message, consensus) plus user-level
/// failures of its own.
#[derive(Debug, thiserror::Error)]
pub enum UserError {
    // Transparent wrappers over subsystem errors.
    #[error(transparent)]
    DeliveryServiceError(#[from] DeliveryServiceError),
    #[error(transparent)]
    IdentityError(#[from] IdentityError),
    #[error(transparent)]
    GroupError(#[from] GroupError),
    #[error(transparent)]
    MessageError(#[from] MessageError),
    #[error(transparent)]
    ConsensusError(#[from] ConsensusError),

    // User-level failures.
    #[error("Group already exists")]
    GroupAlreadyExistsError,
    #[error("Group not found")]
    GroupNotFoundError,
    #[error("MLS group not initialized")]
    MlsGroupNotInitialized,
    #[error("Welcome message cannot be empty.")]
    EmptyWelcomeMessageError,
    #[error("Failed to extract welcome message")]
    FailedToExtractWelcomeMessage,
    #[error("Message verification failed")]
    MessageVerificationFailed,
    #[error("Invalid user action: {0}")]
    InvalidUserAction(String),
    #[error("Unknown content topic type: {0}")]
    UnknownContentTopicType(String),
    #[error("Invalid group state: {0}")]
    InvalidGroupState(String),
    #[error("No proposals found")]
    NoProposalsFound,
    #[error("Invalid app message type")]
    InvalidAppMessageType,

    // Wrapped parsing / protocol failures.
    #[error("Failed to create staged join: {0}")]
    MlsWelcomeError(#[from] WelcomeError<MemoryStorageError>),
    #[error("UTF-8 parsing error: {0}")]
    Utf8ParsingError(#[from] FromUtf8Error),
    #[error("Failed to parse signer: {0}")]
    SignerParsingError(#[from] LocalSignerError),
    #[error("Failed to decode welcome message: {0}")]
    WelcomeMessageDecodeError(#[from] prost::DecodeError),
    #[error("Failed to deserialize mls message in: {0}")]
    MlsMessageInDeserializeError(#[from] openmls::prelude::Error),
    #[error("Failed to try into protocol message: {0}")]
    TryIntoProtocolMessageError(#[from] openmls::framing::errors::ProtocolMessageError),
    #[error("Failed to get current time")]
    FailedToGetCurrentTime(#[from] std::time::SystemTimeError),
}
|
||||
|
||||
/// Errors raised during application bootstrap: reading configuration from
/// environment variables and connecting to the delivery service.
#[derive(Debug, thiserror::Error)]
pub enum BootstrapError {
    // Carries the variable name so the operator knows which one to set.
    #[error("Failed to read env var {0}: {1}")]
    EnvVar(&'static str, #[source] VarError),

    #[error("Failed to parse int: {0}")]
    ParseInt(#[from] ParseIntError),

    #[error(transparent)]
    DeliveryServiceError(#[from] DeliveryServiceError),
}
|
||||
793
src/group.rs
Normal file
793
src/group.rs
Normal file
@@ -0,0 +1,793 @@
|
||||
use alloy::hex;
|
||||
use openmls::{
|
||||
group::{GroupEpoch, GroupId, MlsGroup, MlsGroupCreateConfig},
|
||||
prelude::{
|
||||
ApplicationMessage, CredentialWithKey, KeyPackage, LeafNodeIndex, OpenMlsProvider,
|
||||
ProcessedMessageContent, ProtocolMessage,
|
||||
},
|
||||
};
|
||||
use openmls_basic_credential::SignatureKeyPair;
|
||||
use prost::Message;
|
||||
use std::{fmt::Display, sync::Arc};
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use tracing::{error, info};
|
||||
use uuid;
|
||||
|
||||
use crate::{
|
||||
error::GroupError,
|
||||
message::{message_types, MessageType},
|
||||
protos::{
|
||||
consensus::v1::{Proposal, RequestType, UpdateRequest, Vote},
|
||||
de_mls::messages::v1::{app_message, AppMessage, BatchProposalsMessage, WelcomeMessage},
|
||||
},
|
||||
state_machine::{GroupState, GroupStateMachine},
|
||||
steward::GroupUpdateRequest,
|
||||
};
|
||||
use ds::{transport::OutboundPacket, APP_MSG_SUBTOPIC, WELCOME_SUBTOPIC};
|
||||
use mls_crypto::{identity::normalize_wallet_address_str, openmls_provider::MlsProvider};
|
||||
|
||||
/// Represents the action to take after processing a group message or event.
///
/// This enum defines the possible outcomes when processing group-related operations,
/// allowing the caller to determine the appropriate next steps.
#[derive(Clone, Debug)]
pub enum GroupAction {
    /// An application-level message that should be surfaced to the app/UI.
    GroupAppMsg(AppMessage),
    /// A proposal that should be put up for voting.
    GroupProposal(Proposal),
    /// A vote cast on a pending proposal.
    GroupVote(Vote),
    /// The local user was removed from the group and must leave it.
    LeaveGroup,
    /// No further action is required.
    DoNothing,
}
|
||||
|
||||
impl Display for GroupAction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
GroupAction::GroupAppMsg(_) => write!(f, "Message will be printed to the app"),
|
||||
GroupAction::GroupProposal(_) => write!(f, "Get proposal for voting"),
|
||||
GroupAction::GroupVote(_) => write!(f, "Get vote for proposal"),
|
||||
GroupAction::LeaveGroup => write!(f, "User will leave the group"),
|
||||
GroupAction::DoNothing => write!(f, "Do Nothing"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a group in the MLS-based messaging system.
///
/// The Group struct manages the lifecycle of an MLS group, including member management,
/// proposal handling, and state transitions. It integrates with the state machine
/// to enforce proper group operations and steward epoch management.
///
/// ## Key Features:
/// - MLS group management and message processing
/// - Steward epoch coordination and proposal handling
/// - State machine integration for proper workflow enforcement
/// - Member addition/removal through proposals
/// - Message validation and permission checking
#[derive(Clone, Debug)]
pub struct Group {
    /// Human-readable group identifier; also used as the MLS `GroupId`.
    group_name: String,
    /// Underlying MLS group; `None` until created locally or joined via welcome.
    mls_group: Option<Arc<Mutex<MlsGroup>>>,
    /// Whether this user's key package has already been shared for this group.
    is_kp_shared: bool,
    /// Random per-instance application id used to tag outbound packets.
    app_id: Vec<u8>,
    /// State machine enforcing the steward-epoch / voting workflow.
    state_machine: Arc<RwLock<GroupStateMachine>>,
}
|
||||
|
||||
impl Group {
|
||||
pub fn new(
|
||||
group_name: &str,
|
||||
is_creation: bool,
|
||||
provider: Option<&MlsProvider>,
|
||||
signer: Option<&SignatureKeyPair>,
|
||||
credential_with_key: Option<&CredentialWithKey>,
|
||||
) -> Result<Self, GroupError> {
|
||||
let uuid = uuid::Uuid::new_v4().as_bytes().to_vec();
|
||||
let mut group = Group {
|
||||
group_name: group_name.to_string(),
|
||||
mls_group: None,
|
||||
is_kp_shared: false,
|
||||
app_id: uuid.clone(),
|
||||
state_machine: if is_creation {
|
||||
Arc::new(RwLock::new(GroupStateMachine::new_with_steward()))
|
||||
} else {
|
||||
Arc::new(RwLock::new(GroupStateMachine::new()))
|
||||
},
|
||||
};
|
||||
|
||||
if is_creation {
|
||||
if let (Some(provider), Some(signer), Some(credential_with_key)) =
|
||||
(provider, signer, credential_with_key)
|
||||
{
|
||||
// Create a new MLS group instance
|
||||
let group_config = MlsGroupCreateConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
let mls_group = MlsGroup::new_with_group_id(
|
||||
provider,
|
||||
signer,
|
||||
&group_config,
|
||||
GroupId::from_slice(group_name.as_bytes()),
|
||||
credential_with_key.clone(),
|
||||
)?;
|
||||
group.mls_group = Some(Arc::new(Mutex::new(mls_group)));
|
||||
group.is_kp_shared = true;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(group)
|
||||
}
|
||||
|
||||
/// Get the identities of all current group members.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Vector of member identity bytes
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `GroupError::MlsGroupNotSet` if MLS group is not initialized
|
||||
pub async fn members_identity(&self) -> Result<Vec<Vec<u8>>, GroupError> {
|
||||
let mls_group = self
|
||||
.mls_group
|
||||
.as_ref()
|
||||
.ok_or_else(|| GroupError::MlsGroupNotSet)?
|
||||
.lock()
|
||||
.await;
|
||||
let x = mls_group
|
||||
.members()
|
||||
.map(|m| m.credential.serialized_content().to_vec())
|
||||
.collect();
|
||||
Ok(x)
|
||||
}
|
||||
|
||||
/// Find the leaf node index of a member by their identity.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `identity`: The member's identity bytes
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `Some(LeafNodeIndex)` if member is found, `None` otherwise
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `GroupError::MlsGroupNotSet` if MLS group is not initialized
|
||||
pub async fn find_member_index(
|
||||
&self,
|
||||
identity: Vec<u8>,
|
||||
) -> Result<Option<LeafNodeIndex>, GroupError> {
|
||||
let mls_group = self
|
||||
.mls_group
|
||||
.as_ref()
|
||||
.ok_or_else(|| GroupError::MlsGroupNotSet)?
|
||||
.lock()
|
||||
.await;
|
||||
let x = mls_group.members().find_map(|m| {
|
||||
if m.credential.serialized_content() == identity {
|
||||
Some(m.index)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
Ok(x)
|
||||
}
|
||||
|
||||
/// Get the current epoch of the MLS group.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Current group epoch
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `GroupError::MlsGroupNotSet` if MLS group is not initialized
|
||||
pub async fn epoch(&self) -> Result<GroupEpoch, GroupError> {
|
||||
let mls_group = self
|
||||
.mls_group
|
||||
.as_ref()
|
||||
.ok_or_else(|| GroupError::MlsGroupNotSet)?
|
||||
.lock()
|
||||
.await;
|
||||
Ok(mls_group.epoch())
|
||||
}
|
||||
|
||||
    /// Set the MLS group instance for this group.
    ///
    /// ## Parameters:
    /// - `mls_group`: The MLS group instance to set
    ///
    /// ## Effects:
    /// - Sets `is_kp_shared` to `true`
    /// - Stores the MLS group in an `Arc<Mutex<MlsGroup>>`
    ///
    /// Currently infallible; the `Result` return type is kept for interface
    /// stability.
    pub fn set_mls_group(&mut self, mls_group: MlsGroup) -> Result<(), GroupError> {
        self.is_kp_shared = true;
        self.mls_group = Some(Arc::new(Mutex::new(mls_group)));
        Ok(())
    }

    /// Check if the MLS group is initialized.
    ///
    /// ## Returns:
    /// - `true` if MLS group is set, `false` otherwise
    pub fn is_mls_group_initialized(&self) -> bool {
        self.mls_group.is_some()
    }

    /// Check if the key package has been shared.
    ///
    /// ## Returns:
    /// - `true` if key package is shared, `false` otherwise
    pub fn is_kp_shared(&self) -> bool {
        self.is_kp_shared
    }

    /// Set the key package shared status.
    ///
    /// ## Parameters:
    /// - `is_kp_shared`: Whether the key package is shared
    pub fn set_kp_shared(&mut self, is_kp_shared: bool) {
        self.is_kp_shared = is_kp_shared;
    }

    /// Check if this group has a steward configured.
    ///
    /// Takes a read lock on the state machine.
    ///
    /// ## Returns:
    /// - `true` if steward is configured, `false` otherwise
    pub async fn is_steward(&self) -> bool {
        self.state_machine.read().await.has_steward()
    }

    /// Get the application ID for this group.
    ///
    /// ## Returns:
    /// - Reference to the application ID bytes
    pub fn app_id(&self) -> &[u8] {
        &self.app_id
    }

    /// Get the group name as bytes.
    ///
    /// ## Returns:
    /// - Reference to the group name bytes
    pub fn group_name_bytes(&self) -> &[u8] {
        self.group_name.as_bytes()
    }
|
||||
|
||||
/// Generate a steward announcement message for this group.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Waku message containing the steward announcement
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `GroupError::StewardNotSet` if no steward is configured
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Refreshes the steward's key pair
|
||||
/// - Creates a new group announcement
|
||||
pub async fn generate_steward_message(&mut self) -> Result<OutboundPacket, GroupError> {
|
||||
let mut state_machine = self.state_machine.write().await;
|
||||
let steward = state_machine
|
||||
.get_steward_mut()
|
||||
.ok_or(GroupError::StewardNotSet)?;
|
||||
steward.refresh_key_pair().await;
|
||||
|
||||
let welcome_msg: WelcomeMessage = steward.create_announcement().await.into();
|
||||
let msg_to_send = OutboundPacket::new(
|
||||
welcome_msg.encode_to_vec(),
|
||||
WELCOME_SUBTOPIC,
|
||||
&self.group_name,
|
||||
self.app_id(),
|
||||
);
|
||||
Ok(msg_to_send)
|
||||
}
|
||||
|
||||
/// Decrypt a steward message using the group's steward key.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `message`: The encrypted message bytes
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Decrypted KeyPackage
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `GroupError::StewardNotSet` if no steward is configured
|
||||
/// - Various decryption errors from the steward
|
||||
pub async fn decrypt_steward_msg(
|
||||
&mut self,
|
||||
message: Vec<u8>,
|
||||
) -> Result<KeyPackage, GroupError> {
|
||||
let state_machine = self.state_machine.read().await;
|
||||
let steward = state_machine
|
||||
.get_steward()
|
||||
.ok_or(GroupError::StewardNotSet)?;
|
||||
let msg: KeyPackage = steward.decrypt_message(message).await?;
|
||||
Ok(msg)
|
||||
}
|
||||
|
||||
/// Store an invite proposal in the steward queue for the current epoch.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `key_package`: The key package of the member to add
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Adds an AddMember proposal to the current epoch
|
||||
/// - Proposal will be processed in the next steward epoch
|
||||
/// - Returns a serialized `UiUpdateRequest` for UI notification
|
||||
pub async fn store_invite_proposal(
|
||||
&mut self,
|
||||
key_package: Box<KeyPackage>,
|
||||
) -> Result<UpdateRequest, GroupError> {
|
||||
let mut state_machine = self.state_machine.write().await;
|
||||
state_machine
|
||||
.add_proposal(GroupUpdateRequest::AddMember(key_package.clone()))
|
||||
.await;
|
||||
|
||||
let wallet_bytes = key_package
|
||||
.leaf_node()
|
||||
.credential()
|
||||
.serialized_content()
|
||||
.to_vec();
|
||||
|
||||
Ok(UpdateRequest {
|
||||
request_type: RequestType::AddMember as i32,
|
||||
wallet_address: wallet_bytes,
|
||||
})
|
||||
}
|
||||
|
||||
/// Store a remove proposal in the steward queue for the current epoch.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `identity`: The identity string of the member to remove
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Returns a serialized `UiUpdateRequest` for UI notification
|
||||
/// - `GroupError::InvalidIdentity` if the identity is invalid
|
||||
pub async fn store_remove_proposal(
|
||||
&mut self,
|
||||
identity: String,
|
||||
) -> Result<UpdateRequest, GroupError> {
|
||||
let normalized_identity = normalize_wallet_address_str(&identity)?;
|
||||
let mut state_machine = self.state_machine.write().await;
|
||||
state_machine
|
||||
.add_proposal(GroupUpdateRequest::RemoveMember(
|
||||
normalized_identity.clone(),
|
||||
))
|
||||
.await;
|
||||
|
||||
let wallet_bytes = hex::decode(
|
||||
normalized_identity
|
||||
.strip_prefix("0x")
|
||||
.unwrap_or(&normalized_identity),
|
||||
)?;
|
||||
|
||||
Ok(UpdateRequest {
|
||||
request_type: RequestType::RemoveMember as i32,
|
||||
wallet_address: wallet_bytes,
|
||||
})
|
||||
}
|
||||
|
||||
/// Process an application message and determine the appropriate action.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `message`: The application message to process
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `GroupAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - For ban requests from stewards: automatically adds remove proposals
|
||||
/// - For other messages: processes normally
|
||||
///
|
||||
/// ## Supported Message Types:
|
||||
/// - Conversation messages
|
||||
/// - Proposals
|
||||
/// - Votes
|
||||
/// - Ban requests
|
||||
pub async fn process_application_message(
|
||||
&mut self,
|
||||
message: ApplicationMessage,
|
||||
) -> Result<GroupAction, GroupError> {
|
||||
let app_msg = AppMessage::decode(message.into_bytes().as_slice())?;
|
||||
match app_msg.payload {
|
||||
Some(app_message::Payload::ConversationMessage(conversation_message)) => {
|
||||
info!("[process_application_message]: Processing conversation message");
|
||||
Ok(GroupAction::GroupAppMsg(conversation_message.into()))
|
||||
}
|
||||
Some(app_message::Payload::Proposal(proposal)) => {
|
||||
info!("[process_application_message]: Processing proposal message");
|
||||
Ok(GroupAction::GroupProposal(proposal))
|
||||
}
|
||||
Some(app_message::Payload::Vote(vote)) => {
|
||||
info!("[process_application_message]: Processing vote message");
|
||||
Ok(GroupAction::GroupVote(vote))
|
||||
}
|
||||
Some(app_message::Payload::BanRequest(ban_request)) => {
|
||||
info!("[process_application_message]: Processing ban request message");
|
||||
|
||||
if self.is_steward().await {
|
||||
info!(
|
||||
"[process_application_message]: Steward adding remove proposal for user {}",
|
||||
ban_request.user_to_ban.clone()
|
||||
);
|
||||
let _ = self
|
||||
.store_remove_proposal(ban_request.user_to_ban.clone())
|
||||
.await?;
|
||||
} else {
|
||||
info!(
|
||||
"[process_application_message]: Non-steward received ban request message"
|
||||
);
|
||||
}
|
||||
|
||||
Ok(GroupAction::GroupAppMsg(ban_request.into()))
|
||||
}
|
||||
_ => Ok(GroupAction::DoNothing),
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a protocol message from the MLS group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `message`: The protocol message to process
|
||||
/// - `provider`: The MLS provider for processing
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `GroupAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Processes MLS group messages
|
||||
/// - Handles member removal scenarios
|
||||
/// - Stores pending proposals
|
||||
///
|
||||
/// ## Supported Message Types:
|
||||
/// - Application messages
|
||||
/// - Proposal messages
|
||||
/// - External join proposals
|
||||
/// - Staged commit messages
|
||||
pub async fn process_protocol_msg(
|
||||
&mut self,
|
||||
message: ProtocolMessage,
|
||||
provider: &MlsProvider,
|
||||
) -> Result<GroupAction, GroupError> {
|
||||
let group_id = message.group_id().as_slice();
|
||||
if group_id != self.group_name_bytes() {
|
||||
return Ok(GroupAction::DoNothing);
|
||||
}
|
||||
let mut mls_group = self
|
||||
.mls_group
|
||||
.as_ref()
|
||||
.ok_or_else(|| GroupError::MlsGroupNotSet)?
|
||||
.lock()
|
||||
.await;
|
||||
// If the message is from a previous epoch, we don't need to process it and it's a commit for welcome message
|
||||
if message.epoch() < mls_group.epoch() && message.epoch() == 0.into() {
|
||||
return Ok(GroupAction::DoNothing);
|
||||
}
|
||||
|
||||
let processed_message = mls_group.process_message(provider, message)?;
|
||||
|
||||
match processed_message.into_content() {
|
||||
ProcessedMessageContent::ApplicationMessage(application_message) => {
|
||||
drop(mls_group);
|
||||
self.process_application_message(application_message).await
|
||||
}
|
||||
ProcessedMessageContent::ProposalMessage(proposal_ptr) => {
|
||||
mls_group
|
||||
.store_pending_proposal(provider.storage(), proposal_ptr.as_ref().clone())?;
|
||||
Ok(GroupAction::DoNothing)
|
||||
}
|
||||
ProcessedMessageContent::ExternalJoinProposalMessage(_external_proposal_ptr) => {
|
||||
Ok(GroupAction::DoNothing)
|
||||
}
|
||||
ProcessedMessageContent::StagedCommitMessage(commit_ptr) => {
|
||||
let mut remove_proposal: bool = false;
|
||||
if commit_ptr.self_removed() {
|
||||
remove_proposal = true;
|
||||
}
|
||||
mls_group.merge_staged_commit(provider, *commit_ptr)?;
|
||||
if remove_proposal {
|
||||
if mls_group.is_active() {
|
||||
return Err(GroupError::GroupStillActive);
|
||||
}
|
||||
Ok(GroupAction::LeaveGroup)
|
||||
} else {
|
||||
Ok(GroupAction::DoNothing)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build and validate a message for sending to the group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `provider`: The MLS provider for message creation
|
||||
/// - `signer`: The signature key pair for signing
|
||||
/// - `msg`: The application message to build
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Waku message ready for transmission
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Validates message can be sent in current state
|
||||
/// - Creates MLS message with proper signing
|
||||
///
|
||||
/// ## Validation:
|
||||
/// - Checks state machine permissions
|
||||
/// - Ensures steward status and proposal availability
|
||||
pub async fn build_message(
|
||||
&mut self,
|
||||
provider: &MlsProvider,
|
||||
signer: &SignatureKeyPair,
|
||||
msg: &AppMessage,
|
||||
) -> Result<OutboundPacket, GroupError> {
|
||||
let is_steward = self.is_steward().await;
|
||||
let has_proposals = self.get_pending_proposals_count().await > 0;
|
||||
|
||||
let message_type = msg
|
||||
.payload
|
||||
.as_ref()
|
||||
.map(|p| p.message_type())
|
||||
.unwrap_or(message_types::UNKNOWN);
|
||||
|
||||
// Check if message can be sent in current state
|
||||
let state_machine = self.state_machine.read().await;
|
||||
let current_state = state_machine.current_state();
|
||||
if !state_machine.can_send_message_type(is_steward, has_proposals, message_type) {
|
||||
return Err(GroupError::InvalidStateToMessageSend {
|
||||
state: current_state.to_string(),
|
||||
message_type: message_type.to_string(),
|
||||
});
|
||||
}
|
||||
let message_out = self
|
||||
.mls_group
|
||||
.as_mut()
|
||||
.ok_or_else(|| GroupError::MlsGroupNotSet)?
|
||||
.lock()
|
||||
.await
|
||||
.create_message(provider, signer, &msg.encode_to_vec())?
|
||||
.to_bytes()?;
|
||||
Ok(OutboundPacket::new(
|
||||
message_out,
|
||||
APP_MSG_SUBTOPIC,
|
||||
&self.group_name,
|
||||
self.app_id(),
|
||||
))
|
||||
}
|
||||
|
||||
    /// Get the current state of the group state machine.
    ///
    /// ## Returns:
    /// - Current `GroupState` of the group
    pub async fn get_state(&self) -> GroupState {
        self.state_machine.read().await.current_state()
    }

    /// Get the number of pending proposals for the current epoch
    pub async fn get_pending_proposals_count(&self) -> usize {
        self.state_machine
            .read()
            .await
            .get_current_epoch_proposals_count()
            .await
    }

    /// Get the current epoch proposals for UI display.
    pub async fn get_current_epoch_proposals(&self) -> Vec<GroupUpdateRequest> {
        self.state_machine
            .read()
            .await
            .get_current_epoch_proposals()
            .await
    }

    /// Get the number of pending proposals for the voting epoch
    pub async fn get_voting_proposals_count(&self) -> usize {
        self.state_machine
            .read()
            .await
            .get_voting_epoch_proposals_count()
            .await
    }

    /// Get the proposals for the voting epoch
    pub async fn get_proposals_for_voting_epoch(&self) -> Vec<GroupUpdateRequest> {
        self.state_machine
            .read()
            .await
            .get_voting_epoch_proposals()
            .await
    }

    /// Get the voting-epoch proposals converted to protobuf `UpdateRequest`s
    /// for UI consumption.
    pub async fn get_proposals_for_voting_epoch_as_ui_update_requests(&self) -> Vec<UpdateRequest> {
        self.get_proposals_for_voting_epoch()
            .await
            .iter()
            .map(|p| p.clone().into())
            .collect()
    }

    /// Start voting on proposals for the current epoch
    pub async fn start_voting(&mut self) -> Result<(), GroupError> {
        self.state_machine.write().await.start_voting()
    }

    /// Complete voting and update state based on result
    pub async fn complete_voting(&mut self, vote_result: bool) -> Result<(), GroupError> {
        self.state_machine
            .write()
            .await
            .complete_voting(vote_result)
    }

    /// Start working state (for non-steward peers after consensus or edge case recovery)
    pub async fn start_working(&mut self) {
        self.state_machine.write().await.start_working();
    }

    /// Start consensus reached state (for non-steward peers after consensus)
    pub async fn start_consensus_reached(&mut self) {
        self.state_machine.write().await.start_consensus_reached();
    }

    /// Start waiting state (for non-steward peers after consensus or edge case recovery)
    pub async fn start_waiting(&mut self) {
        self.state_machine.write().await.start_waiting();
    }

    /// Start steward epoch with validation
    pub async fn start_steward_epoch_with_validation(&mut self) -> Result<usize, GroupError> {
        self.state_machine
            .write()
            .await
            .start_steward_epoch_with_validation()
            .await
    }

    /// Handle successful vote for group
    pub async fn handle_yes_vote(&mut self) -> Result<(), GroupError> {
        self.state_machine.write().await.handle_yes_vote().await
    }

    /// Handle failed vote for group
    pub async fn handle_no_vote(&mut self) -> Result<(), GroupError> {
        self.state_machine.write().await.handle_no_vote().await
    }

    /// Start waiting state when steward sends batch proposals after consensus
    pub async fn start_waiting_after_consensus(&mut self) -> Result<(), GroupError> {
        self.state_machine
            .write()
            .await
            .start_waiting_after_consensus()
    }
|
||||
|
||||
    /// Create a batch proposals message and welcome message for the current epoch.
    ///
    /// ## Parameters:
    /// - `provider`: The MLS provider for proposal creation
    /// - `signer`: The signature key pair for signing
    ///
    /// ## Returns:
    /// - Vector of Waku messages: [batch_proposals_msg, welcome_msg]
    /// - Welcome message is only included if there are new members to add
    ///
    /// ## Preconditions:
    /// - Must be a steward
    /// - Must have proposals in the voting epoch
    ///
    /// ## Effects:
    /// - Creates MLS proposals for all pending group updates
    /// - Commits all proposals to the MLS group
    /// - Merges the commit to apply changes
    ///
    /// ## Supported Proposal Types:
    /// - AddMember: Adds new member with key package
    /// - RemoveMember: Removes member by identity
    ///
    /// ## Errors:
    /// - `GroupError::StewardNotSet` if not a steward
    /// - `GroupError::EmptyProposals` if no proposals exist
    /// - Various MLS processing errors
    pub async fn create_batch_proposals_message(
        &mut self,
        provider: &MlsProvider,
        signer: &SignatureKeyPair,
    ) -> Result<Vec<OutboundPacket>, GroupError> {
        if !self.is_steward().await {
            return Err(GroupError::StewardNotSet);
        }

        let proposals = self.get_proposals_for_voting_epoch().await;

        if proposals.is_empty() {
            return Err(GroupError::EmptyProposals);
        }

        // Resolve leaf indices for removals BEFORE taking the group lock
        // below: `find_member_index` locks the group itself, and tokio
        // mutexes are not reentrant, so doing this under the lock would
        // deadlock. `member_indices` stays index-aligned with `proposals`
        // (None for non-removal entries).
        let mut member_indices = Vec::new();
        for proposal in &proposals {
            if let GroupUpdateRequest::RemoveMember(identity) = proposal {
                // Convert the address string to bytes for proper MLS credential matching
                let identity_bytes = if let Some(hex_string) = identity.strip_prefix("0x") {
                    // Remove 0x prefix and convert to bytes
                    hex::decode(hex_string)?
                } else {
                    // Assume it's already a hex string without 0x prefix
                    hex::decode(identity)?
                };

                let member_index = self.find_member_index(identity_bytes).await?;
                member_indices.push(member_index);
            } else {
                member_indices.push(None);
            }
        }
        let mut mls_proposals = Vec::new();
        // Scope block: the group lock is held only while building proposals
        // and committing, then released before packet construction.
        let (out_messages, welcome) = {
            let mut mls_group = self
                .mls_group
                .as_mut()
                .ok_or_else(|| GroupError::MlsGroupNotSet)?
                .lock()
                .await;

            // Convert each GroupUpdateRequest to MLS proposal
            for (i, proposal) in proposals.iter().enumerate() {
                match proposal {
                    GroupUpdateRequest::AddMember(boxed_key_package) => {
                        let (mls_message_out, _proposal_ref) = mls_group.propose_add_member(
                            provider,
                            signer,
                            boxed_key_package.as_ref(),
                        )?;
                        mls_proposals.push(mls_message_out.to_bytes()?);
                    }
                    GroupUpdateRequest::RemoveMember(identity) => {
                        if let Some(index) = member_indices[i] {
                            let (mls_message_out, _proposal_ref) =
                                mls_group.propose_remove_member(provider, signer, index)?;
                            mls_proposals.push(mls_message_out.to_bytes()?);
                        } else {
                            // Best-effort: an unknown member is logged and
                            // skipped rather than failing the whole batch.
                            error!("[create_batch_proposals_message]: Failed to find member index for identity: {identity}");
                        }
                    }
                }
            }

            // Create commit with all proposals
            let (out_messages, welcome, _group_info) =
                mls_group.commit_to_pending_proposals(provider, signer)?;

            // Merge the commit
            mls_group.merge_pending_commit(provider)?;
            (out_messages, welcome)
        };
        // Create batch proposals message (without welcome)
        let batch_msg: AppMessage = BatchProposalsMessage {
            group_name: self.group_name_bytes().to_vec(),
            mls_proposals,
            commit_message: out_messages.to_bytes()?,
        }
        .into();

        let batch_waku_msg = OutboundPacket::new(
            batch_msg.encode_to_vec(),
            APP_MSG_SUBTOPIC,
            &self.group_name,
            self.app_id(),
        );

        let mut messages = vec![batch_waku_msg];

        // Create separate welcome message if there are new members
        if let Some(welcome) = welcome {
            let welcome_msg: WelcomeMessage = welcome.try_into()?;
            let welcome_waku_msg = OutboundPacket::new(
                welcome_msg.encode_to_vec(),
                WELCOME_SUBTOPIC,
                &self.group_name,
                self.app_id(),
            );
            messages.push(welcome_waku_msg);
        }

        Ok(messages)
    }
|
||||
}
|
||||
|
||||
impl Display for Group {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // NOTE(review): `{:#?}` debug-formats the String (adds quotes) and
        // `writeln!` appends a trailing newline — confirm both are intended
        // for user-facing Display output.
        writeln!(f, "Group: {:#?}", self.group_name)
    }
}
|
||||
@@ -1,359 +0,0 @@
|
||||
use alloy::hex;
|
||||
use chrono::Utc;
|
||||
use ds::{
|
||||
ds_waku::{APP_MSG_SUBTOPIC, COMMIT_MSG_SUBTOPIC, WELCOME_SUBTOPIC},
|
||||
waku_actor::ProcessMessageToSend,
|
||||
};
|
||||
use kameo::Actor;
|
||||
use libsecp256k1::{PublicKey, SecretKey};
|
||||
use openmls::{group::*, prelude::*};
|
||||
use openmls_basic_credential::SignatureKeyPair;
|
||||
use std::{fmt::Display, sync::Arc};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::*;
|
||||
use mls_crypto::openmls_provider::*;
|
||||
|
||||
/// Action produced after processing a group message (legacy, pre-steward
/// version of this module).
#[derive(Clone, Debug)]
pub enum GroupAction {
    /// Message to surface in the UI.
    MessageToPrint(MessageToPrint),
    /// The local user was removed; the group should be dropped.
    RemoveGroup,
    /// Nothing further to do.
    DoNothing,
}
|
||||
|
||||
/// Legacy group representation (admin-based, prior to the steward/state-machine
/// refactor).
#[derive(Clone, Debug, Actor)]
pub struct Group {
    /// Group identifier; also used as the MLS `GroupId`.
    group_name: String,
    /// Underlying MLS group; `None` until created or joined.
    mls_group: Option<Arc<Mutex<MlsGroup>>>,
    /// Present only on the group creator (the admin).
    admin: Option<Admin>,
    /// Whether this user's key package has been shared.
    is_kp_shared: bool,
    /// Random per-instance application id attached to outgoing messages.
    app_id: Vec<u8>,
}
|
||||
|
||||
impl Group {
|
||||
    /// Create a new legacy `Group`.
    ///
    /// When `is_creation` is true an MLS group is created immediately and
    /// this peer becomes the admin; otherwise an empty shell is returned to
    /// be populated later from a welcome message.
    ///
    /// NOTE(review): the creation path calls `.unwrap()` on `provider`,
    /// `signer` and `credential_with_key` — passing `None` together with
    /// `is_creation == true` panics; confirm all callers supply them.
    pub fn new(
        group_name: String,
        is_creation: bool,
        provider: Option<&MlsCryptoProvider>,
        signer: Option<&SignatureKeyPair>,
        credential_with_key: Option<&CredentialWithKey>,
    ) -> Result<Self, GroupError> {
        let uuid = uuid::Uuid::new_v4().as_bytes().to_vec();
        if is_creation {
            let group_id = group_name.as_bytes();
            // Create a new MLS group instance
            let group_config = MlsGroupConfig::builder()
                .use_ratchet_tree_extension(true)
                .build();
            let mls_group = MlsGroup::new_with_group_id(
                provider.unwrap(),
                signer.unwrap(),
                &group_config,
                GroupId::from_slice(group_id),
                credential_with_key.unwrap().clone(),
            )?;
            Ok(Group {
                group_name,
                mls_group: Some(Arc::new(Mutex::new(mls_group))),
                admin: Some(Admin::new()),
                is_kp_shared: true,
                app_id: uuid.clone(),
            })
        } else {
            Ok(Group {
                group_name,
                mls_group: None,
                admin: None,
                is_kp_shared: false,
                app_id: uuid.clone(),
            })
        }
    }
|
||||
|
||||
    /// Hex-encoded identities of all current group members.
    ///
    /// NOTE(review): unwraps `mls_group` — panics if called before the MLS
    /// group is initialized; callers must check `is_mls_group_initialized`.
    pub async fn members_identity(&self) -> Vec<String> {
        let mls_group = self.mls_group.as_ref().unwrap().lock().await;
        mls_group
            .members()
            .map(|m| hex::encode(m.credential.identity()))
            .collect()
    }

    /// Install the MLS group (e.g. after joining via a welcome message) and
    /// mark the key package as shared. Currently infallible.
    pub fn set_mls_group(&mut self, mls_group: MlsGroup) -> Result<(), GroupError> {
        self.is_kp_shared = true;
        self.mls_group = Some(Arc::new(Mutex::new(mls_group)));
        Ok(())
    }

    /// Whether the MLS group has been created/joined yet.
    pub fn is_mls_group_initialized(&self) -> bool {
        self.mls_group.is_some()
    }

    /// Whether this user's key package has been shared for this group.
    pub fn is_kp_shared(&self) -> bool {
        self.is_kp_shared
    }

    /// Update the key-package-shared flag.
    pub fn set_kp_shared(&mut self, is_kp_shared: bool) {
        self.is_kp_shared = is_kp_shared;
    }

    /// Whether this peer is the group admin (only creators hold an `Admin`).
    pub fn is_admin(&self) -> bool {
        self.admin.is_some()
    }

    /// Application id attached to outgoing messages (returned by value).
    pub fn app_id(&self) -> Vec<u8> {
        self.app_id.clone()
    }

    /// Decrypt an admin-encrypted message into a `KeyPackage`.
    ///
    /// ## Errors:
    /// - `GroupError::AdminNotSetError` if this peer is not the admin
    pub fn decrypt_admin_msg(&self, message: Vec<u8>) -> Result<KeyPackage, GroupError> {
        if !self.is_admin() {
            return Err(GroupError::AdminNotSetError);
        }
        let msg: KeyPackage = self.admin.as_ref().unwrap().decrypt_msg(message)?;
        Ok(msg)
    }
|
||||
|
||||
    /// Add new members to the group and produce the packets to broadcast.
    ///
    /// Returns two messages: the commit (for existing members) and the
    /// welcome (for the newly added members).
    ///
    /// ## Errors:
    /// - `GroupError::MlsGroupNotInitializedError` if the MLS group is unset
    pub async fn add_members(
        &mut self,
        users_kp: Vec<KeyPackage>,
        provider: &MlsCryptoProvider,
        signer: &SignatureKeyPair,
    ) -> Result<Vec<ProcessMessageToSend>, GroupError> {
        if !self.is_mls_group_initialized() {
            return Err(GroupError::MlsGroupNotInitializedError);
        }
        let mut mls_group = self.mls_group.as_mut().unwrap().lock().await;
        let (out_messages, welcome, _group_info) =
            mls_group.add_members(provider, signer, &users_kp)?;

        // Apply the pending commit locally before broadcasting it.
        mls_group.merge_pending_commit(provider)?;
        let msg_to_send_commit = ProcessMessageToSend {
            msg: out_messages.tls_serialize_detached()?,
            subtopic: COMMIT_MSG_SUBTOPIC.to_string(),
            group_id: self.group_name.clone(),
            app_id: self.app_id.clone(),
        };

        let welcome_serialized = welcome.tls_serialize_detached()?;
        let welcome_msg: Vec<u8> = serde_json::to_vec(&WelcomeMessage {
            message_type: WelcomeMessageType::WelcomeShare,
            message_payload: welcome_serialized,
        })?;

        let msg_to_send_welcome = ProcessMessageToSend {
            msg: welcome_msg,
            subtopic: WELCOME_SUBTOPIC.to_string(),
            group_id: self.group_name.clone(),
            app_id: self.app_id.clone(),
        };

        Ok(vec![msg_to_send_commit, msg_to_send_welcome])
    }
|
||||
|
||||
    /// Remove members (matched by hex-encoded identity) and produce the
    /// commit packet to broadcast.
    ///
    /// ## Errors:
    /// - `GroupError::MlsGroupNotInitializedError` if the MLS group is unset
    pub async fn remove_members(
        &mut self,
        users: Vec<String>,
        provider: &MlsCryptoProvider,
        signer: &SignatureKeyPair,
    ) -> Result<ProcessMessageToSend, GroupError> {
        if !self.is_mls_group_initialized() {
            return Err(GroupError::MlsGroupNotInitializedError);
        }
        let mut mls_group = self.mls_group.as_mut().unwrap().lock().await;
        let mut leaf_indexs = Vec::new();
        let members = mls_group.members().collect::<Vec<_>>();
        // Linear scan per user — O(users × members); acceptable only for
        // small groups.
        for user in users {
            for m in members.iter() {
                if hex::encode(m.credential.identity()) == user {
                    leaf_indexs.push(m.index);
                }
            }
        }
        // Remove operation on the mls group
        let (remove_message, _welcome, _group_info) =
            mls_group.remove_members(provider, signer, &leaf_indexs)?;

        // Second, process the removal on our end.
        mls_group.merge_pending_commit(provider)?;

        let msg_to_send_commit = ProcessMessageToSend {
            msg: remove_message.tls_serialize_detached()?,
            subtopic: COMMIT_MSG_SUBTOPIC.to_string(),
            group_id: self.group_name.clone(),
            app_id: self.app_id.clone(),
        };

        Ok(msg_to_send_commit)
    }
|
||||
|
||||
/// Process an incoming MLS protocol message and translate it into a
/// [`GroupAction`] for the caller.
///
/// Messages for other groups are ignored. Application messages become
/// `MessageToPrint` actions (own messages, matched via `signature_key`, are
/// dropped). Staged commits are merged; a commit that removed this member
/// yields `RemoveGroup`.
///
/// # Errors
/// `MlsGroupNotInitializedError` if the group was never created/joined;
/// `GroupStillActiveError` if a self-removal commit leaves the group active;
/// OpenMLS processing/merge errors and UTF-8 errors are propagated.
pub async fn process_protocol_msg(
    &mut self,
    message: ProtocolMessage,
    provider: &MlsCryptoProvider,
    signature_key: Vec<u8>,
) -> Result<GroupAction, GroupError> {
    // Ignore traffic addressed to a different group.
    let group_id = message.group_id().as_slice().to_vec();
    if group_id != self.group_name.as_bytes().to_vec() {
        return Ok(GroupAction::DoNothing);
    }
    if !self.is_mls_group_initialized() {
        return Err(GroupError::MlsGroupNotInitializedError);
    }
    // `unwrap()` is safe here: guarded by `is_mls_group_initialized()` above.
    let mut mls_group = self.mls_group.as_mut().unwrap().lock().await;

    // If the message is from a previous epoch (specifically epoch 0, i.e. the
    // commit that accompanied our welcome), skip it — we already have that state.
    if message.epoch() < mls_group.epoch() && message.epoch() == 0.into() {
        return Ok(GroupAction::DoNothing);
    }

    let processed_message = mls_group.process_message(provider, message)?;
    // Keep the sender credential before `into_content()` consumes the message.
    let processed_message_credential: Credential = processed_message.credential().clone();

    match processed_message.into_content() {
        ProcessedMessageContent::ApplicationMessage(application_message) => {
            let sender_name = {
                // Find the member matching the sender credential whose
                // signature key is NOT ours — i.e. drop our own echoes.
                let user_id = mls_group.members().find_map(|m| {
                    if m.credential.identity() == processed_message_credential.identity()
                        && (signature_key != m.signature_key.as_slice())
                    {
                        Some(hex::encode(m.credential.identity()))
                    } else {
                        None
                    }
                });
                if user_id.is_none() {
                    // Either our own message or an unknown sender: ignore.
                    return Ok(GroupAction::DoNothing);
                }
                user_id.unwrap()
            };

            let conversation_message = MessageToPrint::new(
                sender_name,
                String::from_utf8(application_message.into_bytes())?,
                self.group_name.clone(),
            );
            return Ok(GroupAction::MessageToPrint(conversation_message));
        }
        // Proposals are accumulated by OpenMLS; nothing to do until commit.
        ProcessedMessageContent::ProposalMessage(_proposal_ptr) => (),
        ProcessedMessageContent::ExternalJoinProposalMessage(_external_proposal_ptr) => (),
        ProcessedMessageContent::StagedCommitMessage(commit_ptr) => {
            // Record whether this commit removes us BEFORE merging consumes it.
            let mut remove_proposal: bool = false;
            if commit_ptr.self_removed() {
                remove_proposal = true;
            }
            mls_group.merge_staged_commit(provider, *commit_ptr)?;
            if remove_proposal {
                // Here we need to remove the group instance locally and
                // also remove the corresponding key package from local
                // storage and SC storage.
                if mls_group.is_active() {
                    // Sanity check: a self-removal must deactivate the group.
                    return Err(GroupError::GroupStillActiveError);
                }
                return Ok(GroupAction::RemoveGroup);
            }
        }
    };
    Ok(GroupAction::DoNothing)
}
|
||||
|
||||
pub fn generate_admin_message(&mut self) -> Result<ProcessMessageToSend, GroupError> {
|
||||
let admin = match self.admin.as_mut() {
|
||||
Some(a) => a,
|
||||
None => return Err(GroupError::AdminNotSetError),
|
||||
};
|
||||
admin.generate_new_key_pair();
|
||||
let admin_msg = admin.generate_admin_message();
|
||||
|
||||
let wm = WelcomeMessage {
|
||||
message_type: WelcomeMessageType::GroupAnnouncement,
|
||||
message_payload: serde_json::to_vec(&admin_msg)?,
|
||||
};
|
||||
let msg_to_send = ProcessMessageToSend {
|
||||
msg: serde_json::to_vec(&wm)?,
|
||||
subtopic: WELCOME_SUBTOPIC.to_string(),
|
||||
group_id: self.group_name.clone(),
|
||||
app_id: self.app_id.clone(),
|
||||
};
|
||||
Ok(msg_to_send)
|
||||
}
|
||||
|
||||
pub async fn create_message(
|
||||
&mut self,
|
||||
provider: &MlsCryptoProvider,
|
||||
signer: &SignatureKeyPair,
|
||||
msg: &str,
|
||||
identity: Vec<u8>,
|
||||
) -> Result<ProcessMessageToSend, GroupError> {
|
||||
let message_out = self
|
||||
.mls_group
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.lock()
|
||||
.await
|
||||
.create_message(provider, signer, msg.as_bytes())?
|
||||
.tls_serialize_detached()?;
|
||||
let app_msg = serde_json::to_vec(&AppMessage {
|
||||
sender: identity,
|
||||
message: message_out,
|
||||
})?;
|
||||
Ok(ProcessMessageToSend {
|
||||
msg: app_msg,
|
||||
subtopic: APP_MSG_SUBTOPIC.to_string(),
|
||||
group_id: self.group_name.clone(),
|
||||
app_id: self.app_id.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Group {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
writeln!(f, "Group: {:#?}", self.group_name)
|
||||
}
|
||||
}
|
||||
|
||||
/// ECIES key material held by the group admin, used to receive encrypted
/// key packages from prospective members.
#[derive(Clone, Debug)]
pub struct Admin {
    /// Current public key, advertised in group announcements.
    current_key_pair: PublicKey,
    /// Matching secret key used to decrypt incoming messages.
    current_key_pair_private: SecretKey,
    /// Unix timestamp (seconds) of when the current key pair was generated.
    key_pair_timestamp: u64,
}
|
||||
|
||||
/// Behaviour of a group admin: key creation/rotation, signed announcements,
/// and decryption of member key packages.
pub trait AdminTrait {
    /// Construct an admin with a freshly generated key pair.
    fn new() -> Self;
    /// Rotate the key pair (and its timestamp) in place.
    fn generate_new_key_pair(&mut self);
    /// Produce a signed announcement of the current public key.
    fn generate_admin_message(&self) -> GroupAnnouncement;
    /// Decrypt an encrypted message into a [`KeyPackage`].
    fn decrypt_msg(&self, message: Vec<u8>) -> Result<KeyPackage, MessageError>;
}
|
||||
|
||||
impl AdminTrait for Admin {
|
||||
fn new() -> Self {
|
||||
let (public_key, secret_key) = generate_keypair();
|
||||
Admin {
|
||||
current_key_pair: public_key,
|
||||
current_key_pair_private: secret_key,
|
||||
key_pair_timestamp: Utc::now().timestamp() as u64,
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_new_key_pair(&mut self) {
|
||||
let (public_key, secret_key) = generate_keypair();
|
||||
self.current_key_pair = public_key;
|
||||
self.current_key_pair_private = secret_key;
|
||||
self.key_pair_timestamp = Utc::now().timestamp() as u64;
|
||||
}
|
||||
|
||||
fn generate_admin_message(&self) -> GroupAnnouncement {
|
||||
let signature = sign_message(
|
||||
&self.current_key_pair.serialize_compressed(),
|
||||
&self.current_key_pair_private,
|
||||
);
|
||||
GroupAnnouncement::new(
|
||||
self.current_key_pair.serialize_compressed().to_vec(),
|
||||
signature,
|
||||
)
|
||||
}
|
||||
|
||||
fn decrypt_msg(&self, message: Vec<u8>) -> Result<KeyPackage, MessageError> {
|
||||
let msg: Vec<u8> = decrypt_message(&message, self.current_key_pair_private)?;
|
||||
let key_package: KeyPackage = serde_json::from_slice(&msg)?;
|
||||
Ok(key_package)
|
||||
}
|
||||
}
|
||||
31
src/group_registry.rs
Normal file
31
src/group_registry.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
use std::collections::HashSet;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
// src/group_registry.rs
|
||||
/// Registry of known group names, shared across tasks behind an async lock.
#[derive(Default, Debug)]
pub struct GroupRegistry {
    /// Group-name set; RwLock allows many concurrent readers, one writer.
    names: RwLock<HashSet<String>>,
}
|
||||
|
||||
impl GroupRegistry {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub async fn exists(&self, name: &str) -> bool {
|
||||
self.names.read().await.contains(name)
|
||||
}
|
||||
|
||||
pub async fn insert(&self, name: String) -> bool {
|
||||
let mut g = self.names.write().await;
|
||||
if g.contains(&name) {
|
||||
return false;
|
||||
}
|
||||
g.insert(name);
|
||||
true
|
||||
}
|
||||
|
||||
pub async fn all(&self) -> Vec<String> {
|
||||
self.names.read().await.iter().cloned().collect()
|
||||
}
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
use alloy::primitives::Address;
|
||||
use std::{collections::HashMap, fmt::Display};
|
||||
|
||||
use openmls::{credentials::CredentialWithKey, key_packages::*, prelude::*};
|
||||
use openmls_basic_credential::SignatureKeyPair;
|
||||
use openmls_traits::types::Ciphersuite;
|
||||
|
||||
use mls_crypto::openmls_provider::MlsCryptoProvider;
|
||||
|
||||
use crate::IdentityError;
|
||||
|
||||
/// A user's MLS identity: credential, signer, and the key packages it has
/// published, keyed by key-package hash reference bytes.
pub struct Identity {
    /// Key packages created for this identity, keyed by `hash_ref` bytes.
    pub(crate) kp: HashMap<Vec<u8>, KeyPackage>,
    /// Basic credential plus public signature key, as used in MLS leaves.
    pub(crate) credential_with_key: CredentialWithKey,
    /// Signature key pair used to sign MLS messages for this identity.
    pub(crate) signer: SignatureKeyPair,
}
|
||||
|
||||
impl Identity {
    /// Create a new identity from a wallet address: builds a basic credential,
    /// a signature key pair (stored in the provider's key store), and an
    /// initial key package registered under its hash reference.
    ///
    /// # Errors
    /// Propagates credential, crypto, key-store, and key-package errors.
    pub(crate) fn new(
        ciphersuite: Ciphersuite,
        crypto: &MlsCryptoProvider,
        user_wallet_address: &[u8],
    ) -> Result<Identity, IdentityError> {
        // The wallet address bytes ARE the MLS identity.
        let credential = Credential::new(user_wallet_address.to_vec(), CredentialType::Basic)?;
        let signature_keys = SignatureKeyPair::new(ciphersuite.signature_algorithm())?;
        let credential_with_key = CredentialWithKey {
            credential,
            signature_key: signature_keys.to_public_vec().into(),
        };
        // Persist the signer so OpenMLS can find it when signing later.
        signature_keys.store(crypto.key_store())?;

        let mut kps = HashMap::new();
        let key_package = KeyPackage::builder().build(
            CryptoConfig {
                ciphersuite,
                version: ProtocolVersion::default(),
            },
            crypto,
            &signature_keys,
            credential_with_key.clone(),
        )?;
        // Index the key package by its hash reference (its wire identifier).
        let kp = key_package.hash_ref(crypto.crypto())?;
        kps.insert(kp.as_slice().to_vec(), key_package);

        Ok(Identity {
            kp: kps,
            credential_with_key,
            signer: signature_keys,
        })
    }

    /// Create an additional key package using the credential_with_key/signer
    /// bound to this identity, and record it in the local map.
    ///
    /// # Errors
    /// Propagates key-package build and crypto errors.
    pub fn generate_key_package(
        &mut self,
        ciphersuite: Ciphersuite,
        crypto: &MlsCryptoProvider,
    ) -> Result<KeyPackage, IdentityError> {
        let key_package = KeyPackage::builder().build(
            CryptoConfig::with_default_version(ciphersuite),
            crypto,
            &self.signer,
            self.credential_with_key.clone(),
        )?;

        let kp = key_package.hash_ref(crypto.crypto())?;
        self.kp.insert(kp.as_slice().to_vec(), key_package.clone());
        Ok(key_package)
    }

    /// Get the plain identity as byte vector.
    pub fn identity(&self) -> Vec<u8> {
        self.credential_with_key.credential.identity().to_vec()
    }

    /// The identity rendered as an Ethereum address string.
    pub fn identity_string(&self) -> String {
        address_string(self.credential_with_key.credential.identity())
    }

    /// The public half of this identity's signature key pair.
    pub fn signature_pub_key(&self) -> Vec<u8> {
        self.signer.public().to_vec()
    }
}
|
||||
|
||||
impl Display for Identity {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
Address::from_slice(self.credential_with_key.credential.identity())
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn address_string(identity: &[u8]) -> String {
|
||||
Address::from_slice(identity).to_string()
|
||||
}
|
||||
380
src/lib.rs
380
src/lib.rs
@@ -1,106 +1,132 @@
|
||||
use alloy::signers::local::LocalSignerError;
|
||||
//! # DE-MLS: Distributed MLS Group Management System
|
||||
//!
|
||||
//! This crate provides a distributed group management system built on top of MLS (Message Layer Security).
|
||||
//! It implements a steward-based epoch management system with HashGraph-like consensus for secure group operations.
|
||||
//!
|
||||
//! ## Architecture Overview
|
||||
//!
|
||||
//! The system consists of several key components:
|
||||
//!
|
||||
//! ### Core Components
|
||||
//!
|
||||
//! - **Group Management** (`group.rs`): Orchestrates MLS group operations and state transitions
|
||||
//! - **State Machine** (`state_machine.rs`): Manages steward epoch states and transitions
|
||||
//! - **Steward** (`steward.rs`): Handles proposal collection and management
|
||||
//! - **Consensus** (`consensus/`): Provides distributed consensus for voting based on HashGraph-like protocol
|
||||
//! - **User Management** (`user.rs`): Manages individual user operations and message handling
|
||||
//!
|
||||
//! ### Actor System
|
||||
//!
|
||||
//! - **User Actor** (`user_actor.rs`): Actor-based user management with message handling
|
||||
//! - **WebSocket Actor** (`ws_actor.rs`): Handles WebSocket connections and message routing
|
||||
//! - **Action Handlers** (`action_handlers.rs`): Processes various system actions
|
||||
//!
|
||||
//! ### Communication
|
||||
//!
|
||||
//! - **Message Handling** (`message.rs`): Protobuf message serialization/deserialization
|
||||
//! - **Protocol Buffers** (`protos/`): Message definitions for network communication
|
||||
//! - **Consensus Messages** (`protos/messages/v1/consensus.proto`): Consensus-specific message types
|
||||
//!
|
||||
//! ## Steward Epoch Flow
|
||||
//!
|
||||
//! The system operates in epochs managed by a steward with robust state management:
|
||||
//!
|
||||
//! 1. **Working State**: Normal operation, all users can send any message freely
|
||||
//! 2. **Waiting State**: Steward epoch active, only steward can send BATCH_PROPOSALS_MESSAGE
|
||||
//! 3. **Voting State**: Consensus voting, restricted message types (VOTE/USER_VOTE for all, VOTE_PAYLOAD/PROPOSAL for steward only)
|
||||
//!
|
||||
//! ### Complete State Transitions
|
||||
//!
|
||||
//! ```text
|
||||
//! Working --start_steward_epoch()--> Waiting (if proposals exist)
|
||||
//! Working --start_steward_epoch()--> Working (if no proposals - no state change)
|
||||
//! Waiting --start_voting()---------> Voting
|
||||
//! Waiting --no_proposals_found()---> Working (edge case: proposals disappear during voting)
|
||||
//! Voting --complete_voting(YES)----> Waiting --apply_proposals()--> Working
|
||||
//! Voting --complete_voting(NO)-----> Working
|
||||
//! ```
|
||||
//!
|
||||
//! ### Steward State Guarantees
|
||||
//!
|
||||
//! - **Always returns to Working**: Steward transitions back to Working state after every epoch
|
||||
//! - **No proposals handling**: If no proposals exist, steward stays in Working state
|
||||
//! - **Edge case coverage**: All scenarios including proposal disappearance are handled
|
||||
//! - **Robust error handling**: Invalid state transitions are prevented and logged
|
||||
//! ## Message Flow
|
||||
//!
|
||||
//! ### Regular Messages
|
||||
//! ```text
|
||||
//! User --> Group --> MLS Group --> Other Users
|
||||
//! ```
|
||||
//!
|
||||
//! ### Steward Messages
|
||||
//! ```text
|
||||
//! Steward (with proposals) --> Group --> MLS Group --> Other Users
|
||||
//! ```
|
||||
//!
|
||||
//! ### Batch Proposals
|
||||
//! ```text
|
||||
//! Group --> Create MLS Proposals --> Commit --> Batch Message --> Users
|
||||
//! Users --> Parse Batch --> Apply Proposals --> Update MLS Group
|
||||
//! ```
|
||||
//!
|
||||
//! ## Testing
|
||||
//!
|
||||
//! The system includes comprehensive tests:
|
||||
//!
|
||||
//! - State machine transitions
|
||||
//! - Message handling
|
||||
//!
|
||||
//! Run tests with:
|
||||
//! ```bash
|
||||
//! cargo test
|
||||
//! ```
|
||||
//!
|
||||
//! ## Dependencies
|
||||
//!
|
||||
//! - **MLS**: Message Layer Security for group key management
|
||||
//! - **Tokio**: Async runtime for concurrent operations
|
||||
//! - **Kameo**: Actor system for distributed operations
|
||||
//! - **Prost**: Protocol buffer serialization
|
||||
//! - **OpenMLS**: MLS implementation
|
||||
//! - **Waku**: Decentralized messaging protocol
|
||||
//! - **Alloy**: Ethereum wallet and signing
|
||||
|
||||
use alloy::primitives::{Address, Signature};
|
||||
use ecies::{decrypt, encrypt};
|
||||
use kameo::{actor::ActorRef, error::SendError};
|
||||
use libsecp256k1::{sign, verify, Message, PublicKey, SecretKey, Signature as libSignature};
|
||||
use openmls::{error::LibraryError, prelude::*};
|
||||
use openmls_rust_crypto::MemoryKeyStoreError;
|
||||
use rand::thread_rng;
|
||||
use secp256k1::hashes::{sha256, Hash};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
fmt::Display,
|
||||
str::Utf8Error,
|
||||
string::FromUtf8Error,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use waku_bindings::{WakuContentTopic, WakuMessage};
|
||||
|
||||
use ds::{
|
||||
waku_actor::{ProcessMessageToSend, ProcessSubscribeToGroup, WakuActor},
|
||||
DeliveryServiceError,
|
||||
};
|
||||
use error::{GroupError, MessageError};
|
||||
|
||||
pub mod group_actor;
|
||||
pub mod identity;
|
||||
pub mod main_loop;
|
||||
pub mod bootstrap;
|
||||
pub use bootstrap::{bootstrap_core, bootstrap_core_from_env, Bootstrap, BootstrapConfig};
|
||||
|
||||
pub mod consensus;
|
||||
pub mod error;
|
||||
pub mod group;
|
||||
pub mod group_registry;
|
||||
pub mod message;
|
||||
pub mod state_machine;
|
||||
pub mod steward;
|
||||
pub mod user;
|
||||
pub mod ws_actor;
|
||||
pub mod user_actor;
|
||||
pub mod user_app_instance;
|
||||
|
||||
/// Shared application state handed to every Axum handler / task.
pub struct AppState {
    /// Handle to the Waku delivery-service actor.
    pub waku_actor: ActorRef<WakuActor>,
    /// Names of rooms (groups) known to this server instance.
    pub rooms: Mutex<HashSet<String>>,
    /// Content topics this node is subscribed to; shared with the Waku callback.
    pub content_topics: Arc<Mutex<Vec<WakuContentTopic>>>,
    /// Broadcast channel fanning incoming Waku messages out to sessions.
    pub pubsub: tokio::sync::broadcast::Sender<WakuMessage>,
}
|
||||
|
||||
/// Kind of payload carried on the welcome subtopic.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum WelcomeMessageType {
    /// Admin announcement of the group's current (signed) public key.
    GroupAnnouncement,
    /// A member's key package, encrypted to the announced admin key.
    KeyPackageShare,
    /// An MLS Welcome for newly added members.
    WelcomeShare,
}
|
||||
|
||||
/// Envelope for everything sent on the welcome subtopic.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WelcomeMessage {
    /// Discriminator telling receivers how to interpret the payload.
    pub message_type: WelcomeMessageType,
    /// Opaque payload; encoding depends on `message_type`.
    pub message_payload: Vec<u8>,
}
|
||||
|
||||
/// Admin announcement: a public key self-signed by its matching private key,
/// so members can encrypt key packages to the admin.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GroupAnnouncement {
    /// Compressed secp256k1 public key bytes.
    pub_key: Vec<u8>,
    /// Signature over `pub_key`, made with the corresponding private key.
    signature: Vec<u8>,
}
|
||||
|
||||
impl GroupAnnouncement {
|
||||
pub fn new(pub_key: Vec<u8>, signature: Vec<u8>) -> Self {
|
||||
GroupAnnouncement { pub_key, signature }
|
||||
}
|
||||
|
||||
pub fn verify(&self) -> Result<bool, MessageError> {
|
||||
let verified = verify_message(&self.pub_key, &self.signature, &self.pub_key)?;
|
||||
Ok(verified)
|
||||
}
|
||||
|
||||
pub fn encrypt(&self, data: Vec<u8>) -> Result<Vec<u8>, MessageError> {
|
||||
let encrypted = encrypt_message(&data, &self.pub_key)?;
|
||||
Ok(encrypted)
|
||||
}
|
||||
}
|
||||
|
||||
/// Application-subtopic envelope: a sender identity plus an opaque
/// (TLS-serialized MLS) message payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppMessage {
    /// Sender identity bytes (credential identity).
    pub sender: Vec<u8>,
    /// Serialized MLS application message.
    pub message: Vec<u8>,
}
|
||||
|
||||
impl AppMessage {
|
||||
pub fn new(sender: Vec<u8>, message: Vec<u8>) -> Self {
|
||||
AppMessage { sender, message }
|
||||
}
|
||||
}
|
||||
|
||||
/// A decrypted chat line ready to show to the user.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct MessageToPrint {
    /// Hex-encoded sender identity.
    pub sender: String,
    /// Plaintext message body.
    pub message: String,
    /// Group the message belongs to.
    pub group_name: String,
}
|
||||
|
||||
impl MessageToPrint {
|
||||
pub fn new(sender: String, message: String, group_name: String) -> Self {
|
||||
MessageToPrint {
|
||||
sender,
|
||||
message,
|
||||
group_name,
|
||||
pub mod protos {
|
||||
pub mod consensus {
|
||||
pub mod v1 {
|
||||
include!(concat!(env!("OUT_DIR"), "/consensus.v1.rs"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for MessageToPrint {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}: {}", self.sender, self.message)
|
||||
pub mod de_mls {
|
||||
pub mod messages {
|
||||
pub mod v1 {
|
||||
include!(concat!(env!("OUT_DIR"), "/de_mls.messages.v1.rs"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,10 +148,13 @@ pub fn verify_message(
|
||||
signature: &[u8],
|
||||
public_key: &[u8],
|
||||
) -> Result<bool, MessageError> {
|
||||
const COMPRESSED_PUBLIC_KEY_SIZE: usize = 33;
|
||||
|
||||
let digest = sha256::Hash::hash(message);
|
||||
let msg = Message::parse(&digest.to_byte_array());
|
||||
let signature = libSignature::parse_der(signature)?;
|
||||
let mut pub_key_bytes: [u8; 33] = [0; 33];
|
||||
|
||||
let mut pub_key_bytes: [u8; COMPRESSED_PUBLIC_KEY_SIZE] = [0; COMPRESSED_PUBLIC_KEY_SIZE];
|
||||
pub_key_bytes[..].copy_from_slice(public_key);
|
||||
let public_key = PublicKey::parse_compressed(&pub_key_bytes)?;
|
||||
Ok(verify(&msg, &signature, &public_key))
|
||||
@@ -142,137 +171,74 @@ pub fn decrypt_message(message: &[u8], secret_key: SecretKey) -> Result<Vec<u8>,
|
||||
Ok(decrypted)
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum IdentityError {
|
||||
#[error("Failed to create new key package: {0}")]
|
||||
MlsKeyPackageCreationError(#[from] KeyPackageNewError<MemoryKeyStoreError>),
|
||||
#[error(transparent)]
|
||||
MlsLibraryError(#[from] LibraryError),
|
||||
#[error("Failed to create signature: {0}")]
|
||||
MlsCryptoError(#[from] CryptoError),
|
||||
#[error("Failed to save signature key: {0}")]
|
||||
MlsKeyStoreError(#[from] MemoryKeyStoreError),
|
||||
#[error("Failed to create credential: {0}")]
|
||||
MlsCredentialError(#[from] CredentialError),
|
||||
#[error("An unknown error occurred: {0}")]
|
||||
Other(anyhow::Error),
|
||||
pub trait LocalSigner {
|
||||
fn local_sign_message(
|
||||
&self,
|
||||
message: &[u8],
|
||||
) -> impl std::future::Future<Output = Result<Vec<u8>, anyhow::Error>> + Send;
|
||||
|
||||
fn address(&self) -> Address;
|
||||
fn address_string(&self) -> String;
|
||||
fn address_bytes(&self) -> Vec<u8>;
|
||||
}
|
||||
|
||||
/// Errors raised by group operations (admin handling, MLS state changes,
/// and message encoding).
#[derive(Debug, thiserror::Error)]
pub enum GroupError {
    // --- local state errors ---
    #[error("Admin not set")]
    AdminNotSetError,
    #[error(transparent)]
    MessageError(#[from] MessageError),
    #[error("MLS group not initialized")]
    MlsGroupNotInitializedError,

    // --- OpenMLS operation errors ---
    #[error("Error while creating MLS group: {0}")]
    MlsGroupCreationError(#[from] NewGroupError<MemoryKeyStoreError>),
    #[error("Error while adding member to MLS group: {0}")]
    MlsAddMemberError(#[from] AddMembersError<MemoryKeyStoreError>),
    #[error("Error while merging pending commit in MLS group: {0}")]
    MlsMergePendingCommitError(#[from] MergePendingCommitError<MemoryKeyStoreError>),
    #[error("Error while merging commit in MLS group: {0}")]
    MlsMergeCommitError(#[from] MergeCommitError<MemoryKeyStoreError>),
    #[error("Error processing unverified message: {0}")]
    MlsProcessMessageError(#[from] ProcessMessageError),
    #[error("Error while creating message: {0}")]
    MlsCreateMessageError(#[from] CreateMessageError),
    #[error("Failed to remove members: {0}")]
    MlsRemoveMembersError(#[from] RemoveMembersError<MemoryKeyStoreError>),
    #[error("Group still active")]
    GroupStillActiveError,

    // --- encoding / serialization errors ---
    #[error("UTF-8 parsing error: {0}")]
    Utf8ParsingError(#[from] FromUtf8Error),
    #[error("JSON processing error: {0}")]
    JsonError(#[from] serde_json::Error),
    #[error("Serialization error: {0}")]
    SerializationError(#[from] tls_codec::Error),

    #[error("An unknown error occurred: {0}")]
    Other(anyhow::Error),
}
|
||||
|
||||
/// Errors from message-level crypto (signing/verification) and JSON codecs.
// NOTE(review): other code in this file (e.g. `verify_vote_hash`) refers to
// additional variants such as `MismatchedLength` — this snapshot may predate
// them; confirm against the current enum definition.
#[derive(Debug, thiserror::Error)]
pub enum MessageError {
    #[error("Failed to verify signature: {0}")]
    SignatureVerificationError(#[from] libsecp256k1::Error),
    #[error("JSON processing error: {0}")]
    JsonError(#[from] serde_json::Error),
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum UserError {
|
||||
#[error(transparent)]
|
||||
DeliveryServiceError(#[from] DeliveryServiceError),
|
||||
#[error(transparent)]
|
||||
IdentityError(#[from] IdentityError),
|
||||
#[error(transparent)]
|
||||
GroupError(#[from] GroupError),
|
||||
#[error(transparent)]
|
||||
MessageError(#[from] MessageError),
|
||||
|
||||
#[error("Group already exists: {0}")]
|
||||
GroupAlreadyExistsError(String),
|
||||
#[error("Group not found: {0}")]
|
||||
GroupNotFoundError(String),
|
||||
|
||||
#[error("Unsupported message type.")]
|
||||
UnsupportedMessageType,
|
||||
#[error("Welcome message cannot be empty.")]
|
||||
EmptyWelcomeMessageError,
|
||||
#[error("Message verification failed")]
|
||||
MessageVerificationFailed,
|
||||
|
||||
#[error("Unknown content topic type: {0}")]
|
||||
UnknownContentTopicType(String),
|
||||
|
||||
#[error("Failed to create staged join: {0}")]
|
||||
MlsWelcomeError(#[from] WelcomeError<MemoryKeyStoreError>),
|
||||
|
||||
#[error("UTF-8 parsing error: {0}")]
|
||||
Utf8ParsingError(#[from] FromUtf8Error),
|
||||
#[error("UTF-8 string parsing error: {0}")]
|
||||
Utf8StringParsingError(#[from] Utf8Error),
|
||||
#[error("JSON processing error: {0}")]
|
||||
JsonError(#[from] serde_json::Error),
|
||||
#[error("Serialization error: {0}")]
|
||||
SerializationError(#[from] tls_codec::Error),
|
||||
#[error("Failed to parse signer: {0}")]
|
||||
SignerParsingError(#[from] LocalSignerError),
|
||||
|
||||
#[error("Failed to subscribe to group: {0}")]
|
||||
KameoSubscribeToGroupError(#[from] SendError<ProcessSubscribeToGroup, DeliveryServiceError>),
|
||||
#[error("Failed to publish message: {0}")]
|
||||
KameoPublishMessageError(#[from] SendError<ProcessMessageToSend, DeliveryServiceError>),
|
||||
#[error("Failed to create group: {0}")]
|
||||
KameoCreateGroupError(String),
|
||||
#[error("Failed to send message to user: {0}")]
|
||||
KameoSendMessageError(String),
|
||||
pub fn verify_vote_hash(
|
||||
signature: &[u8],
|
||||
public_key: &[u8],
|
||||
message: &[u8],
|
||||
) -> Result<bool, MessageError> {
|
||||
let signature_bytes: [u8; 65] =
|
||||
signature
|
||||
.try_into()
|
||||
.map_err(|_| MessageError::MismatchedLength {
|
||||
expect: 65,
|
||||
actual: signature.len(),
|
||||
})?;
|
||||
let signature = Signature::from_raw_array(&signature_bytes)?;
|
||||
let address = signature.recover_address_from_msg(message)?;
|
||||
let address_bytes = address.as_slice().to_vec();
|
||||
Ok(address_bytes == public_key)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use alloy::signers::local::PrivateKeySigner;
|
||||
|
||||
use crate::{verify_vote_hash, LocalSigner};
|
||||
|
||||
use super::{decrypt_message, encrypt_message, generate_keypair, sign_message, verify_message};
|
||||
|
||||
#[test]
|
||||
fn test_verify_message() {
|
||||
let message = b"Hello, world!";
|
||||
let (public_key, secret_key) = generate_keypair();
|
||||
let signature = sign_message(message, &secret_key);
|
||||
let verified = verify_message(message, &signature, &public_key.serialize_compressed());
|
||||
assert!(verified.is_ok());
|
||||
assert!(verified.unwrap());
|
||||
let verified = verify_message(message, &signature, &public_key.serialize_compressed())
|
||||
.expect("Failed to verify message");
|
||||
assert!(verified);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_encrypt_decrypt_message() {
|
||||
let message = b"Hello, world!";
|
||||
let (public_key, secret_key) = generate_keypair();
|
||||
let encrypted = encrypt_message(message, &public_key.serialize_compressed());
|
||||
let decrypted = decrypt_message(&encrypted.unwrap(), secret_key);
|
||||
assert_eq!(message, decrypted.unwrap().as_slice());
|
||||
let encrypted = encrypt_message(message, &public_key.serialize_compressed())
|
||||
.expect("Failed to encrypt message");
|
||||
let decrypted = decrypt_message(&encrypted, secret_key).expect("Failed to decrypt message");
|
||||
assert_eq!(message, decrypted.as_slice());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_local_signer() {
|
||||
let signer = PrivateKeySigner::random();
|
||||
let message = b"Hello, world!";
|
||||
let signature = signer
|
||||
.local_sign_message(message)
|
||||
.await
|
||||
.expect("Failed to sign message");
|
||||
|
||||
let verified = verify_vote_hash(&signature, &signer.address_bytes(), message)
|
||||
.expect("Failed to verify vote hash");
|
||||
assert!(verified);
|
||||
}
|
||||
}
|
||||
|
||||
337
src/main.rs
337
src/main.rs
@@ -1,337 +0,0 @@
|
||||
use axum::{
|
||||
extract::ws::{Message, WebSocket, WebSocketUpgrade},
|
||||
extract::State,
|
||||
http::Method,
|
||||
response::IntoResponse,
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use bounded_vec_deque::BoundedVecDeque;
|
||||
use futures::StreamExt;
|
||||
use kameo::actor::ActorRef;
|
||||
use log::{error, info};
|
||||
use serde_json::json;
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
net::SocketAddr,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use tokio::sync::mpsc::{channel, Sender};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tower_http::cors::{Any, CorsLayer};
|
||||
use waku_bindings::{waku_set_event_callback, WakuMessage};
|
||||
|
||||
use de_mls::{
|
||||
main_loop::{main_loop, Connection},
|
||||
user::{ProcessLeaveGroup, ProcessRemoveUser, ProcessSendMessage, User, UserAction},
|
||||
ws_actor::{RawWsMessage, WsAction, WsActor},
|
||||
AppState, MessageToPrint,
|
||||
};
|
||||
use ds::{
|
||||
ds_waku::{match_content_topic, setup_node_handle},
|
||||
waku_actor::{ProcessUnsubscribeFromGroup, WakuActor},
|
||||
};
|
||||
|
||||
/// Server entry point: wires the Waku node, the shared [`AppState`], and the
/// Axum HTTP/WebSocket server, then runs until either the server or the
/// Waku-forwarding task fails.
///
/// Environment: `PORT` (default 3000) and `NODE` (Waku node address, required).
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    // PORT is optional; a missing variable falls back to 3000, but a present
    // yet unparsable value surfaces as an error via `?`.
    let port = std::env::var("PORT")
        .map(|val| val.parse::<u16>())
        .unwrap_or(Ok(3000))?;
    let addr = SocketAddr::from(([0, 0, 0, 0], port));

    // NODE is mandatory: it identifies the Waku node to connect to.
    let node_name = std::env::var("NODE")?;
    let node = setup_node_handle(vec![node_name])?;
    let waku_actor = kameo::actor::spawn(WakuActor::new(Arc::new(node)));
    // Broadcast channel fanning incoming Waku messages out to all sessions.
    let (tx, _) = tokio::sync::broadcast::channel(100);
    let app_state = Arc::new(AppState {
        waku_actor,
        rooms: Mutex::new(HashSet::new()),
        content_topics: Arc::new(Mutex::new(Vec::new())),
        pubsub: tx.clone(),
    });

    // Bridge: Waku FFI callback -> mpsc channel -> broadcast channel.
    let (waku_sender, mut waku_receiver) = channel::<WakuMessage>(100);
    handle_waku(waku_sender, app_state.clone()).await;

    let recv_messages = tokio::spawn(async move {
        info!("Running recv messages from waku");
        while let Some(msg) = waku_receiver.recv().await {
            // Send errors (no subscribers) are deliberately ignored.
            let _ = tx.send(msg);
        }
    });

    let cors = CorsLayer::new()
        .allow_origin(Any)
        .allow_methods(vec![Method::GET]);

    let app = Router::new()
        .route("/", get(|| async { "Hello World!" }))
        .route("/ws", get(handler))
        .route("/rooms", get(get_rooms))
        .with_state(app_state)
        .layer(cors);

    println!("Hosted on {:?}", addr);
    let res = axum::Server::bind(&addr).serve(app.into_make_service());
    // Run until either the HTTP server or the Waku forwarder fails.
    tokio::select! {
        Err(x) = res => {
            error!("Error hosting server: {}", x);
        }
        Err(w) = recv_messages => {
            error!("Error receiving messages from waku: {}", w);
        }
    }
    Ok(())
}
|
||||
|
||||
async fn handler(ws: WebSocketUpgrade, State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
ws.on_upgrade(|socket| handle_socket(socket, state))
|
||||
}
|
||||
|
||||
/// Install the global Waku event callback: de-duplicates messages (last 40
/// ids), filters by subscribed content topics, and forwards accepted messages
/// into `waku_sender`.
///
/// The callback runs on a Waku (non-async) thread, hence `blocking_send`.
async fn handle_waku(waku_sender: Sender<WakuMessage>, state: Arc<AppState>) {
    info!("Setting up waku event callback");
    // Bounded de-dup window: remembers the most recent 40 message ids.
    let mut seen_messages = BoundedVecDeque::<String>::new(40);
    waku_set_event_callback(move |signal| {
        match signal.event() {
            waku_bindings::Event::WakuMessage(event) => {
                let msg_id = event.message_id();
                if seen_messages.contains(msg_id) {
                    // Duplicate delivery — drop it.
                    return;
                }
                seen_messages.push_back(msg_id.clone());
                let content_topic = event.waku_message().content_topic();
                // Check if message belongs to a relevant topic.
                if !match_content_topic(&state.content_topics, content_topic) {
                    error!("Content topic not match: {:?}", content_topic);
                    return;
                };
                let msg = event.waku_message().clone();
                info!("Received message from waku: {:?}", event.message_id());
                // Callback thread is not async, so block until the channel
                // accepts the message; a closed receiver is fatal here.
                waku_sender
                    .blocking_send(msg)
                    .expect("Failed to send message to waku");
            }

            waku_bindings::Event::Unrecognized(data) => {
                error!("Unrecognized event!\n {data:?}");
            }
            _ => {
                error!(
                    "Unrecognized signal!\n {:?}",
                    serde_json::to_string(&signal)
                );
            }
        }
    });
}
|
||||
|
||||
async fn handle_socket(socket: WebSocket, state: Arc<AppState>) {
|
||||
let (ws_sender, mut ws_receiver) = socket.split();
|
||||
let ws_actor = kameo::spawn(WsActor::new(ws_sender));
|
||||
let mut main_loop_connection = None::<Connection>;
|
||||
let cancel_token = CancellationToken::new();
|
||||
while let Some(Ok(Message::Text(data))) = ws_receiver.next().await {
|
||||
let res = ws_actor.ask(RawWsMessage { message: data }).await;
|
||||
match res {
|
||||
Ok(WsAction::Connect(connect)) => {
|
||||
info!("Got connect: {:?}", &connect);
|
||||
main_loop_connection = Some(Connection {
|
||||
eth_private_key: connect.eth_private_key.clone(),
|
||||
group_id: connect.group_id.clone(),
|
||||
should_create_group: connect.should_create,
|
||||
});
|
||||
let mut rooms = state.rooms.lock().unwrap();
|
||||
if !rooms.contains(&connect.group_id.clone()) {
|
||||
rooms.insert(connect.group_id.clone());
|
||||
}
|
||||
info!("Prepare info for main loop: {:?}", main_loop_connection);
|
||||
break;
|
||||
}
|
||||
Ok(_) => {
|
||||
info!("Got chat message for non-existent user");
|
||||
}
|
||||
|
||||
Err(e) => error!("Error handling message: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
let user_actor = main_loop(main_loop_connection.unwrap().clone(), state.clone())
|
||||
.await
|
||||
.expect("Failed to start main loop");
|
||||
|
||||
let user_actor_clone = user_actor.clone();
|
||||
let state_clone = state.clone();
|
||||
let ws_actor_clone = ws_actor.clone();
|
||||
let mut waku_receiver = state.pubsub.subscribe();
|
||||
let cancel_token_clone = cancel_token.clone();
|
||||
let mut recv_messages = tokio::spawn(async move {
|
||||
info!("Running recv messages from waku");
|
||||
while let Ok(msg) = waku_receiver.recv().await {
|
||||
let res = handle_user_actions(
|
||||
msg,
|
||||
state_clone.waku_actor.clone(),
|
||||
ws_actor_clone.clone(),
|
||||
user_actor_clone.clone(),
|
||||
cancel_token_clone.clone(),
|
||||
)
|
||||
.await;
|
||||
if let Err(e) = res {
|
||||
error!("Error handling waku message: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let user_ref_clone = user_actor.clone();
|
||||
let mut send_messages = {
|
||||
tokio::spawn(async move {
|
||||
info!("Running recieve messages from websocket");
|
||||
while let Some(Ok(Message::Text(text))) = ws_receiver.next().await {
|
||||
let res = handle_ws_message(
|
||||
RawWsMessage { message: text },
|
||||
ws_actor.clone(),
|
||||
user_ref_clone.clone(),
|
||||
state.waku_actor.clone(),
|
||||
)
|
||||
.await;
|
||||
if let Err(e) = res {
|
||||
error!("Error handling websocket message: {}", e);
|
||||
}
|
||||
}
|
||||
})
|
||||
};
|
||||
|
||||
info!("Waiting for main loop to finish");
|
||||
tokio::select! {
|
||||
_ = (&mut recv_messages) => {
|
||||
info!("recv_messages finished");
|
||||
send_messages.abort();
|
||||
}
|
||||
_ = (&mut send_messages) => {
|
||||
info!("send_messages finished");
|
||||
send_messages.abort();
|
||||
}
|
||||
_ = cancel_token.cancelled() => {
|
||||
info!("Cancel token cancelled");
|
||||
send_messages.abort();
|
||||
recv_messages.abort();
|
||||
}
|
||||
};
|
||||
|
||||
info!("Main loop finished");
|
||||
}
|
||||
|
||||
async fn handle_user_actions(
|
||||
msg: WakuMessage,
|
||||
waku_actor: ActorRef<WakuActor>,
|
||||
ws_actor: ActorRef<WsActor>,
|
||||
user_actor: ActorRef<User>,
|
||||
cancel_token: CancellationToken,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let actions = user_actor.ask(msg).await?;
|
||||
for action in actions {
|
||||
match action {
|
||||
UserAction::SendToWaku(msg) => {
|
||||
let id = waku_actor.ask(msg).await?;
|
||||
info!("Successfully publish message with id: {:?}", id);
|
||||
}
|
||||
UserAction::SendToGroup(msg) => {
|
||||
info!("Send to group: {:?}", msg);
|
||||
ws_actor.ask(msg).await?;
|
||||
}
|
||||
UserAction::RemoveGroup(group_name) => {
|
||||
waku_actor
|
||||
.ask(ProcessUnsubscribeFromGroup {
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
user_actor
|
||||
.ask(ProcessLeaveGroup {
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
info!("Leave group: {:?}", &group_name);
|
||||
ws_actor
|
||||
.ask(MessageToPrint {
|
||||
sender: "system".to_string(),
|
||||
message: format!("Group {} removed you", group_name),
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
cancel_token.cancel();
|
||||
}
|
||||
UserAction::DoNothing => {}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_ws_message(
|
||||
msg: RawWsMessage,
|
||||
ws_actor: ActorRef<WsActor>,
|
||||
user_actor: ActorRef<User>,
|
||||
waku_actor: ActorRef<WakuActor>,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let action = ws_actor.ask(msg).await?;
|
||||
match action {
|
||||
WsAction::Connect(connect) => {
|
||||
info!("Got unexpected connect: {:?}", &connect);
|
||||
}
|
||||
WsAction::UserMessage(msg) => {
|
||||
info!("Got user message: {:?}", &msg);
|
||||
let mtp = MessageToPrint {
|
||||
message: msg.message.clone(),
|
||||
group_name: msg.group_id.clone(),
|
||||
sender: "me".to_string(),
|
||||
};
|
||||
ws_actor.ask(mtp).await?;
|
||||
|
||||
let pmt = user_actor
|
||||
.ask(ProcessSendMessage {
|
||||
msg: msg.message,
|
||||
group_name: msg.group_id,
|
||||
})
|
||||
.await?;
|
||||
let id = waku_actor.ask(pmt).await?;
|
||||
info!("Successfully publish message with id: {:?}", id);
|
||||
}
|
||||
WsAction::RemoveUser(user_to_ban, group_name) => {
|
||||
info!("Got remove user: {:?}", &user_to_ban);
|
||||
let pmt = user_actor
|
||||
.ask(ProcessRemoveUser {
|
||||
user_to_ban: user_to_ban.clone(),
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
let id = waku_actor.ask(pmt).await?;
|
||||
info!("Successfully publish message with id: {:?}", id);
|
||||
ws_actor
|
||||
.ask(MessageToPrint {
|
||||
sender: "system".to_string(),
|
||||
message: format!("User {} was removed from group", user_to_ban),
|
||||
group_name: group_name.clone(),
|
||||
})
|
||||
.await?;
|
||||
}
|
||||
WsAction::DoNothing => {}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_rooms(State(state): State<Arc<AppState>>) -> String {
|
||||
let rooms = state.rooms.lock().unwrap();
|
||||
let vec = rooms.iter().collect::<Vec<&String>>();
|
||||
match vec.len() {
|
||||
0 => json!({
|
||||
"status": "No rooms found yet!",
|
||||
"rooms": []
|
||||
})
|
||||
.to_string(),
|
||||
_ => json!({
|
||||
"status": "Success!",
|
||||
"rooms": vec
|
||||
})
|
||||
.to_string(),
|
||||
}
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
use alloy::signers::local::PrivateKeySigner;
|
||||
use kameo::actor::ActorRef;
|
||||
use log::{error, info};
|
||||
use std::{str::FromStr, sync::Arc, time::Duration};
|
||||
|
||||
use crate::user::{ProcessAdminMessage, ProcessCreateGroup, User};
|
||||
use crate::{AppState, UserError};
|
||||
use ds::waku_actor::ProcessSubscribeToGroup;
|
||||
|
||||
/// Session parameters extracted from the client's initial `Connect` message;
/// consumed by `main_loop` to bootstrap the user session.
#[derive(Debug, Clone)]
pub struct Connection {
    /// Ethereum private key string used to derive the user identity.
    pub eth_private_key: String,
    /// Name/id of the group the client wants to join or create.
    pub group_id: String,
    /// When true the group is created by this user, who then periodically
    /// publishes admin messages for it (see `main_loop`).
    pub should_create_group: bool,
}
|
||||
|
||||
/// Bootstraps a user session for a freshly connected client.
///
/// Creates the `User` actor from the client's Ethereum key, creates or joins
/// the requested group, subscribes the shared waku actor to the group's
/// content topics, and — when this user created the group — spawns a
/// background task that publishes an admin message every 30 seconds.
///
/// Returns the spawned `User` actor reference on success.
pub async fn main_loop(
    connection: Connection,
    app_state: Arc<AppState>,
) -> Result<ActorRef<User>, UserError> {
    let signer = PrivateKeySigner::from_str(&connection.eth_private_key)?;
    let user_address = signer.address().to_string();
    let group_name: String = connection.group_id.clone();
    // Create user
    let user = User::new(&connection.eth_private_key)?;
    let user_ref = kameo::spawn(user);
    user_ref
        .ask(ProcessCreateGroup {
            group_name: group_name.clone(),
            is_creation: connection.should_create_group,
        })
        .await
        .map_err(|e| UserError::KameoCreateGroupError(e.to_string()))?;

    // Subscribe the waku actor to the group's topics and record them in the
    // shared list used by the event callback to filter incoming messages.
    let mut content_topics = app_state
        .waku_actor
        .ask(ProcessSubscribeToGroup {
            group_name: group_name.clone(),
        })
        .await?;
    app_state
        .content_topics
        .lock()
        .unwrap()
        .append(&mut content_topics);

    if connection.should_create_group {
        info!(
            "User {:?} start sending admin message for group {:?}",
            user_address, group_name
        );
        let user_clone = user_ref.clone();
        let group_name_clone = group_name.clone();
        let node_clone = app_state.waku_actor.clone();
        // Group creator's announcement loop: every 30s build an admin
        // message via the user actor and publish it through the waku actor.
        // Errors are logged and the loop keeps running.
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(30));
            loop {
                interval.tick().await;
                let res = async {
                    let msg = user_clone
                        .ask(ProcessAdminMessage {
                            group_name: group_name_clone.clone(),
                        })
                        .await
                        .map_err(|e| UserError::KameoSendMessageError(e.to_string()))?;
                    let id = node_clone.ask(msg).await?;
                    info!("Successfully publish admin message with id: {:?}", id);
                    Ok::<(), UserError>(())
                }
                .await;
                if let Err(e) = res {
                    error!("Error sending admin message to waku: {}", e);
                }
            }
        });
    };

    Ok(user_ref)
}
|
||||
270
src/message.rs
Normal file
270
src/message.rs
Normal file
@@ -0,0 +1,270 @@
|
||||
//! This module contains the messages that are used to communicate between inside the application
|
||||
//! The high level message is a [`WakuMessage`](waku_bindings::WakuMessage)
|
||||
//! Inside the [`WakuMessage`](waku_bindings::WakuMessage) we have a [`ContentTopic`](waku_bindings::WakuContentTopic) and a payload
|
||||
//! The [`ContentTopic`](waku_bindings::WakuContentTopic) is used to identify the type of message and the payload is the actual message
|
||||
//! Based on the [`ContentTopic`](waku_bindings::WakuContentTopic) we distinguish between:
|
||||
//! - [`WelcomeMessage`] which includes next message types:
|
||||
//! - [`GroupAnnouncement`]
|
||||
//! - `GroupAnnouncement {
|
||||
//! eth_pub_key: Vec<u8>,
|
||||
//! signature: Vec<u8>,
|
||||
//! }`
|
||||
//! - [`UserKeyPackage`]
|
||||
//! - `Encrypted KeyPackage: Vec<u8>`
|
||||
//! - [`InvitationToJoin`]
|
||||
//! - `Serialized MlsMessageOut: Vec<u8>`
|
||||
//! - [`AppMessage`]
|
||||
//! - [`ConversationMessage`]
|
||||
//! - [`BatchProposalsMessage`]
|
||||
//! - [`BanRequest`]
|
||||
//! - [`VotePayload`]
|
||||
//! - [`UserVote`]
|
||||
//!
|
||||
use alloy::hex;
|
||||
use mls_crypto::identity::normalize_wallet_address;
|
||||
use openmls::prelude::{KeyPackage, MlsMessageOut};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use crate::{
|
||||
consensus::ConsensusEvent,
|
||||
encrypt_message,
|
||||
protos::{
|
||||
consensus::v1::{Outcome, Proposal, RequestType, UpdateRequest, Vote, VotePayload},
|
||||
de_mls::messages::v1::{
|
||||
app_message, welcome_message, AppMessage, BanRequest, BatchProposalsMessage,
|
||||
ConversationMessage, GroupAnnouncement, InvitationToJoin, ProposalAdded,
|
||||
UserKeyPackage, UserVote, WelcomeMessage,
|
||||
},
|
||||
},
|
||||
steward::GroupUpdateRequest,
|
||||
verify_message, MessageError,
|
||||
};
|
||||
|
||||
// Message type constants for consistency and type safety
pub mod message_types {
    // Labels for the `AppMessage` payload variants (see the `MessageType`
    // impls below); also used by the state machine's permission checks.
    pub const CONVERSATION_MESSAGE: &str = "ConversationMessage";
    pub const BATCH_PROPOSALS_MESSAGE: &str = "BatchProposalsMessage";
    pub const BAN_REQUEST: &str = "BanRequest";
    pub const PROPOSAL: &str = "Proposal";
    pub const VOTE: &str = "Vote";
    pub const VOTE_PAYLOAD: &str = "VotePayload";
    pub const USER_VOTE: &str = "UserVote";
    pub const PROPOSAL_ADDED: &str = "ProposalAdded";
    // Fallback label for unrecognized message kinds.
    pub const UNKNOWN: &str = "Unknown";
}
|
||||
|
||||
/// Trait for getting message type as a string constant
///
/// Implementors return a `'static` label (typically one of the
/// `message_types` constants) identifying the message variant; the label is
/// compared against those constants elsewhere (e.g. state-machine checks).
pub trait MessageType {
    fn message_type(&self) -> &'static str;
}
|
||||
|
||||
impl MessageType for app_message::Payload {
|
||||
fn message_type(&self) -> &'static str {
|
||||
use message_types::*;
|
||||
match self {
|
||||
app_message::Payload::ConversationMessage(_) => CONVERSATION_MESSAGE,
|
||||
app_message::Payload::BatchProposalsMessage(_) => BATCH_PROPOSALS_MESSAGE,
|
||||
app_message::Payload::BanRequest(_) => BAN_REQUEST,
|
||||
app_message::Payload::Proposal(_) => PROPOSAL,
|
||||
app_message::Payload::Vote(_) => VOTE,
|
||||
app_message::Payload::VotePayload(_) => VOTE_PAYLOAD,
|
||||
app_message::Payload::UserVote(_) => USER_VOTE,
|
||||
app_message::Payload::ProposalAdded(_) => PROPOSAL_ADDED,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageType for UpdateRequest {
|
||||
fn message_type(&self) -> &'static str {
|
||||
match RequestType::try_from(self.request_type) {
|
||||
Ok(RequestType::AddMember) => "Add Member",
|
||||
Ok(RequestType::RemoveMember) => "Remove Member",
|
||||
_ => "Unknown",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WELCOME MESSAGE SUBTOPIC
impl GroupAnnouncement {
    /// Builds an announcement from a public key and a signature.
    /// NOTE(review): `verify` below checks the signature against the public
    /// key itself, so the signature is expected to cover `pub_key` — confirm
    /// at the call site that produces it.
    pub fn new(pub_key: Vec<u8>, signature: Vec<u8>) -> Self {
        GroupAnnouncement {
            eth_pub_key: pub_key,
            signature,
        }
    }

    /// Verifies the announcement: `eth_pub_key` is passed both as the signed
    /// message and as the verification key (self-signed announcement).
    pub fn verify(&self) -> Result<bool, MessageError> {
        let verified = verify_message(&self.eth_pub_key, &self.signature, &self.eth_pub_key)?;
        Ok(verified)
    }

    /// Serializes `kp` as JSON and encrypts the bytes with `eth_pub_key`
    /// via `encrypt_message`.
    pub fn encrypt(&self, kp: KeyPackage) -> Result<Vec<u8>, MessageError> {
        let key_package = serde_json::to_vec(&kp)?;
        let encrypted = encrypt_message(&key_package, &self.eth_pub_key)?;
        Ok(encrypted)
    }
}
|
||||
|
||||
// Conversions lifting the welcome-subtopic payloads into the
// `WelcomeMessage` envelope defined in welcome.proto.

impl From<GroupAnnouncement> for WelcomeMessage {
    fn from(group_announcement: GroupAnnouncement) -> Self {
        WelcomeMessage {
            payload: Some(welcome_message::Payload::GroupAnnouncement(
                group_announcement,
            )),
        }
    }
}

impl TryFrom<MlsMessageOut> for WelcomeMessage {
    type Error = MessageError;
    /// Serializes the MLS message into an `InvitationToJoin`; fallible
    /// because `MlsMessageOut::to_bytes` can fail.
    fn try_from(mls_message: MlsMessageOut) -> Result<Self, MessageError> {
        let mls_bytes = mls_message.to_bytes()?;
        let invitation = InvitationToJoin {
            mls_message_out_bytes: mls_bytes,
        };

        Ok(WelcomeMessage {
            payload: Some(welcome_message::Payload::InvitationToJoin(invitation)),
        })
    }
}

impl From<UserKeyPackage> for WelcomeMessage {
    fn from(user_key_package: UserKeyPackage) -> Self {
        WelcomeMessage {
            payload: Some(welcome_message::Payload::UserKeyPackage(user_key_package)),
        }
    }
}
|
||||
|
||||
// Conversions lifting each payload type into the `AppMessage` envelope
// defined in application.proto — one `From` impl per `oneof` variant.

impl From<VotePayload> for AppMessage {
    fn from(vote_payload: VotePayload) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::VotePayload(vote_payload)),
        }
    }
}

impl From<UserVote> for AppMessage {
    fn from(user_vote: UserVote) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::UserVote(user_vote)),
        }
    }
}

impl From<ConversationMessage> for AppMessage {
    fn from(conversation_message: ConversationMessage) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::ConversationMessage(
                conversation_message,
            )),
        }
    }
}

impl From<BatchProposalsMessage> for AppMessage {
    fn from(batch_proposals_message: BatchProposalsMessage) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::BatchProposalsMessage(
                batch_proposals_message,
            )),
        }
    }
}

impl From<BanRequest> for AppMessage {
    fn from(ban_request: BanRequest) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::BanRequest(ban_request)),
        }
    }
}

impl From<Proposal> for AppMessage {
    fn from(proposal: Proposal) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::Proposal(proposal)),
        }
    }
}

impl From<Vote> for AppMessage {
    fn from(vote: Vote) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::Vote(vote)),
        }
    }
}

impl From<ProposalAdded> for AppMessage {
    fn from(proposal_added: ProposalAdded) -> Self {
        AppMessage {
            payload: Some(app_message::Payload::ProposalAdded(proposal_added)),
        }
    }
}
|
||||
|
||||
impl From<ConsensusEvent> for Outcome {
|
||||
fn from(consensus_event: ConsensusEvent) -> Self {
|
||||
match consensus_event {
|
||||
ConsensusEvent::ConsensusReached {
|
||||
proposal_id: _,
|
||||
result: true,
|
||||
} => Outcome::Accepted,
|
||||
ConsensusEvent::ConsensusReached {
|
||||
proposal_id: _,
|
||||
result: false,
|
||||
} => Outcome::Rejected,
|
||||
ConsensusEvent::ConsensusFailed {
|
||||
proposal_id: _,
|
||||
reason: _,
|
||||
} => Outcome::Unspecified,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<GroupUpdateRequest> for UpdateRequest {
    fn from(group_update_request: GroupUpdateRequest) -> Self {
        match group_update_request {
            // Add: the wallet address travels as the serialized credential
            // content of the joining member's key-package leaf node.
            GroupUpdateRequest::AddMember(kp) => UpdateRequest {
                request_type: RequestType::AddMember as i32,
                wallet_address: kp.leaf_node().credential().serialized_content().to_vec(),
            },
            // Remove: `id` is expected to be a hex wallet address, with or
            // without a 0x prefix. If it does not parse as hex, fall back to
            // the raw UTF-8 bytes rather than failing the conversion.
            GroupUpdateRequest::RemoveMember(id) => UpdateRequest {
                request_type: RequestType::RemoveMember as i32,
                wallet_address: hex::decode(id.strip_prefix("0x").unwrap_or(&id))
                    .unwrap_or_else(|_| id.into_bytes()),
            },
        }
    }
}
|
||||
|
||||
// Helper function to convert protobuf UpdateRequest to display format
|
||||
pub fn convert_group_requests_to_display(
|
||||
group_requests: &[UpdateRequest],
|
||||
) -> Vec<(String, String)> {
|
||||
let mut results = Vec::new();
|
||||
|
||||
for req in group_requests {
|
||||
match RequestType::try_from(req.request_type) {
|
||||
Ok(RequestType::AddMember) => {
|
||||
results.push((
|
||||
"Add Member".to_string(),
|
||||
normalize_wallet_address(&req.wallet_address),
|
||||
));
|
||||
}
|
||||
Ok(RequestType::RemoveMember) => {
|
||||
results.push((
|
||||
"Remove Member".to_string(),
|
||||
normalize_wallet_address(&req.wallet_address),
|
||||
));
|
||||
}
|
||||
_ => {
|
||||
results.push(("Unknown".to_string(), "Invalid request".to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results
|
||||
}
|
||||
51
src/protos/messages/v1/application.proto
Normal file
51
src/protos/messages/v1/application.proto
Normal file
@@ -0,0 +1,51 @@
|
||||
// src/protos/messages/v1/application.proto

syntax = "proto3";

package de_mls.messages.v1;

import "messages/v1/consensus.proto";

// Application-level envelope: exactly one payload variant per message.
// Variant names are mirrored by the Rust `message_types` constants in
// src/message.rs.
message AppMessage {
  oneof payload {
    ConversationMessage conversation_message = 1;
    BatchProposalsMessage batch_proposals_message = 2;
    BanRequest ban_request = 3;
    consensus.v1.Proposal proposal = 4;
    consensus.v1.Vote vote = 5;
    consensus.v1.VotePayload vote_payload = 6;
    UserVote user_vote = 7;
    ProposalAdded proposal_added = 8;
  }
}

// Request to remove a user from a group.
message BanRequest {
  string user_to_ban = 1;  // Identifier of the user to remove
  string requester = 2;    // Identifier of the user asking for the removal
  string group_name = 3;
}

// A chat message exchanged within a group.
message ConversationMessage {
  bytes message = 1;
  string sender = 2;
  string group_name = 3;
}

// A batch of MLS proposals together with the commit that applies them.
message BatchProposalsMessage {
  bytes group_name = 1;
  repeated bytes mls_proposals = 2; // Individual MLS proposal messages
  bytes commit_message = 3; // MLS commit message
}

// Yes/No vote for a given proposal. Based on the result, the `consensus.v1.Vote` will be created.
message UserVote {
  uint32 proposal_id = 1;
  bool vote = 2;
  string group_name = 3;
}

// Proposal added message is sent to the UI when a new proposal is added to the group.
message ProposalAdded {
  string group_id = 1;
  consensus.v1.UpdateRequest request = 2;
}
|
||||
61
src/protos/messages/v1/consensus.proto
Normal file
61
src/protos/messages/v1/consensus.proto
Normal file
@@ -0,0 +1,61 @@
|
||||
syntax = "proto3";

package consensus.v1;

// Kind of group change an UpdateRequest asks for.
enum RequestType {
  REQUEST_TYPE_UNSPECIFIED = 0;
  REQUEST_TYPE_ADD_MEMBER = 1;
  REQUEST_TYPE_REMOVE_MEMBER = 2;
}

// Proposal represents a consensus proposal that needs voting
message Proposal {
  string name = 10; // Proposal name
  repeated UpdateRequest group_requests = 11; // Structured group update requests
  uint32 proposal_id = 12; // Unique identifier of the proposal
  bytes proposal_owner = 13; // Public key of the creator
  repeated Vote votes = 14; // Vote list in the proposal
  uint32 expected_voters_count = 15; // Maximum number of distinct voters
  uint32 round = 16; // Number of Votes
  uint64 timestamp = 17; // Creation time of proposal
  uint64 expiration_time = 18; // The time interval that the proposal is active.
  bool liveness_criteria_yes = 19; // Shows how managing the silent peers vote
}

// Vote represents a single vote in a consensus proposal
message Vote {
  uint32 vote_id = 20; // Unique identifier of the vote
  bytes vote_owner = 21; // Voter's public key
  uint32 proposal_id = 22; // Proposal ID (for the vote)
  uint64 timestamp = 23; // Time when the vote was cast
  bool vote = 24; // Vote bool value (true/false)
  bytes parent_hash = 25; // Hash of previous owner's Vote
  bytes received_hash = 26; // Hash of previous received Vote
  bytes vote_hash = 27; // Hash of all previously defined fields in Vote
  bytes signature = 28; // Signature of vote_hash
}

// Final decision for a proposal's vote.
enum Outcome {
  OUTCOME_UNSPECIFIED = 0;
  OUTCOME_ACCEPTED = 1;
  OUTCOME_REJECTED = 2;
}

// Result of a concluded proposal.
message ProposalResult {
  string group_id = 31;
  uint32 proposal_id = 32;
  Outcome outcome = 33;
  uint64 decided_at_ms = 34;
}

// Payload announcing a set of requests up for vote (only the steward may
// send this while voting — see state_machine.rs).
message VotePayload {
  string group_id = 41;
  uint32 proposal_id = 42;
  repeated UpdateRequest group_requests = 43; // Structured group update requests
  uint64 timestamp = 44;
}

// A single requested group change (add/remove member).
message UpdateRequest {
  RequestType request_type = 51;
  bytes wallet_address = 52;
}
|
||||
28
src/protos/messages/v1/welcome.proto
Normal file
28
src/protos/messages/v1/welcome.proto
Normal file
@@ -0,0 +1,28 @@
|
||||
// src/protos/messages/v1/welcome.proto

syntax = "proto3";

package de_mls.messages.v1;

// The main message container. It can only contain ONE of the following
// message types in its `payload` field.
message WelcomeMessage {
  oneof payload {
    GroupAnnouncement group_announcement = 1;
    UserKeyPackage user_key_package = 2;
    InvitationToJoin invitation_to_join = 3;
  }
}

// Announces a group; the signature is verified against eth_pub_key itself
// (see GroupAnnouncement::verify in src/message.rs).
message GroupAnnouncement {
  bytes eth_pub_key = 1;  // Announcer's Ethereum public key
  bytes signature = 2;    // Signature over eth_pub_key
}

// A member's key package, encrypted with the announcer's eth_pub_key
// (see GroupAnnouncement::encrypt).
message UserKeyPackage {
  bytes encrypt_kp = 1;  // Encrypted, JSON-serialized KeyPackage
}

// Serialized MlsMessageOut inviting a member to join.
message InvitationToJoin {
  bytes mls_message_out_bytes = 1;
}
|
||||
785
src/state_machine.rs
Normal file
785
src/state_machine.rs
Normal file
@@ -0,0 +1,785 @@
|
||||
//! State machine for steward epoch management and group operations.
|
||||
//!
|
||||
//! This module implements a state machine that manages the lifecycle of steward epochs,
|
||||
//! proposal collection, voting, and application. The state machine ensures proper
|
||||
//! transitions and enforces permissions at each state.
|
||||
//!
|
||||
//! # States
|
||||
//!
|
||||
//! - **Working**: Normal operation state where users can send any message freely
|
||||
//! - **Waiting**: Steward epoch state where only steward can send BATCH_PROPOSALS_MESSAGE (if proposals exist)
|
||||
//! - **Voting**: Voting state where everyone can send VOTE/USER_VOTE, only steward can send VOTE_PAYLOAD/PROPOSAL
|
||||
//! - **ConsensusReached**: Consensus achieved, waiting for steward to send batch proposals
|
||||
//! - **ConsensusFailed**: Consensus failed due to timeout or other reasons
|
||||
//!
|
||||
//! # State Transitions
|
||||
//!
|
||||
//! ```text
|
||||
//! Working -- start_steward_epoch_with_validation() --> Waiting (if proposals exist)
|
||||
//! Working -- start_steward_epoch_with_validation() --> Working (if no proposals, returns 0)
|
||||
//! Waiting -- start_voting() --> Voting
|
||||
//! Voting -- complete_voting(true) --> ConsensusReached (vote passed)
|
||||
//! Voting -- complete_voting(false) --> Working (vote failed)
|
||||
//! ConsensusReached -- start_waiting_after_consensus() --> Waiting (steward sends batch proposals)
|
||||
//! Waiting -- handle_yes_vote() --> Working (after successful vote and proposal application)
|
||||
//! ConsensusFailed -- recover_from_consensus_failure() --> Working (recovery)
|
||||
//! ```
|
||||
//!
|
||||
//! # Message Type Permissions by State
|
||||
//!
|
||||
//! ## Working State
|
||||
//! - **All users**: Can send any message type
|
||||
//!
|
||||
//! ## Waiting State
|
||||
//! - **Steward with proposals**: Can send BATCH_PROPOSALS_MESSAGE
|
||||
//! - **All users**: All other message types blocked
|
||||
//!
|
||||
//! ## Voting State
|
||||
//! - **All users**: Can send VOTE and USER_VOTE
|
||||
//! - **Steward only**: Can send VOTE_PAYLOAD and PROPOSAL
|
||||
//! - **All users**: All other message types blocked
|
||||
//!
|
||||
//! ## ConsensusReached State
|
||||
//! - **Steward with proposals**: Can send BATCH_PROPOSALS_MESSAGE
|
||||
//! - **All users**: All other message types blocked
|
||||
//!
|
||||
//! ## ConsensusFailed State
|
||||
//! - **All users**: No messages allowed
|
||||
//!
|
||||
//! # Steward Flow Scenarios
|
||||
//!
|
||||
//! ## Scenario 1: No Proposals Initially
|
||||
//! ```text
|
||||
//! Working --start_steward_epoch_with_validation()--> Working (stays in Working, returns 0)
|
||||
//! ```
|
||||
//!
|
||||
//! ## Scenario 2: Successful Vote with Proposals
|
||||
//! **Steward:**
|
||||
//! ```text
|
||||
//! Working --start_steward_epoch_with_validation()--> Waiting --start_voting()--> Voting
|
||||
//! --complete_voting(true)--> ConsensusReached --start_waiting_after_consensus()--> Waiting
|
||||
//! --handle_yes_vote()--> Working
|
||||
//! ```
|
||||
//! **Non-Steward:**
|
||||
//! ```text
|
||||
//! Working --steward_starts_epoch()--> Waiting --start_voting()--> Voting
|
||||
//! --start_consensus_reached()--> ConsensusReached --start_waiting()--> Waiting
|
||||
//! --handle_yes_vote()--> Working
|
||||
//! ```
|
||||
//!
|
||||
//! ## Scenario 3: Failed Vote
|
||||
//! **Steward:**
|
||||
//! ```text
|
||||
//! Working --start_steward_epoch_with_validation()--> Waiting --start_voting()--> Voting
|
||||
//! --complete_voting(false)--> Working
|
||||
//! ```
|
||||
//! **Non-Steward:**
|
||||
//! ```text
|
||||
//! Working --steward_starts_epoch()--> Waiting --start_voting()--> Voting
|
||||
//! --start_consensus_reached()--> ConsensusReached --start_consensus_failed()--> ConsensusFailed
|
||||
//! --recover_from_consensus_failure()--> Working
|
||||
//! ```
|
||||
//!
|
||||
//! # Key Methods
|
||||
//!
|
||||
//! - `start_steward_epoch_with_validation()`: Main entry point for starting steward epochs with proposal validation
|
||||
//! - `start_voting()`: Transitions to voting state from any non-voting state
|
||||
//! - `complete_voting(vote_result)`: Handles voting completion and transitions based on result
|
||||
//! - `handle_yes_vote()`: Applies proposals and returns to working state after successful vote
|
||||
//! - `start_waiting_after_consensus()`: Transitions from ConsensusReached to Waiting for batch proposal processing
|
||||
//! - `recover_from_consensus_failure()`: Recovers from consensus failure back to Working state
|
||||
//!
|
||||
//! # Proposal Management
|
||||
//!
|
||||
//! - Proposals are collected in the current epoch and moved to voting epoch when steward epoch starts
|
||||
//! - After successful voting, proposals are applied and cleared from voting epoch
|
||||
//! - Failed votes result in proposals being discarded and return to working state
|
||||
|
||||
use std::fmt::Display;
|
||||
use tracing::info;
|
||||
|
||||
use crate::message::message_types;
|
||||
use crate::steward::Steward;
|
||||
use crate::{steward::GroupUpdateRequest, GroupError};
|
||||
|
||||
/// Represents the different states a group can be in during the steward epoch flow
#[derive(Debug, Clone, PartialEq)]
pub enum GroupState {
    /// Normal operation state - users can send any message freely
    Working,
    /// Waiting state during steward epoch - only steward can send BATCH_PROPOSALS_MESSAGE
    Waiting,
    /// Voting state - everyone can send VOTE/USER_VOTE, only steward can send VOTE_PAYLOAD/PROPOSAL
    Voting,
    /// Consensus reached state - consensus achieved, waiting for steward to send batch proposals
    ConsensusReached,
}

impl Display for GroupState {
    /// Writes the state's canonical name, matching the variant identifier.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Working => "Working",
            Self::Waiting => "Waiting",
            Self::Voting => "Voting",
            Self::ConsensusReached => "ConsensusReached",
        })
    }
}
|
||||
|
||||
/// State machine for managing group steward epoch flow
///
/// See the module-level documentation for the full transition diagram and
/// per-state message permissions.
#[derive(Debug, Clone)]
pub struct GroupStateMachine {
    /// Current state of the group
    state: GroupState,
    /// Optional steward for epoch management; populated only by
    /// `new_with_steward`, `None` for plain members.
    steward: Option<Steward>,
}
|
||||
|
||||
impl GroupStateMachine {
|
||||
/// Create a new group state machine
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
state: GroupState::Working,
|
||||
steward: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new group state machine with steward
|
||||
pub fn new_with_steward() -> Self {
|
||||
Self {
|
||||
state: GroupState::Working,
|
||||
steward: Some(Steward::new()),
|
||||
}
|
||||
}
|
||||
|
||||
    /// Get the current state
    ///
    /// Returns a clone; `GroupState` is a small fieldless enum, so this is cheap.
    pub fn current_state(&self) -> GroupState {
        self.state.clone()
    }
|
||||
|
||||
/// Check if a specific message type can be sent in the current state.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `is_steward`: Whether the sender is a steward
|
||||
/// - `has_proposals`: Whether there are proposals available (for steward operations)
|
||||
/// - `message_type`: The type of message to check
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `true` if the message can be sent, `false` otherwise
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used to enforce message type permissions based on current state and sender role.
|
||||
/// This ensures proper state machine behavior and prevents invalid operations.
|
||||
pub fn can_send_message_type(
|
||||
&self,
|
||||
is_steward: bool,
|
||||
has_proposals: bool,
|
||||
message_type: &str,
|
||||
) -> bool {
|
||||
match self.state {
|
||||
GroupState::Working => true, // Anyone can send any message in working state
|
||||
GroupState::Waiting => {
|
||||
// In waiting state, only steward can send BATCH_PROPOSALS_MESSAGE
|
||||
match message_type {
|
||||
message_types::BATCH_PROPOSALS_MESSAGE => is_steward && has_proposals,
|
||||
_ => false, // All other messages blocked during waiting
|
||||
}
|
||||
}
|
||||
GroupState::Voting => {
|
||||
// In voting state, only voting-related messages allowed
|
||||
match message_type {
|
||||
message_types::VOTE => true, // Everyone can send votes
|
||||
message_types::USER_VOTE => true, // Everyone can send user votes
|
||||
message_types::VOTE_PAYLOAD => is_steward, // Only steward can send voting proposals
|
||||
message_types::PROPOSAL => is_steward, // Only steward can send proposals
|
||||
_ => false, // All other messages blocked during voting
|
||||
}
|
||||
}
|
||||
GroupState::ConsensusReached => {
|
||||
// In ConsensusReached state, only steward can send BATCH_PROPOSALS_MESSAGE
|
||||
match message_type {
|
||||
message_types::BATCH_PROPOSALS_MESSAGE => is_steward && has_proposals,
|
||||
_ => false, // All other messages blocked during ConsensusReached
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Start voting on proposals for the current epoch, transitioning to Voting state.
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Can be called from any state except Voting (prevents double voting)
|
||||
///
|
||||
/// ## State Transition:
|
||||
/// Any State (except Voting) → Voting
|
||||
pub fn start_voting(&mut self) -> Result<(), GroupError> {
|
||||
if self.state == GroupState::Voting {
|
||||
return Err(GroupError::InvalidStateTransition {
|
||||
from: self.state.to_string(),
|
||||
to: "Voting".to_string(),
|
||||
});
|
||||
}
|
||||
self.state = GroupState::Voting;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Complete voting and update state based on result.
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Must be in Voting state
|
||||
///
|
||||
/// ## State Transitions:
|
||||
/// - Vote YES: Voting → ConsensusReached (consensus achieved, waiting for batch proposals)
|
||||
/// - Vote NO: Voting → Working (proposals discarded)
|
||||
pub fn complete_voting(&mut self, vote_result: bool) -> Result<(), GroupError> {
|
||||
if self.state != GroupState::Voting {
|
||||
return Err(GroupError::InvalidStateTransition {
|
||||
from: self.state.to_string(),
|
||||
to: if vote_result {
|
||||
"ConsensusReached"
|
||||
} else {
|
||||
"Working"
|
||||
}
|
||||
.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if vote_result {
|
||||
// Vote YES - go to ConsensusReached state to wait for steward to send batch proposals
|
||||
info!("[complete_voting]: Vote YES, transitioning to ConsensusReached state");
|
||||
self.start_consensus_reached();
|
||||
} else {
|
||||
// Vote NO - return to working state
|
||||
info!("[complete_voting]: Vote NO, transitioning to Working state");
|
||||
self.start_working();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Start consensus reached state (for non-steward peers after consensus).
|
||||
///
|
||||
/// ## State Transition:
|
||||
/// Any State → ConsensusReached
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Called by non-steward peers when consensus is reached during voting.
|
||||
/// This allows them to transition to the appropriate state for waiting
|
||||
/// for the steward to process and send batch proposals.
|
||||
pub fn start_consensus_reached(&mut self) {
|
||||
self.state = GroupState::ConsensusReached;
|
||||
info!("[start_consensus_reached] Transitioning to ConsensusReached state");
|
||||
}
|
||||
|
||||
/// Start working state (for non-steward peers after consensus or edge case recovery).
|
||||
///
|
||||
/// ## State Transition:
|
||||
/// Any State → Working
|
||||
///
|
||||
/// ## Usage:
|
||||
/// - Non-steward peers: Called after receiving consensus results
|
||||
/// - Edge case recovery: Called when proposals disappear during voting phase
|
||||
/// - General recovery: Can be used to reset to normal operation from any state
|
||||
///
|
||||
/// ## Note:
|
||||
/// This method provides a safe way to transition back to normal operation
|
||||
/// and is commonly used for recovery scenarios.
|
||||
pub fn start_working(&mut self) {
|
||||
self.state = GroupState::Working;
|
||||
info!("[start_working] Transitioning to Working state");
|
||||
}
|
||||
|
||||
/// Start waiting state (for non-steward peers after consensus).
|
||||
///
|
||||
/// ## State Transition:
|
||||
/// Any State → Waiting
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Called by non-steward peers to transition to waiting state,
|
||||
/// typically after consensus is reached and they need to wait for
|
||||
/// the steward to process and send batch proposals.
|
||||
pub fn start_waiting(&mut self) {
|
||||
self.state = GroupState::Waiting;
|
||||
info!("[start_waiting] Transitioning to Waiting state");
|
||||
}
|
||||
|
||||
/// Get the count of proposals in the current epoch.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Number of proposals currently collected for the next steward epoch
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used to check if there are proposals to vote on before starting a steward epoch.
|
||||
pub async fn get_current_epoch_proposals_count(&self) -> usize {
|
||||
if let Some(steward) = &self.steward {
|
||||
steward.get_current_epoch_proposals_count().await
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current epoch proposals for UI display.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Vector of proposals currently collected for the next steward epoch
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used to display current proposals in the UI for stewards.
|
||||
pub async fn get_current_epoch_proposals(&self) -> Vec<crate::steward::GroupUpdateRequest> {
|
||||
if let Some(steward) = &self.steward {
|
||||
steward.get_current_epoch_proposals().await
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the count of proposals in the voting epoch.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Number of proposals currently being voted on
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used during voting to track how many proposals are being considered.
|
||||
pub async fn get_voting_epoch_proposals_count(&self) -> usize {
|
||||
if let Some(steward) = &self.steward {
|
||||
steward.get_voting_epoch_proposals_count().await
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the proposals in the voting epoch.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Vector of proposals currently being voted on
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used during voting to access the actual proposal details for processing.
|
||||
pub async fn get_voting_epoch_proposals(&self) -> Vec<GroupUpdateRequest> {
|
||||
if let Some(steward) = &self.steward {
|
||||
steward.get_voting_epoch_proposals().await
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a proposal to the current epoch.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `proposal`: The group update request to add
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Called to submit new proposals for consideration in the next steward epoch.
|
||||
/// Proposals are collected and will be moved to the voting epoch when
|
||||
/// `start_steward_epoch_with_validation()` is called.
|
||||
pub async fn add_proposal(&mut self, proposal: GroupUpdateRequest) {
|
||||
if let Some(steward) = &mut self.steward {
|
||||
steward.add_proposal(proposal).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this state machine has a steward configured.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `true` if a steward is configured, `false` otherwise
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used to verify steward availability before attempting steward epoch operations.
|
||||
pub fn has_steward(&self) -> bool {
|
||||
self.steward.is_some()
|
||||
}
|
||||
|
||||
/// Get a reference to the steward (if available).
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `Some(&Steward)` if steward is configured, `None` otherwise
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used to access steward functionality for read-only operations.
|
||||
pub fn get_steward(&self) -> Option<&Steward> {
|
||||
self.steward.as_ref()
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the steward (if available).
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `Some(&mut Steward)` if steward is configured, `None` otherwise
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used to access steward functionality for read-write operations.
|
||||
pub fn get_steward_mut(&mut self) -> Option<&mut Steward> {
|
||||
self.steward.as_mut()
|
||||
}
|
||||
|
||||
/// Handle steward epoch start with proposal validation.
|
||||
/// This is the main entry point for starting steward epochs.
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Must be in Working state
|
||||
/// - Must have a steward configured
|
||||
///
|
||||
/// ## State Transitions:
|
||||
/// - **With proposals**: Working → Waiting (returns proposal count)
|
||||
/// - **No proposals**: Working → Working (stays in Working, returns 0)
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Number of proposals collected for voting (0 if no proposals)
|
||||
///
|
||||
/// ## Usage:
|
||||
/// This method should be used instead of `start_steward_epoch()` for external calls
|
||||
/// as it provides proper proposal validation and state management.
|
||||
pub async fn start_steward_epoch_with_validation(&mut self) -> Result<usize, GroupError> {
|
||||
if self.state != GroupState::Working {
|
||||
return Err(GroupError::InvalidStateTransition {
|
||||
from: self.state.to_string(),
|
||||
to: "Waiting".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Always check if steward is set - required for steward epoch operations
|
||||
if !self.has_steward() {
|
||||
return Err(GroupError::StewardNotSet);
|
||||
}
|
||||
|
||||
// Check if there are proposals to vote on
|
||||
let proposal_count = self.get_current_epoch_proposals_count().await;
|
||||
|
||||
if proposal_count == 0 {
|
||||
// No proposals, stay in Working state but still return 0
|
||||
// This indicates a successful steward epoch start with no proposals
|
||||
Ok(0)
|
||||
} else {
|
||||
// Start steward epoch and transition to Waiting
|
||||
self.state = GroupState::Waiting;
|
||||
self.steward
|
||||
.as_mut()
|
||||
.ok_or(GroupError::StewardNotSet)?
|
||||
.start_new_epoch()
|
||||
.await;
|
||||
Ok(proposal_count)
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle proposal application and completion after successful voting.
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Must be in ConsensusReached or Waiting state
|
||||
/// - Must have a steward configured
|
||||
///
|
||||
/// ## State Transition:
|
||||
/// ConsensusReached/Waiting → Working
|
||||
///
|
||||
/// ## Actions:
|
||||
/// - Clears voting epoch proposals
|
||||
/// - Transitions to Working state
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Called after successful voting to empty the voting epoch proposals and transition to Working state.
|
||||
pub async fn handle_yes_vote(&mut self) -> Result<(), GroupError> {
|
||||
// Check state transition validity - can be called from ConsensusReached or Waiting state
|
||||
if self.state != GroupState::ConsensusReached && self.state != GroupState::Waiting {
|
||||
return Err(GroupError::InvalidStateTransition {
|
||||
from: self.state.to_string(),
|
||||
to: "Working".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
let steward = self.steward.as_mut().ok_or(GroupError::StewardNotSet)?;
|
||||
steward.empty_voting_epoch_proposals().await;
|
||||
|
||||
self.state = GroupState::Working;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Start waiting state when steward sends batch proposals after consensus.
|
||||
/// This transitions from ConsensusReached to Waiting state.
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Must be in ConsensusReached state
|
||||
///
|
||||
/// ## State Transition:
|
||||
/// ConsensusReached → Waiting
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Called when steward needs to send batch proposals after consensus is reached.
|
||||
/// This allows the steward to process and send proposals while maintaining proper state flow.
|
||||
pub fn start_waiting_after_consensus(&mut self) -> Result<(), GroupError> {
|
||||
if self.state != GroupState::ConsensusReached {
|
||||
return Err(GroupError::InvalidStateTransition {
|
||||
from: self.state.to_string(),
|
||||
to: "Waiting".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
self.state = GroupState::Waiting;
|
||||
info!(
|
||||
"[start_waiting_after_consensus] Transitioning from ConsensusReached to Waiting state"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handle failed vote cleanup.
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Must have a steward configured
|
||||
///
|
||||
/// ## Actions:
|
||||
/// - Clears voting epoch proposals
|
||||
/// - Does not change state
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Called after failed votes to clean up proposals. The caller is responsible
|
||||
/// for transitioning to the appropriate state (typically Working).
|
||||
pub async fn handle_no_vote(&mut self) -> Result<(), GroupError> {
|
||||
let steward = self.steward.as_mut().ok_or(GroupError::StewardNotSet)?;
|
||||
steward.empty_voting_epoch_proposals().await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GroupStateMachine {
    // Default is a steward-less machine starting in Working (same as `new`).
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
// Unit tests for the GroupStateMachine: construction, the full
// Working → Waiting → Voting → ConsensusReached → Waiting → Working cycle,
// per-state message permissions, rejected transitions, and proposal counts.
// The assertion order mirrors the required transition order — do not reorder.
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_state_machine_creation() {
        let state_machine = GroupStateMachine::new();
        assert_eq!(state_machine.current_state(), GroupState::Working);
        assert!(!state_machine.has_steward());
    }

    #[tokio::test]
    async fn test_state_machine_with_steward_creation() {
        let state_machine = GroupStateMachine::new_with_steward();
        assert_eq!(state_machine.current_state(), GroupState::Working);
        assert!(state_machine.has_steward());
    }

    #[tokio::test]
    async fn test_state_transitions() {
        let mut state_machine = GroupStateMachine::new_with_steward();

        // Initial state should be Working
        assert_eq!(state_machine.current_state(), GroupState::Working);

        // Add a proposal to switch to waiting state
        state_machine
            .add_proposal(GroupUpdateRequest::RemoveMember(
                "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc".to_string(),
            ))
            .await;

        // Test start_steward_epoch
        state_machine
            .start_steward_epoch_with_validation()
            .await
            .expect("Failed to start steward epoch");
        assert_eq!(state_machine.current_state(), GroupState::Waiting);

        // Test start_voting
        state_machine
            .start_voting()
            .expect("Failed to start voting");
        assert_eq!(state_machine.current_state(), GroupState::Voting);

        // Test complete_voting with success
        state_machine
            .complete_voting(true)
            .expect("Failed to complete voting");
        assert_eq!(state_machine.current_state(), GroupState::ConsensusReached);

        // Test start_waiting_after_consensus
        state_machine
            .start_waiting_after_consensus()
            .expect("Failed to start waiting after consensus");
        assert_eq!(state_machine.current_state(), GroupState::Waiting);

        // Test apply_proposals_and_complete
        state_machine
            .handle_yes_vote()
            .await
            .expect("Failed to apply proposals");
        assert_eq!(state_machine.current_state(), GroupState::Working);
    }

    #[tokio::test]
    async fn test_message_type_permissions() {
        let mut state_machine = GroupStateMachine::new_with_steward();

        // Working state - all message types allowed
        assert!(state_machine.can_send_message_type(false, false, message_types::BAN_REQUEST));
        assert!(state_machine.can_send_message_type(
            false,
            false,
            message_types::CONVERSATION_MESSAGE
        ));
        assert!(state_machine.can_send_message_type(
            true,
            false,
            message_types::BATCH_PROPOSALS_MESSAGE
        ));

        // Add a proposal to switch to waiting state
        state_machine
            .add_proposal(GroupUpdateRequest::RemoveMember(
                "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc".to_string(),
            ))
            .await;

        // Start steward epoch
        state_machine
            .start_steward_epoch_with_validation()
            .await
            .expect("Failed to start steward epoch");

        // Waiting state - test specific message types
        // All messages not allowed from anyone EXCEPT BATCH_PROPOSALS_MESSAGE
        assert!(!state_machine.can_send_message_type(false, false, message_types::BAN_REQUEST));
        assert!(!state_machine.can_send_message_type(
            false,
            false,
            message_types::CONVERSATION_MESSAGE
        ));
        assert!(!state_machine.can_send_message_type(false, false, message_types::VOTE));
        assert!(!state_machine.can_send_message_type(false, false, message_types::USER_VOTE));
        assert!(!state_machine.can_send_message_type(false, false, message_types::VOTE_PAYLOAD));
        assert!(!state_machine.can_send_message_type(false, false, message_types::PROPOSAL));

        // BatchProposalsMessage should only be allowed from steward with proposals
        assert!(!state_machine.can_send_message_type(
            false,
            false,
            message_types::BATCH_PROPOSALS_MESSAGE
        ));
        assert!(!state_machine.can_send_message_type(
            true,
            false,
            message_types::BATCH_PROPOSALS_MESSAGE
        ));
        assert!(state_machine.can_send_message_type(
            true,
            true,
            message_types::BATCH_PROPOSALS_MESSAGE
        ));

        // Start voting
        state_machine
            .start_voting()
            .expect("Failed to start voting");

        // Voting state - only voting-related messages allowed
        // Everyone can send votes and user votes
        assert!(state_machine.can_send_message_type(false, false, message_types::VOTE));
        assert!(state_machine.can_send_message_type(false, false, message_types::USER_VOTE));

        // Only steward can send voting proposals and proposals
        assert!(!state_machine.can_send_message_type(false, false, message_types::VOTE_PAYLOAD));
        assert!(state_machine.can_send_message_type(true, false, message_types::VOTE_PAYLOAD));
        assert!(!state_machine.can_send_message_type(false, false, message_types::PROPOSAL));
        assert!(state_machine.can_send_message_type(true, false, message_types::PROPOSAL));

        // All other message types blocked during voting
        assert!(!state_machine.can_send_message_type(
            false,
            false,
            message_types::CONVERSATION_MESSAGE
        ));
        assert!(!state_machine.can_send_message_type(false, false, message_types::BAN_REQUEST));
        assert!(!state_machine.can_send_message_type(
            false,
            false,
            message_types::BATCH_PROPOSALS_MESSAGE
        ));
    }

    #[tokio::test]
    async fn test_invalid_state_transitions() {
        let mut state_machine = GroupStateMachine::new();

        // Cannot complete voting from Working state
        let result = state_machine.complete_voting(true);
        assert!(matches!(
            result,
            Err(GroupError::InvalidStateTransition { .. })
        ));

        // Cannot apply proposals from Working state
        let result = state_machine.handle_yes_vote().await;
        assert!(matches!(
            result,
            Err(GroupError::InvalidStateTransition { .. })
        ));
    }

    #[tokio::test]
    async fn test_proposal_management() {
        let mut state_machine = GroupStateMachine::new_with_steward();

        // Add some proposals
        state_machine
            .add_proposal(GroupUpdateRequest::RemoveMember(
                "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc".to_string(),
            ))
            .await;

        // Start steward epoch - should collect proposals
        state_machine
            .start_steward_epoch_with_validation()
            .await
            .expect("Failed to start steward epoch");
        assert_eq!(state_machine.get_voting_epoch_proposals_count().await, 1);

        // Complete the flow
        state_machine
            .start_voting()
            .expect("Failed to start voting");
        state_machine
            .complete_voting(true)
            .expect("Failed to complete voting");
        state_machine
            .handle_yes_vote()
            .await
            .expect("Failed to apply proposals");

        // Proposals should be applied and count should be reset
        assert_eq!(state_machine.get_current_epoch_proposals_count().await, 0);
    }

    #[tokio::test]
    async fn test_state_snapshot_consistency() {
        let mut state_machine = GroupStateMachine::new_with_steward();

        // Add some proposals
        state_machine
            .add_proposal(GroupUpdateRequest::RemoveMember(
                "0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc".to_string(),
            ))
            .await;

        // Get a snapshot before state transition
        let snapshot1 = state_machine.get_current_epoch_proposals_count().await;
        assert_eq!(snapshot1, 1);

        // Start steward epoch
        state_machine
            .start_steward_epoch_with_validation()
            .await
            .expect("Failed to start steward epoch");

        // Get a snapshot after state transition
        // (starting the epoch drains the current-epoch queue into voting)
        let snapshot2 = state_machine.get_current_epoch_proposals_count().await;
        assert_eq!(snapshot2, 0);

        // Verify that the snapshots are consistent within themselves
        assert!(snapshot1 > 0);
        assert_ne!(snapshot1, snapshot2);
    }
}
|
||||
168
src/steward.rs
Normal file
168
src/steward.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
use alloy::primitives::Address;
|
||||
use libsecp256k1::{PublicKey, SecretKey};
|
||||
use openmls::prelude::KeyPackage;
|
||||
use std::{fmt::Display, str::FromStr, sync::Arc};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
decrypt_message, error::MessageError, generate_keypair,
|
||||
protos::de_mls::messages::v1::GroupAnnouncement, sign_message,
|
||||
};
|
||||
|
||||
/// Steward state: an ephemeral secp256k1 keypair plus the two proposal
/// queues that back the group's epoch/voting cycle.
#[derive(Clone, Debug)]
pub struct Steward {
    // Keypair used to sign group announcements (`create_announcement`) and
    // to decrypt incoming key packages (`decrypt_message`).
    eth_pub: Arc<Mutex<PublicKey>>,
    eth_secr: Arc<Mutex<SecretKey>>,
    // Proposals collected since the last epoch, pending the next one.
    current_epoch_proposals: Arc<Mutex<Vec<GroupUpdateRequest>>>,
    // Proposals moved here by `start_new_epoch`, i.e. the set under vote.
    voting_epoch_proposals: Arc<Mutex<Vec<GroupUpdateRequest>>>,
}
|
||||
|
||||
/// A single membership-change request queued for a steward epoch.
#[derive(Clone, Debug, PartialEq)]
pub enum GroupUpdateRequest {
    // Boxed because a KeyPackage is large relative to the other variant.
    AddMember(Box<KeyPackage>),
    // Hex-encoded wallet address of the member to remove.
    RemoveMember(String),
}
|
||||
|
||||
impl Display for GroupUpdateRequest {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
GroupUpdateRequest::AddMember(kp) => {
|
||||
let id = Address::from_slice(kp.leaf_node().credential().serialized_content());
|
||||
writeln!(f, "Add Member: {id:#?}")
|
||||
}
|
||||
GroupUpdateRequest::RemoveMember(id) => {
|
||||
let id = Address::from_str(id).unwrap();
|
||||
writeln!(f, "Remove Member: {id:#?}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Steward {
    // Default is a steward with a fresh keypair and empty queues (see `new`).
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl Steward {
|
||||
pub fn new() -> Self {
|
||||
let (public_key, private_key) = generate_keypair();
|
||||
Steward {
|
||||
eth_pub: Arc::new(Mutex::new(public_key)),
|
||||
eth_secr: Arc::new(Mutex::new(private_key)),
|
||||
current_epoch_proposals: Arc::new(Mutex::new(Vec::new())),
|
||||
voting_epoch_proposals: Arc::new(Mutex::new(Vec::new())),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn refresh_key_pair(&mut self) {
|
||||
let (public_key, private_key) = generate_keypair();
|
||||
*self.eth_pub.lock().await = public_key;
|
||||
*self.eth_secr.lock().await = private_key;
|
||||
}
|
||||
|
||||
pub async fn create_announcement(&self) -> GroupAnnouncement {
|
||||
let pub_key = self.eth_pub.lock().await;
|
||||
let sec_key = self.eth_secr.lock().await;
|
||||
let signature = sign_message(&pub_key.serialize_compressed(), &sec_key);
|
||||
GroupAnnouncement::new(pub_key.serialize_compressed().to_vec(), signature)
|
||||
}
|
||||
|
||||
pub async fn decrypt_message(&self, message: Vec<u8>) -> Result<KeyPackage, MessageError> {
|
||||
let sec_key = self.eth_secr.lock().await;
|
||||
let msg: Vec<u8> = decrypt_message(&message, *sec_key)?;
|
||||
let key_package: KeyPackage = serde_json::from_slice(&msg)?;
|
||||
Ok(key_package)
|
||||
}
|
||||
|
||||
/// Start a new steward epoch, moving current proposals to the epoch proposals map.
|
||||
pub async fn start_new_epoch(&mut self) {
|
||||
// Use a single atomic operation to move proposals between epochs
|
||||
let proposals = {
|
||||
let mut current = self.current_epoch_proposals.lock().await;
|
||||
current.drain(0..).collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
// Store proposals for this epoch (for voting and application)
|
||||
if !proposals.is_empty() {
|
||||
let mut voting = self.voting_epoch_proposals.lock().await;
|
||||
voting.extend(proposals);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_current_epoch_proposals_count(&self) -> usize {
|
||||
self.current_epoch_proposals.lock().await.len()
|
||||
}
|
||||
|
||||
/// Get proposals for the current epoch (for voting).
|
||||
pub async fn get_voting_epoch_proposals(&self) -> Vec<GroupUpdateRequest> {
|
||||
self.voting_epoch_proposals.lock().await.clone()
|
||||
}
|
||||
|
||||
/// Get the count of proposals in the current epoch.
|
||||
pub async fn get_voting_epoch_proposals_count(&self) -> usize {
|
||||
self.voting_epoch_proposals.lock().await.len()
|
||||
}
|
||||
|
||||
/// Get the current epoch proposals for UI display.
|
||||
pub async fn get_current_epoch_proposals(&self) -> Vec<GroupUpdateRequest> {
|
||||
self.current_epoch_proposals.lock().await.clone()
|
||||
}
|
||||
|
||||
/// Apply proposals for the current epoch (called after successful voting).
|
||||
pub async fn empty_voting_epoch_proposals(&mut self) {
|
||||
self.voting_epoch_proposals.lock().await.clear();
|
||||
}
|
||||
|
||||
/// Add a proposal to the current epoch
|
||||
pub async fn add_proposal(&mut self, proposal: GroupUpdateRequest) {
|
||||
self.current_epoch_proposals.lock().await.push(proposal);
|
||||
}
|
||||
}
|
||||
|
||||
// Tests the Display impl of GroupUpdateRequest against exact output strings
// for a fixed, well-known test private key. Keep the expected strings in
// sync with the `writeln!` formats in the Display impl.
#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use alloy::signers::local::PrivateKeySigner;
    use mls_crypto::openmls_provider::{MlsProvider, CIPHERSUITE};
    use openmls::prelude::{BasicCredential, CredentialWithKey, KeyPackage};
    use openmls_basic_credential::SignatureKeyPair;

    use crate::steward::GroupUpdateRequest;
    #[tokio::test]
    async fn test_display_group_update_request() {
        let user_eth_priv_key =
            "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d";
        let signer =
            PrivateKeySigner::from_str(user_eth_priv_key).expect("Failed to create signer");
        let user_address = signer.address();

        let ciphersuite = CIPHERSUITE;
        let provider = MlsProvider::default();

        // Build a key package whose credential carries the wallet address,
        // so AddMember's Display path can recover it.
        let credential = BasicCredential::new(user_address.as_slice().to_vec());
        let signer = SignatureKeyPair::new(ciphersuite.signature_algorithm())
            .expect("Error generating a signature key pair.");
        let credential_with_key = CredentialWithKey {
            credential: credential.into(),
            signature_key: signer.public().into(),
        };
        let key_package_bundle = KeyPackage::builder()
            .build(ciphersuite, &provider, &signer, credential_with_key)
            .expect("Error building key package bundle.");
        let key_package = key_package_bundle.key_package();

        let proposal_add_member = GroupUpdateRequest::AddMember(Box::new(key_package.clone()));
        assert_eq!(
            proposal_add_member.to_string(),
            "Add Member: 0x70997970c51812dc3a010c7d01b50e0d17dc79c8\n"
        );

        let proposal_remove_member = GroupUpdateRequest::RemoveMember(user_address.to_string());
        assert_eq!(
            proposal_remove_member.to_string(),
            "Remove Member: 0x70997970c51812dc3a010c7d01b50e0d17dc79c8\n"
        );
    }
}
|
||||
511
src/user.rs
511
src/user.rs
@@ -1,511 +0,0 @@
|
||||
use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner};
|
||||
use kameo::{
|
||||
message::{Context, Message},
|
||||
Actor,
|
||||
};
|
||||
use log::info;
|
||||
use openmls::{group::*, prelude::*};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
str::{from_utf8, FromStr},
|
||||
};
|
||||
use waku_bindings::WakuMessage;
|
||||
|
||||
use ds::{
|
||||
ds_waku::{APP_MSG_SUBTOPIC, COMMIT_MSG_SUBTOPIC, WELCOME_SUBTOPIC},
|
||||
waku_actor::ProcessMessageToSend,
|
||||
};
|
||||
use mls_crypto::openmls_provider::*;
|
||||
|
||||
use crate::{
|
||||
group_actor::{Group, GroupAction},
|
||||
AppMessage, GroupAnnouncement, MessageToPrint, WelcomeMessage, WelcomeMessageType,
|
||||
};
|
||||
use crate::{identity::Identity, UserError};
|
||||
|
||||
/// Action produced by handling an inbound message; executed by the caller.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum UserAction {
    // Publish a message to the Waku network.
    SendToWaku(ProcessMessageToSend),
    // Deliver a printable message to the local group view.
    SendToGroup(MessageToPrint),
    // Drop the named group from this user's state.
    RemoveGroup(String),
    // Nothing to do for this input.
    DoNothing,
}
|
||||
|
||||
/// Actor owning one user's identity, MLS crypto provider, Ethereum signer,
/// and the set of groups the user participates in.
#[derive(Actor)]
pub struct User {
    // MLS identity (credential + signature keys) derived from the user's
    // Ethereum address in `User::new`.
    identity: Identity,
    // Groups keyed by group name.
    groups: HashMap<String, Group>,
    provider: MlsCryptoProvider,
    // Signer built from the user's private key string.
    eth_signer: PrivateKeySigner,
}
|
||||
|
||||
impl Message<WakuMessage> for User {
|
||||
type Reply = Result<Vec<UserAction>, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: WakuMessage,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let actions = self.process_waku_msg(msg).await?;
|
||||
Ok(actions)
|
||||
}
|
||||
}
|
||||
|
||||
/// Actor message: create or register a group for this user.
pub struct ProcessCreateGroup {
    pub group_name: String,
    // true → build a new MLS group with this user's credentials;
    // false → register a placeholder to be joined later (see `User::create_group`).
    pub is_creation: bool,
}
|
||||
|
||||
impl Message<ProcessCreateGroup> for User {
|
||||
type Reply = Result<(), UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessCreateGroup,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.create_group(msg.group_name.clone(), msg.is_creation)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Actor message: build the admin/announcement message for a group.
pub struct ProcessAdminMessage {
    pub group_name: String,
}
|
||||
|
||||
impl Message<ProcessAdminMessage> for User {
|
||||
type Reply = Result<ProcessMessageToSend, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessAdminMessage,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.prepare_admin_msg(msg.group_name.clone()).await
|
||||
}
|
||||
}
|
||||
|
||||
/// Actor message: leave the named group.
pub struct ProcessLeaveGroup {
    pub group_name: String,
}
|
||||
|
||||
impl Message<ProcessLeaveGroup> for User {
|
||||
type Reply = Result<(), UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessLeaveGroup,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.leave_group(msg.group_name.clone()).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Actor message: send a text message into the named group.
pub struct ProcessSendMessage {
    // Message body to encrypt and send.
    pub msg: String,
    pub group_name: String,
}
|
||||
|
||||
impl Message<ProcessSendMessage> for User {
|
||||
type Reply = Result<ProcessMessageToSend, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessSendMessage,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.prepare_msg_to_send(&msg.msg, msg.group_name.clone())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
/// Actor message: ban/remove a user from the named group.
pub struct ProcessRemoveUser {
    // Address (string form) of the user to remove.
    pub user_to_ban: String,
    pub group_name: String,
}
|
||||
|
||||
impl Message<ProcessRemoveUser> for User {
|
||||
type Reply = Result<ProcessMessageToSend, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ProcessRemoveUser,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.remove_users_from_group(vec![msg.user_to_ban], msg.group_name.clone())
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl User {
|
||||
/// Create a new user with the given name and a fresh set of credentials.
|
||||
pub fn new(user_eth_priv_key: &str) -> Result<Self, UserError> {
|
||||
let signer = PrivateKeySigner::from_str(user_eth_priv_key)?;
|
||||
let user_address = signer.address();
|
||||
|
||||
let crypto = MlsCryptoProvider::default();
|
||||
let id = Identity::new(CIPHERSUITE, &crypto, user_address.as_slice())?;
|
||||
|
||||
let user = User {
|
||||
groups: HashMap::new(),
|
||||
identity: id,
|
||||
eth_signer: signer,
|
||||
provider: crypto,
|
||||
};
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub async fn create_group(
|
||||
&mut self,
|
||||
group_name: String,
|
||||
is_creation: bool,
|
||||
) -> Result<(), UserError> {
|
||||
if self.if_group_exists(group_name.clone()) {
|
||||
return Err(UserError::GroupAlreadyExistsError(group_name));
|
||||
}
|
||||
let group = if is_creation {
|
||||
Group::new(
|
||||
group_name.clone(),
|
||||
true,
|
||||
Some(&self.provider),
|
||||
Some(&self.identity.signer),
|
||||
Some(&self.identity.credential_with_key),
|
||||
)?
|
||||
} else {
|
||||
Group::new(group_name.clone(), false, None, None, None)?
|
||||
};
|
||||
|
||||
self.groups.insert(group_name.clone(), group);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_group(&self, group_name: String) -> Result<Group, UserError> {
|
||||
match self.groups.get(&group_name) {
|
||||
Some(g) => Ok(g.clone()),
|
||||
None => Err(UserError::GroupNotFoundError(group_name)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn if_group_exists(&self, group_name: String) -> bool {
|
||||
self.groups.contains_key(&group_name)
|
||||
}
|
||||
|
||||
pub async fn handle_welcome_subtopic(
|
||||
&mut self,
|
||||
msg: WakuMessage,
|
||||
group_name: String,
|
||||
) -> Result<Vec<UserAction>, UserError> {
|
||||
let group = match self.groups.get_mut(&group_name) {
|
||||
Some(g) => g,
|
||||
None => return Err(UserError::GroupNotFoundError(group_name)),
|
||||
};
|
||||
let welcome_msg: WelcomeMessage = serde_json::from_slice(msg.payload())?;
|
||||
match welcome_msg.message_type {
|
||||
WelcomeMessageType::GroupAnnouncement => {
|
||||
let app_id = group.app_id();
|
||||
if group.is_admin() || group.is_kp_shared() {
|
||||
Ok(vec![UserAction::DoNothing])
|
||||
} else {
|
||||
info!(
|
||||
"User {:?} received group announcement message for group {:?}",
|
||||
self.identity.identity_string(),
|
||||
group_name
|
||||
);
|
||||
let group_announcement: GroupAnnouncement =
|
||||
serde_json::from_slice(&welcome_msg.message_payload)?;
|
||||
if !group_announcement.verify()? {
|
||||
return Err(UserError::MessageVerificationFailed);
|
||||
}
|
||||
|
||||
let key_package = serde_json::to_vec(
|
||||
&self
|
||||
.identity
|
||||
.generate_key_package(CIPHERSUITE, &self.provider)?,
|
||||
)?;
|
||||
|
||||
let encrypted_key_package = group_announcement.encrypt(key_package)?;
|
||||
let msg: Vec<u8> = serde_json::to_vec(&WelcomeMessage {
|
||||
message_type: WelcomeMessageType::KeyPackageShare,
|
||||
message_payload: encrypted_key_package,
|
||||
})?;
|
||||
|
||||
group.set_kp_shared(true);
|
||||
|
||||
Ok(vec![UserAction::SendToWaku(ProcessMessageToSend {
|
||||
msg,
|
||||
subtopic: WELCOME_SUBTOPIC.to_string(),
|
||||
group_id: group_name.clone(),
|
||||
app_id: app_id.clone(),
|
||||
})])
|
||||
}
|
||||
}
|
||||
WelcomeMessageType::KeyPackageShare => {
|
||||
// We already shared the key package with the group admin and we don't need to do it again
|
||||
if !group.is_admin() {
|
||||
Ok(vec![UserAction::DoNothing])
|
||||
} else {
|
||||
info!(
|
||||
"User {:?} received key package share message for group {:?}",
|
||||
self.identity.identity_string(),
|
||||
group_name
|
||||
);
|
||||
let key_package = group.decrypt_admin_msg(welcome_msg.message_payload)?;
|
||||
let msgs = self.invite_users(vec![key_package], group_name).await?;
|
||||
Ok(msgs
|
||||
.iter()
|
||||
.map(|msg| UserAction::SendToWaku(msg.clone()))
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
WelcomeMessageType::WelcomeShare => {
|
||||
if group.is_admin() {
|
||||
Ok(vec![UserAction::DoNothing])
|
||||
} else {
|
||||
info!(
|
||||
"User {:?} received welcome share message for group {:?}",
|
||||
self.identity.identity_string(),
|
||||
group_name
|
||||
);
|
||||
let welc = MlsMessageIn::tls_deserialize_bytes(welcome_msg.message_payload)?;
|
||||
let welcome = match welc.into_welcome() {
|
||||
Some(w) => w,
|
||||
None => return Err(UserError::EmptyWelcomeMessageError),
|
||||
};
|
||||
// find the key package in the welcome message
|
||||
if welcome.secrets().iter().any(|egs| {
|
||||
let hash_ref = egs.new_member().as_slice().to_vec();
|
||||
self.provider
|
||||
.key_store()
|
||||
.read(&hash_ref)
|
||||
.map(|kp: KeyPackage| (kp, hash_ref))
|
||||
.is_some()
|
||||
}) {
|
||||
self.join_group(welcome)?;
|
||||
let msg = self
|
||||
.prepare_msg_to_send("User joined to the group", group_name)
|
||||
.await?;
|
||||
Ok(vec![UserAction::SendToWaku(msg)])
|
||||
} else {
|
||||
Ok(vec![UserAction::DoNothing])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn process_waku_msg(
|
||||
&mut self,
|
||||
msg: WakuMessage,
|
||||
) -> Result<Vec<UserAction>, UserError> {
|
||||
let ct = msg.content_topic();
|
||||
let group_name = ct.application_name.to_string();
|
||||
let group = match self.groups.get(&group_name) {
|
||||
Some(g) => g,
|
||||
None => return Err(UserError::GroupNotFoundError(group_name)),
|
||||
};
|
||||
let app_id = group.app_id();
|
||||
if msg.meta() == app_id {
|
||||
return Ok(vec![UserAction::DoNothing]);
|
||||
}
|
||||
let ct = ct.content_topic_name.to_string();
|
||||
match ct.as_str() {
|
||||
WELCOME_SUBTOPIC => self.handle_welcome_subtopic(msg, group_name).await,
|
||||
COMMIT_MSG_SUBTOPIC => {
|
||||
if group.is_mls_group_initialized() {
|
||||
info!(
|
||||
"User {:?} received commit message for group {:?}",
|
||||
self.identity.identity_string(),
|
||||
group_name
|
||||
);
|
||||
let res = MlsMessageIn::tls_deserialize_bytes(msg.payload())?;
|
||||
let action = match res.extract() {
|
||||
MlsMessageInBody::PrivateMessage(message) => {
|
||||
self.process_protocol_msg(message.into()).await?
|
||||
}
|
||||
MlsMessageInBody::PublicMessage(message) => {
|
||||
self.process_protocol_msg(message.into()).await?
|
||||
}
|
||||
_ => return Err(UserError::UnsupportedMessageType),
|
||||
};
|
||||
Ok(vec![action])
|
||||
} else {
|
||||
Ok(vec![UserAction::DoNothing])
|
||||
}
|
||||
}
|
||||
APP_MSG_SUBTOPIC => {
|
||||
info!(
|
||||
"User {:?} received app message for group {:?}",
|
||||
self.identity.identity_string(),
|
||||
group_name
|
||||
);
|
||||
let buf: AppMessage = serde_json::from_slice(msg.payload())?;
|
||||
if buf.sender == self.identity.identity() {
|
||||
return Ok(vec![UserAction::DoNothing]);
|
||||
}
|
||||
let res = MlsMessageIn::tls_deserialize_bytes(&buf.message)?;
|
||||
let action = match res.extract() {
|
||||
MlsMessageInBody::PrivateMessage(message) => {
|
||||
self.process_protocol_msg(message.into()).await?
|
||||
}
|
||||
MlsMessageInBody::PublicMessage(message) => {
|
||||
self.process_protocol_msg(message.into()).await?
|
||||
}
|
||||
_ => return Err(UserError::UnsupportedMessageType),
|
||||
};
|
||||
Ok(vec![action])
|
||||
}
|
||||
_ => Err(UserError::UnknownContentTopicType(ct)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn invite_users(
|
||||
&mut self,
|
||||
users_kp: Vec<KeyPackage>,
|
||||
group_name: String,
|
||||
) -> Result<Vec<ProcessMessageToSend>, UserError> {
|
||||
// Build a proposal with this key package and do the MLS bits.
|
||||
let group = match self.groups.get_mut(&group_name) {
|
||||
Some(g) => g,
|
||||
None => return Err(UserError::GroupNotFoundError(group_name)),
|
||||
};
|
||||
let out_messages = group
|
||||
.add_members(users_kp, &self.provider, &self.identity.signer)
|
||||
.await?;
|
||||
info!(
|
||||
"User {:?} invited users to group {:?}",
|
||||
self.identity.identity_string(),
|
||||
group_name
|
||||
);
|
||||
Ok(out_messages)
|
||||
}
|
||||
|
||||
fn join_group(&mut self, welcome: Welcome) -> Result<(), UserError> {
|
||||
let group_config = MlsGroupConfig::builder()
|
||||
.use_ratchet_tree_extension(true)
|
||||
.build();
|
||||
|
||||
let mls_group = MlsGroup::new_from_welcome(&self.provider, &group_config, welcome, None)?;
|
||||
|
||||
let group_id = mls_group.group_id().to_vec();
|
||||
let group_name = String::from_utf8(group_id)?;
|
||||
|
||||
if !self.if_group_exists(group_name.clone()) {
|
||||
return Err(UserError::GroupNotFoundError(group_name));
|
||||
}
|
||||
|
||||
self.groups
|
||||
.get_mut(&group_name)
|
||||
.unwrap()
|
||||
.set_mls_group(mls_group)?;
|
||||
|
||||
info!(
|
||||
"User {:?} joined group {:?}",
|
||||
self.identity.identity_string(),
|
||||
group_name
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn process_protocol_msg(
|
||||
&mut self,
|
||||
message: ProtocolMessage,
|
||||
) -> Result<UserAction, UserError> {
|
||||
let group_name = from_utf8(message.group_id().as_slice())?.to_string();
|
||||
let group = match self.groups.get_mut(&group_name) {
|
||||
Some(g) => g,
|
||||
None => return Err(UserError::GroupNotFoundError(group_name)),
|
||||
};
|
||||
if !group.is_mls_group_initialized() {
|
||||
return Ok(UserAction::DoNothing);
|
||||
}
|
||||
let res = group
|
||||
.process_protocol_msg(
|
||||
message,
|
||||
&self.provider,
|
||||
self.identity
|
||||
.credential_with_key
|
||||
.signature_key
|
||||
.as_slice()
|
||||
.to_vec(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
match res {
|
||||
GroupAction::MessageToPrint(msg) => Ok(UserAction::SendToGroup(msg)),
|
||||
GroupAction::RemoveGroup => Ok(UserAction::RemoveGroup(group_name)),
|
||||
GroupAction::DoNothing => Ok(UserAction::DoNothing),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn prepare_admin_msg(
|
||||
&mut self,
|
||||
group_name: String,
|
||||
) -> Result<ProcessMessageToSend, UserError> {
|
||||
if !self.if_group_exists(group_name.clone()) {
|
||||
return Err(UserError::GroupNotFoundError(group_name));
|
||||
}
|
||||
let msg_to_send = self
|
||||
.groups
|
||||
.get_mut(&group_name)
|
||||
.unwrap()
|
||||
.generate_admin_message()?;
|
||||
Ok(msg_to_send)
|
||||
}
|
||||
|
||||
pub async fn prepare_msg_to_send(
|
||||
&mut self,
|
||||
msg: &str,
|
||||
group_name: String,
|
||||
) -> Result<ProcessMessageToSend, UserError> {
|
||||
let group = match self.groups.get_mut(&group_name) {
|
||||
Some(g) => g,
|
||||
None => return Err(UserError::GroupNotFoundError(group_name)),
|
||||
};
|
||||
|
||||
if !group.is_mls_group_initialized() {
|
||||
Err(UserError::GroupNotFoundError(group_name))
|
||||
} else {
|
||||
let msg_to_send = group
|
||||
.create_message(
|
||||
&self.provider,
|
||||
&self.identity.signer,
|
||||
msg,
|
||||
self.identity.identity().to_vec(),
|
||||
)
|
||||
.await?;
|
||||
Ok(msg_to_send)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn remove_users_from_group(
|
||||
&mut self,
|
||||
users: Vec<String>,
|
||||
group_name: String,
|
||||
) -> Result<ProcessMessageToSend, UserError> {
|
||||
if !self.if_group_exists(group_name.clone()) {
|
||||
return Err(UserError::GroupNotFoundError(group_name));
|
||||
}
|
||||
let group = self.groups.get_mut(&group_name).unwrap();
|
||||
let msg = group
|
||||
.remove_members(users, &self.provider, &self.identity.signer)
|
||||
.await?;
|
||||
|
||||
Ok(msg)
|
||||
}
|
||||
|
||||
pub async fn leave_group(&mut self, group_name: String) -> Result<(), UserError> {
|
||||
if !self.if_group_exists(group_name.clone()) {
|
||||
return Err(UserError::GroupNotFoundError(group_name));
|
||||
}
|
||||
self.groups.remove(&group_name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn wallet(&self) -> EthereumWallet {
|
||||
EthereumWallet::from(self.eth_signer.clone())
|
||||
}
|
||||
}
|
||||
133
src/user/README.md
Normal file
133
src/user/README.md
Normal file
@@ -0,0 +1,133 @@
|
||||
# User Module
|
||||
|
||||
The `user` module encapsulates everything the desktop gateway needs to manage a single MLS participant.
|
||||
It owns the MLS identity, tracks the local view of every group the user joined,
|
||||
drives steward epochs, and bridges consensus messages between the core runtime, Waku network, and UI.
|
||||
|
||||
## Directory Layout
|
||||
|
||||
``` bash
|
||||
src/user/
|
||||
├── consensus.rs # Voting lifecycle, consensus event fan-in/out
|
||||
├── groups.rs # Group creation/join/leave utilities and state queries
|
||||
├── messaging.rs # Application-level messaging helpers (ban requests, signing)
|
||||
├── mod.rs # `User` actor definition and shared types
|
||||
├── proposals.rs # Steward batch proposal processing and pending queues
|
||||
├── README.md # This file
|
||||
├── steward.rs # Steward-only helpers: epochs, proposals, apply flow
|
||||
└── waku.rs # Waku ingestion + routing to per-topic handlers
|
||||
```
|
||||
|
||||
Each file extends `impl User` with domain-specific responsibilities,
|
||||
keeping the top-level actor (`mod.rs`) lean.
|
||||
|
||||
## Core Concepts
|
||||
|
||||
- **`User` actor** – Holds one `Identity`, an `MlsProvider`,
|
||||
a map of per-group `Arc<RwLock<Group>>`, the consensus facade, and the Ethereum signer.
|
||||
|
||||
- **`UserAction` enum (`mod.rs`)** – Return type for most handlers.
|
||||
Signals what the caller (gateway) should do next:
|
||||
- `SendToWaku(WakuMessageToSend)`
|
||||
- `SendToApp(AppMessage)`
|
||||
- `LeaveGroup(String)` (triggers UI + cleanup)
|
||||
- `DoNothing` (no side effects)
|
||||
|
||||
- **Per-group concurrency** – Keeps a `HashMap<String, Arc<RwLock<Group>>>`.
|
||||
Each group has its own `RwLock`, allowing independent state transitions without blocking unrelated groups.
|
||||
|
||||
- **Pending batches (`proposals.rs`)** – Non-steward clients may receive `BatchProposalsMessage` while
|
||||
still catching up to state `Waiting`.
|
||||
`PendingBatches` stores the payload until the group is ready to apply it.
|
||||
|
||||
## Lifecycle Overview
|
||||
|
||||
1. **Login** – The gateway calls `User::new` with a private key.
|
||||
The actor derives an MLS identity (`mls_crypto::Identity`)
|
||||
and keeps an `alloy::PrivateKeySigner` for consensus signatures.
|
||||
|
||||
2. **Group discovery** – `groups.rs` exposes helpers:
|
||||
- `create_group` – Either launches a steward-owned MLS group or prepares a placeholder for later joining.
|
||||
- `join_group` – Applies a received `Welcome` message to hydrate the MLS state.
|
||||
- `get_group_members` / `get_group_state` / `leave_group` – Used by the UI for metadata and teardown.
|
||||
|
||||
3. **Message routing** – Incoming Waku traffic is delivered to `waku.rs::process_waku_message`, which:
|
||||
- Filters out self-originated packets (matching `msg.meta` to the group `app_id`).
|
||||
- Routes `WELCOME_SUBTOPIC` payloads to `process_welcome_subtopic`.
|
||||
- Routes `APP_MSG_SUBTOPIC` payloads to `process_app_subtopic`.
|
||||
- Converts MLS protocol messages into `GroupAction`s,
|
||||
mapping them back into `UserAction`s so the gateway can forward data to the UI or back onto Waku.
|
||||
|
||||
4. **Consensus bridge** – `consensus.rs` is the glue to `ConsensusService`:
|
||||
- `process_consensus_proposal` and `process_consensus_vote` persist incoming messages and
|
||||
transition group state.
|
||||
- `process_user_vote` produces either consensus votes (steward) or user votes (regular members)
|
||||
and wraps them for Waku transmission.
|
||||
- `handle_consensus_event` reacts to events emitted by the service,
|
||||
ensuring the MLS state machine aligns with voting outcomes.
|
||||
It reuses `handle_consensus_result` to collapse steward/non-steward flows
|
||||
and trigger follow-up actions (apply proposals, queue batch data, or leave the group).
|
||||
|
||||
5. **Steward duties** – `steward.rs` layers steward-only APIs:
|
||||
- Introspection helpers (`is_user_steward_for_group`, `get_current_epoch_proposals`, etc.).
|
||||
- Epoch management (`start_steward_epoch`, `get_proposals_for_steward_voting`),
|
||||
which kicks the MLS state machine into `Waiting` / `Voting`.
|
||||
- Proposal actions (`add_remove_proposal`, `apply_proposals`) that serialize MLS proposals,
|
||||
commits, and optional welcomes into `WakuMessageToSend` objects for broadcast.
|
||||
|
||||
6. **Application-facing messaging** – `messaging.rs` contains:
|
||||
- `build_group_message` – Wraps an `AppMessage` in MLS encryption for the target group.
|
||||
- `process_ban_request` – Normalizes addresses, routes steward vs. member behavior
|
||||
(queueing steward removal proposals or forwarding the request back to the group).
|
||||
- An implementation of `LocalSigner` for `PrivateKeySigner`,
|
||||
allowing consensus code to request signatures uniformly.
|
||||
|
||||
7. **Proposal batches** – `proposals.rs` handles the post-consensus MLS churn:
|
||||
- `process_batch_proposals_message` – Applies proposals, deserializes MLS commits,
|
||||
and emits the resulting `UserAction`.
|
||||
- `process_stored_batch_proposals` – Replays a deferred batch once the group transitions into `Waiting`.
|
||||
|
||||
## Waku Topics & Message Types
|
||||
|
||||
| Subtopic | Handler | Purpose |
|
||||
|---------------------|-------------------------------|---------|
|
||||
| `WELCOME_SUBTOPIC` | `process_welcome_subtopic` | Steward announcements, encrypted key packages, welcome messages |
|
||||
| `APP_MSG_SUBTOPIC` | `process_app_subtopic` | Batch proposals, encrypted MLS traffic, consensus proposals/votes |
|
||||
|
||||
`process_welcome_subtopic` contains the joining handshake logic:
|
||||
|
||||
- **GroupAnnouncement** → Non-stewards encrypt and send their key package back.
|
||||
- **UserKeyPackage** → Stewards decrypt, store invite proposals, and notify the UI.
|
||||
- **InvitationToJoin** → Non-stewards validate, call `join_group`, and broadcast a system chat message.
|
||||
|
||||
## State Machine Touch Points
|
||||
|
||||
Although the primary MLS state machine lives in `crate::state_machine`, the user module coordinates transitions by calling:
|
||||
|
||||
- `start_steward_epoch_with_validation`
|
||||
- `start_voting`, `complete_voting`, `handle_yes_vote`, `handle_no_vote`
|
||||
- `start_waiting`, `start_consensus_reached`, `start_working`
|
||||
|
||||
This ensures both steward and non-steward clients converge on the same `GroupState` after each consensus result or batch commit.
|
||||
|
||||
## Extending the Module
|
||||
|
||||
When adding new functionality:
|
||||
|
||||
1. Decide whether the behavior is steward-specific, consensus-related,
|
||||
or generic per-group logic, then extend the appropriate file.
|
||||
Keeping concerns separated avoids monolithic impl blocks.
|
||||
2. Return a `UserAction` so the caller (gateway) can decide where to forward the outcome.
|
||||
3. Prefer reusing `group_ref(group_name)` to fetch the per-group lock; release the lock (`drop`) before performing long-running work to avoid deadlocks.
|
||||
4. If new Waku payloads are introduced, update `process_waku_message` with a deterministic routing branch and ensure the UI/gateway understands the resulting `AppMessage`.
|
||||
|
||||
## Related Tests
|
||||
|
||||
Integration scenarios that exercise this module live under:
|
||||
|
||||
- `tests/user_test.rs`
|
||||
- `tests/consensus_realtime_test.rs`
|
||||
- `tests/state_machine_test.rs`
|
||||
- `tests/consensus_multi_group_test.rs`
|
||||
|
||||
These are good references when updating state transitions or consensus flows.
|
||||
354
src/user/consensus.rs
Normal file
354
src/user/consensus.rs
Normal file
@@ -0,0 +1,354 @@
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::{
|
||||
consensus::ConsensusEvent,
|
||||
error::UserError,
|
||||
protos::{
|
||||
consensus::v1::{Proposal, Vote, VotePayload},
|
||||
de_mls::messages::v1::AppMessage,
|
||||
},
|
||||
state_machine::GroupState,
|
||||
user::{User, UserAction},
|
||||
};
|
||||
use ds::transport::OutboundPacket;
|
||||
|
||||
impl User {
|
||||
pub async fn set_up_consensus_threshold_for_group(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
proposal_id: u32,
|
||||
consensus_threshold: f64,
|
||||
) -> Result<(), UserError> {
|
||||
self.consensus_service
|
||||
.set_consensus_threshold_for_group_session(group_name, proposal_id, consensus_threshold)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
/// Handle consensus result after it's determined.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group the consensus is for
|
||||
/// - `vote_result`: Whether the consensus passed (true) or failed (false)
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Vector of Waku messages to send (if any)
|
||||
///
|
||||
/// ## State Transitions:
|
||||
/// **Steward:**
|
||||
/// - **Vote YES**: Voting → ConsensusReached → Waiting → Working (creates and sends batch proposals, then applies them)
|
||||
/// - **Vote NO**: Voting → Working (discards proposals)
|
||||
///
|
||||
/// **Non-Steward:**
|
||||
/// - **Vote YES**: Voting → ConsensusReached → Waiting → Working (waits for consensus + batch proposals, then applies them)
|
||||
/// - **Vote NO**: Voting → Working (no proposals to apply)
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Completes voting in the group
|
||||
/// - Handles proposal application or cleanup based on result
|
||||
/// - Manages state transitions for both steward and non-steward users
|
||||
/// - Processes pending batch proposals if available
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various state machine and proposal processing errors
|
||||
async fn handle_consensus_result(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
vote_result: bool,
|
||||
) -> Result<Vec<OutboundPacket>, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
group.write().await.complete_voting(vote_result).await?;
|
||||
|
||||
// Handle vote result based on steward status
|
||||
if group.read().await.is_steward().await {
|
||||
if vote_result {
|
||||
// Vote YES: Apply proposals and send commit messages
|
||||
info!("[handle_consensus_result]: Vote YES, sending commit message");
|
||||
|
||||
// Apply proposals and complete (state must be ConsensusReached for this)
|
||||
let messages = self.apply_proposals(group_name).await?;
|
||||
group.write().await.handle_yes_vote().await?;
|
||||
Ok(messages)
|
||||
} else {
|
||||
// Vote NO: Empty proposal queue without applying, no commit messages
|
||||
info!(
|
||||
"[handle_consensus_result]: Vote NO, emptying proposal queue without applying"
|
||||
);
|
||||
|
||||
// Empty proposals without state requirement (direct steward call)
|
||||
group.write().await.handle_no_vote().await?;
|
||||
|
||||
Ok(vec![])
|
||||
}
|
||||
} else if vote_result {
|
||||
// Vote YES: Group already moved to ConsensusReached during complete_voting; transition to Waiting
|
||||
{
|
||||
let mut group_guard = group.write().await;
|
||||
group_guard.start_waiting_after_consensus().await?;
|
||||
}
|
||||
info!("[handle_consensus_result]: Non-steward user transitioning to Waiting state to await batch proposals");
|
||||
|
||||
// Check if there are pending batch proposals that can now be processed
|
||||
if self.pending_batch_proposals.contains(group_name).await {
|
||||
info!("[handle_consensus_result]: Non-steward user has pending batch proposals, processing them now");
|
||||
let action = self.process_stored_batch_proposals(group_name).await?;
|
||||
info!("[handle_consensus_result]: Successfully processed pending batch proposals");
|
||||
if let Some(action) = action {
|
||||
match action {
|
||||
UserAction::Outbound(outbound_packet) => {
|
||||
info!("[handle_consensus_result]: Sending waku message to backend");
|
||||
Ok(vec![outbound_packet])
|
||||
}
|
||||
UserAction::LeaveGroup(group_name) => {
|
||||
self.leave_group(group_name.as_str()).await?;
|
||||
info!("[handle_consensus_result]: Non-steward user left group {group_name}");
|
||||
Ok(vec![])
|
||||
}
|
||||
UserAction::DoNothing => {
|
||||
info!("[handle_consensus_result]: No action to process");
|
||||
Ok(vec![])
|
||||
}
|
||||
_ => {
|
||||
error!("[handle_consensus_result]: Invalid action to process");
|
||||
Err(UserError::InvalidUserAction(action.to_string()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("[handle_consensus_result]: No action to process");
|
||||
Ok(vec![])
|
||||
}
|
||||
} else {
|
||||
info!("[handle_consensus_result]: No pending batch proposals to process");
|
||||
Ok(vec![])
|
||||
}
|
||||
} else {
|
||||
// Vote NO: Transition to Working state
|
||||
group.write().await.start_working().await;
|
||||
info!("[handle_consensus_result]: Non-steward user transitioning to Working state after failed vote");
|
||||
Ok(vec![])
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle incoming consensus events and return commit messages if needed.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group the consensus event is for
|
||||
/// - `event`: The consensus event to handle
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Vector of Waku messages to send (if any)
|
||||
///
|
||||
/// ## Event Types Handled:
|
||||
/// - **ConsensusReached**: Handles successful consensus with result
|
||||
/// - **ConsensusFailed**: Handles consensus failure with liveness criteria
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Routes consensus events to appropriate handlers
|
||||
/// - Manages state transitions based on consensus results
|
||||
/// - Applies liveness criteria for failed consensus
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `UserError::InvalidGroupState` if group is in invalid state
|
||||
/// - Various consensus handling errors
|
||||
pub async fn handle_consensus_event(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
event: ConsensusEvent,
|
||||
) -> Result<Vec<OutboundPacket>, UserError> {
|
||||
match event {
|
||||
ConsensusEvent::ConsensusReached {
|
||||
proposal_id,
|
||||
result,
|
||||
} => {
|
||||
info!(
|
||||
"[handle_consensus_event]: Consensus reached for proposal {proposal_id} in group {group_name}: {result}"
|
||||
);
|
||||
|
||||
let group = self.group_ref(group_name).await?;
|
||||
|
||||
let current_state = group.read().await.get_state().await;
|
||||
info!(
|
||||
"[handle_consensus_event]: Current state: {:?} for proposal {proposal_id}",
|
||||
current_state
|
||||
);
|
||||
|
||||
// Handle the consensus result and return commit messages
|
||||
let messages = self.handle_consensus_result(group_name, result).await?;
|
||||
Ok(messages)
|
||||
}
|
||||
ConsensusEvent::ConsensusFailed {
|
||||
proposal_id,
|
||||
reason,
|
||||
} => {
|
||||
info!(
|
||||
"[handle_consensus_event]: Consensus failed for proposal {proposal_id} in group {group_name}: {reason}"
|
||||
);
|
||||
|
||||
let group = self.group_ref(group_name).await?;
|
||||
|
||||
let current_state = group.read().await.get_state().await;
|
||||
|
||||
info!("[handle_consensus_event]: Handling consensus failure in {:?} state for proposal {proposal_id}", current_state);
|
||||
|
||||
// Handle consensus failure based on current state
|
||||
match current_state {
|
||||
GroupState::Voting => {
|
||||
// If we're in Voting state, complete voting with liveness criteria
|
||||
// Get liveness criteria from the actual proposal
|
||||
let liveness_result = self
|
||||
.consensus_service
|
||||
.get_proposal_liveness_criteria(group_name, proposal_id)
|
||||
.await
|
||||
.unwrap_or(false); // Default to false if proposal not found
|
||||
|
||||
info!("[handle_consensus_result]:Applying liveness criteria for failed proposal {proposal_id}: {liveness_result}");
|
||||
let messages = self
|
||||
.handle_consensus_result(group_name, liveness_result)
|
||||
.await?;
|
||||
Ok(messages)
|
||||
}
|
||||
_ => Err(UserError::InvalidGroupState(current_state.to_string())),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process incoming consensus proposal.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `proposal`: The consensus proposal to process
|
||||
/// - `group_name`: The name of the group the proposal is for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `UserAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Stores proposal in consensus service
|
||||
/// - Starts voting phase in the group
|
||||
/// - Creates voting proposal for frontend
|
||||
///
|
||||
/// ## State Transitions:
|
||||
/// - Any state → Voting (starts voting phase)
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various consensus service errors
|
||||
pub async fn process_consensus_proposal(
|
||||
&mut self,
|
||||
proposal: Proposal,
|
||||
group_name: &str,
|
||||
) -> Result<UserAction, UserError> {
|
||||
self.consensus_service
|
||||
.process_incoming_proposal(group_name, proposal.clone())
|
||||
.await?;
|
||||
|
||||
let group = self.group_ref(group_name).await?;
|
||||
group.write().await.start_voting().await?;
|
||||
info!(
|
||||
"[process_consensus_proposal]: Starting voting for proposal {}",
|
||||
proposal.proposal_id
|
||||
);
|
||||
|
||||
// Send voting proposal to frontend
|
||||
let voting_proposal: AppMessage = VotePayload {
|
||||
group_id: group_name.to_string(),
|
||||
proposal_id: proposal.proposal_id,
|
||||
group_requests: proposal.group_requests.clone(),
|
||||
timestamp: std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)?
|
||||
.as_secs(),
|
||||
}
|
||||
.into();
|
||||
|
||||
Ok(UserAction::SendToApp(voting_proposal))
|
||||
}
|
||||
|
||||
/// Process user vote from frontend.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `proposal_id`: The ID of the proposal to vote on
|
||||
/// - `user_vote`: The user's vote (true for yes, false for no)
|
||||
/// - `group_name`: The name of the group the vote is for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `UserAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - For stewards: Creates consensus vote and sends to group
|
||||
/// - For regular users: Processes user vote in consensus service
|
||||
/// - Builds and sends appropriate message to group
|
||||
///
|
||||
/// ## Message Types:
|
||||
/// - **Steward**: Sends consensus vote message
|
||||
/// - **Regular User**: Sends user vote message
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various consensus service and message building errors
|
||||
pub async fn process_user_vote(
|
||||
&mut self,
|
||||
proposal_id: u32,
|
||||
user_vote: bool,
|
||||
group_name: &str,
|
||||
) -> Result<UserAction, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let app_message = if group.read().await.is_steward().await {
|
||||
info!(
|
||||
"[process_user_vote]: Steward voting for proposal {proposal_id} in group {group_name}"
|
||||
);
|
||||
let proposal = self
|
||||
.consensus_service
|
||||
.vote_on_proposal(group_name, proposal_id, user_vote, self.eth_signer.clone())
|
||||
.await?;
|
||||
proposal.into()
|
||||
} else {
|
||||
info!(
|
||||
"[process_user_vote]: User voting for proposal {proposal_id} in group {group_name}"
|
||||
);
|
||||
let vote = self
|
||||
.consensus_service
|
||||
.process_user_vote(group_name, proposal_id, user_vote, self.eth_signer.clone())
|
||||
.await?;
|
||||
vote.into()
|
||||
};
|
||||
|
||||
self.build_group_message(app_message, group_name)
|
||||
.await
|
||||
.map(UserAction::Outbound)
|
||||
}
|
||||
|
||||
/// Process incoming consensus vote and handle immediate state transitions.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `vote`: The consensus vote to process
|
||||
/// - `group_name`: The name of the group the vote is for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `UserAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Stores vote in consensus service
|
||||
/// - Handles immediate state transitions if consensus is reached
|
||||
///
|
||||
/// ## State Transitions:
|
||||
/// When consensus is reached immediately after processing a vote:
|
||||
/// - **Vote YES**: Non-steward transitions to Waiting state to await batch proposals
|
||||
/// - **Vote NO**: Non-steward transitions to Working state immediately
|
||||
/// - **Steward**: Relies on event-driven system for full proposal management
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various consensus service errors
|
||||
pub(crate) async fn process_consensus_vote(
|
||||
&mut self,
|
||||
vote: Vote,
|
||||
group_name: &str,
|
||||
) -> Result<UserAction, UserError> {
|
||||
self.consensus_service
|
||||
.process_incoming_vote(group_name, vote.clone())
|
||||
.await?;
|
||||
|
||||
Ok(UserAction::DoNothing)
|
||||
}
|
||||
}
|
||||
184
src/user/groups.rs
Normal file
184
src/user/groups.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use openmls::{
|
||||
group::MlsGroupJoinConfig,
|
||||
prelude::{StagedWelcome, Welcome},
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::info;
|
||||
|
||||
use crate::{error::UserError, group::Group, state_machine::GroupState, user::User};
|
||||
use mls_crypto::identity::normalize_wallet_address;
|
||||
|
||||
impl User {
|
||||
/// Fetch the shared reference to a group by name.
|
||||
pub(crate) async fn group_ref(&self, name: &str) -> Result<Arc<RwLock<Group>>, UserError> {
|
||||
self.groups
|
||||
.read()
|
||||
.await
|
||||
.get(name)
|
||||
.cloned()
|
||||
.ok_or(UserError::GroupNotFoundError)
|
||||
}
|
||||
|
||||
/// Create a new group for this user.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to create
|
||||
/// - `is_creation`: Whether this is a group creation (true) or joining (false)
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - If `is_creation` is true: Creates MLS group with steward capabilities
|
||||
/// - If `is_creation` is false: Creates empty group for later joining
|
||||
/// - Adds group to user's groups map
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupAlreadyExistsError` if group already exists
|
||||
/// - Various MLS group creation errors
|
||||
pub async fn create_group(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
is_creation: bool,
|
||||
) -> Result<(), UserError> {
|
||||
let mut groups = self.groups.write().await;
|
||||
if groups.contains_key(group_name) {
|
||||
return Err(UserError::GroupAlreadyExistsError);
|
||||
}
|
||||
let group = if is_creation {
|
||||
Group::new(
|
||||
group_name,
|
||||
true,
|
||||
Some(&self.provider),
|
||||
Some(self.identity.signer()),
|
||||
Some(&self.identity.credential_with_key()),
|
||||
)?
|
||||
} else {
|
||||
Group::new(group_name, false, None, None, None)?
|
||||
};
|
||||
|
||||
groups.insert(group_name.to_string(), Arc::new(RwLock::new(group)));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Join a group after receiving a welcome message.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `welcome`: The MLS welcome message containing group information
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Creates new MLS group from welcome message
|
||||
/// - Sets the MLS group in the user's group instance
|
||||
/// - Updates group state to reflect successful joining
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Group must already exist in user's groups map
|
||||
/// - Welcome message must be valid and contain proper group data
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various MLS group creation errors
|
||||
pub(crate) async fn join_group(&mut self, welcome: Welcome) -> Result<(), UserError> {
|
||||
let group_config = MlsGroupJoinConfig::builder().build();
|
||||
let mls_group =
|
||||
StagedWelcome::new_from_welcome(&self.provider, &group_config, welcome, None)?
|
||||
.into_group(&self.provider)?;
|
||||
|
||||
let group_id = mls_group.group_id().to_vec();
|
||||
let group_name = String::from_utf8(group_id)?;
|
||||
|
||||
let group = self.group_ref(&group_name).await?;
|
||||
group.write().await.set_mls_group(mls_group)?;
|
||||
|
||||
info!("[join_group]: User joined group {group_name}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the state of a group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to get the state of
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `GroupState` of the group
|
||||
pub async fn get_group_state(&self, group_name: &str) -> Result<GroupState, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let state = group.read().await.get_state().await;
|
||||
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
/// Get the number of members in a group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to get the number of members of
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - The number of members in the group
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
pub async fn get_group_number_of_members(&self, group_name: &str) -> Result<usize, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let members = group.read().await.members_identity().await?;
|
||||
Ok(members.len())
|
||||
}
|
||||
|
||||
/// Retrieve the list of member identities for a group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: Target group
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Vector of normalized wallet addresses (e.g., `0xabc...`)
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group is missing
|
||||
pub async fn get_group_members(&self, group_name: &str) -> Result<Vec<String>, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
if !group.read().await.is_mls_group_initialized() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
let members = group.read().await.members_identity().await?;
|
||||
Ok(members
|
||||
.into_iter()
|
||||
.map(|raw| normalize_wallet_address(raw.as_slice()).to_string())
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// Get the MLS epoch of a group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to get the MLS epoch of
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - The MLS epoch of the group
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
pub async fn get_group_mls_epoch(&self, group_name: &str) -> Result<u64, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let epoch = group.read().await.epoch().await?;
|
||||
Ok(epoch.as_u64())
|
||||
}
|
||||
|
||||
/// Leave a group and clean up associated resources.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to leave
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Removes group from user's groups map
|
||||
/// - Cleans up all group-related resources
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Group must exist in user's groups map
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
pub async fn leave_group(&mut self, group_name: &str) -> Result<(), UserError> {
|
||||
info!("[leave_group]: Leaving group {group_name}");
|
||||
let group = self.group_ref(group_name).await?;
|
||||
self.groups.write().await.remove(group_name);
|
||||
drop(group);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
139
src/user/messaging.rs
Normal file
139
src/user/messaging.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
use alloy::{
|
||||
primitives::Address,
|
||||
signers::{local::PrivateKeySigner, Signer},
|
||||
};
|
||||
use tracing::info;
|
||||
|
||||
use crate::{
|
||||
error::UserError,
|
||||
protos::de_mls::messages::v1::{AppMessage, BanRequest, ProposalAdded},
|
||||
user::{User, UserAction},
|
||||
LocalSigner,
|
||||
};
|
||||
use ds::transport::OutboundPacket;
|
||||
use mls_crypto::normalize_wallet_address_str;
|
||||
|
||||
impl User {
|
||||
/// Prepare data to build a waku message for a group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `app_message`: The application message to send to the group
|
||||
/// - `group_name`: The name of the group to send the message to
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Waku message ready for transmission
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Group must be initialized with MLS group
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Preserves original AppMessage structure without wrapping
|
||||
/// - Builds MLS message through the group
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Used for consensus-related messages like proposals and votes
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `UserError::MlsGroupNotInitialized` if MLS group not initialized
|
||||
/// - Various MLS message building errors
|
||||
pub async fn build_group_message(
|
||||
&mut self,
|
||||
app_message: AppMessage,
|
||||
group_name: &str,
|
||||
) -> Result<OutboundPacket, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
if !group.read().await.is_mls_group_initialized() {
|
||||
return Err(UserError::MlsGroupNotInitialized);
|
||||
}
|
||||
|
||||
let msg_to_send = group
|
||||
.write()
|
||||
.await
|
||||
.build_message(&self.provider, self.identity.signer(), &app_message)
|
||||
.await?;
|
||||
|
||||
Ok(msg_to_send)
|
||||
}
|
||||
|
||||
/// Process incoming ban request.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `ban_request`: The ban request to process
|
||||
/// - `group_name`: The name of the group the ban request is for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Waku message to send to the group
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - **For stewards**: Adds remove proposal to steward queue and sends system message
|
||||
/// - **For regular users**: Forwards ban request to the group
|
||||
///
|
||||
/// ## Message Types:
|
||||
/// - **Steward**: Sends system message about proposal addition
|
||||
/// - **Regular User**: Sends ban request message to group
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various message building errors
|
||||
pub async fn process_ban_request(
|
||||
&mut self,
|
||||
ban_request: BanRequest,
|
||||
group_name: &str,
|
||||
) -> Result<UserAction, UserError> {
|
||||
let normalized_user_to_ban = normalize_wallet_address_str(&ban_request.user_to_ban)?;
|
||||
info!("[process_ban_request]: Processing ban request for user {normalized_user_to_ban} in group {group_name}");
|
||||
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let is_steward = group.read().await.is_steward().await;
|
||||
if is_steward {
|
||||
// Steward: add the remove proposal to the queue
|
||||
info!(
|
||||
"[process_ban_request]: Steward adding remove proposal for user {normalized_user_to_ban}"
|
||||
);
|
||||
let request = self
|
||||
.add_remove_proposal(group_name, normalized_user_to_ban.clone())
|
||||
.await?;
|
||||
|
||||
// Send notification to UI about the new proposal
|
||||
let proposal_added_msg: AppMessage = ProposalAdded {
|
||||
group_id: group_name.to_string(),
|
||||
request: request.into(),
|
||||
}
|
||||
.into();
|
||||
|
||||
Ok(UserAction::SendToApp(proposal_added_msg))
|
||||
} else {
|
||||
// Regular user: send the ban request to the group
|
||||
let updated_ban_request = BanRequest {
|
||||
user_to_ban: normalized_user_to_ban,
|
||||
requester: self.identity_string(),
|
||||
group_name: ban_request.group_name,
|
||||
};
|
||||
let msg = self
|
||||
.build_group_message(updated_ban_request.into(), group_name)
|
||||
.await?;
|
||||
Ok(UserAction::Outbound(msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LocalSigner for PrivateKeySigner {
|
||||
async fn local_sign_message(&self, message: &[u8]) -> Result<Vec<u8>, anyhow::Error> {
|
||||
let signature = self.sign_message(message).await?;
|
||||
let signature_bytes = signature.as_bytes().to_vec();
|
||||
Ok(signature_bytes)
|
||||
}
|
||||
|
||||
fn address(&self) -> Address {
|
||||
self.address()
|
||||
}
|
||||
|
||||
fn address_string(&self) -> String {
|
||||
self.address().to_string()
|
||||
}
|
||||
|
||||
fn address_bytes(&self) -> Vec<u8> {
|
||||
self.address().as_slice().to_vec()
|
||||
}
|
||||
}
|
||||
122
src/user/mod.rs
Normal file
122
src/user/mod.rs
Normal file
@@ -0,0 +1,122 @@
|
||||
pub mod consensus;
|
||||
pub mod groups;
|
||||
pub mod messaging;
|
||||
pub mod proposals;
|
||||
pub mod steward;
|
||||
pub mod waku;
|
||||
|
||||
use alloy::signers::local::PrivateKeySigner;
|
||||
use kameo::Actor;
|
||||
use std::{collections::HashMap, fmt::Display, str::FromStr, sync::Arc};
|
||||
use tokio::sync::{broadcast, RwLock};
|
||||
|
||||
use ds::transport::OutboundPacket;
|
||||
use mls_crypto::{
|
||||
identity::Identity,
|
||||
openmls_provider::{MlsProvider, CIPHERSUITE},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
consensus::{ConsensusEvent, ConsensusService},
|
||||
error::UserError,
|
||||
group::Group,
|
||||
protos::de_mls::messages::v1::AppMessage,
|
||||
user::proposals::PendingBatches,
|
||||
};
|
||||
|
||||
/// Represents the action to take after processing a user message or event.
|
||||
///
|
||||
/// This enum defines the possible outcomes when processing user-related operations,
|
||||
/// allowing the caller to determine the appropriate next steps for message handling,
|
||||
/// group management, and network communication.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum UserAction {
|
||||
Outbound(OutboundPacket),
|
||||
SendToApp(AppMessage),
|
||||
LeaveGroup(String),
|
||||
DoNothing,
|
||||
}
|
||||
|
||||
impl Display for UserAction {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
UserAction::Outbound(_) => write!(f, "Outbound"),
|
||||
UserAction::SendToApp(_) => write!(f, "SendToApp"),
|
||||
UserAction::LeaveGroup(group_name) => write!(f, "LeaveGroup({group_name})"),
|
||||
UserAction::DoNothing => write!(f, "DoNothing"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a user in the MLS-based messaging system.
|
||||
///
|
||||
/// The User struct manages the lifecycle of multiple groups, handles consensus operations,
|
||||
/// and coordinates communication between the application layer and the Waku network.
|
||||
/// It integrates with the consensus service for proposal management and voting.
|
||||
///
|
||||
/// ## Key Features:
|
||||
/// - Multi-group management and coordination
|
||||
/// - Consensus service integration for proposal handling
|
||||
/// - Waku message processing and routing
|
||||
/// - Steward epoch coordination
|
||||
/// - Member management through proposals
|
||||
#[derive(Actor)]
|
||||
pub struct User {
|
||||
identity: Identity,
|
||||
// Each group has its own lock for better concurrency
|
||||
groups: Arc<RwLock<HashMap<String, Arc<RwLock<Group>>>>>,
|
||||
provider: MlsProvider,
|
||||
consensus_service: ConsensusService,
|
||||
eth_signer: PrivateKeySigner,
|
||||
// Queue for batch proposals that arrive before consensus is reached
|
||||
pending_batch_proposals: PendingBatches,
|
||||
}
|
||||
|
||||
impl User {
|
||||
/// Create a new user instance with the specified Ethereum private key.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `user_eth_priv_key`: The user's Ethereum private key as a hex string
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - New User instance with initialized identity and services
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError` if private key parsing or identity creation fails
|
||||
pub fn new(
|
||||
user_eth_priv_key: &str,
|
||||
consensus_service: &ConsensusService,
|
||||
) -> Result<Self, UserError> {
|
||||
let signer = PrivateKeySigner::from_str(user_eth_priv_key)?;
|
||||
let user_address = signer.address();
|
||||
|
||||
let crypto = MlsProvider::default();
|
||||
let id = Identity::new(CIPHERSUITE, &crypto, user_address.as_slice())?;
|
||||
|
||||
let user = User {
|
||||
groups: Arc::new(RwLock::new(HashMap::new())),
|
||||
identity: id,
|
||||
eth_signer: signer,
|
||||
provider: crypto,
|
||||
consensus_service: consensus_service.clone(),
|
||||
pending_batch_proposals: PendingBatches::default(),
|
||||
};
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
/// Get a subscription to consensus events
|
||||
pub fn subscribe_to_consensus_events(&self) -> broadcast::Receiver<(String, ConsensusEvent)> {
|
||||
self.consensus_service.subscribe_to_events()
|
||||
}
|
||||
|
||||
/// Get the identity string for debugging and identification purposes.
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - String representation of the user's identity
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Primarily used for debugging, logging, and user identification
|
||||
pub fn identity_string(&self) -> String {
|
||||
self.identity.identity_string()
|
||||
}
|
||||
}
|
||||
146
src/user/proposals.rs
Normal file
146
src/user/proposals.rs
Normal file
@@ -0,0 +1,146 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use openmls::prelude::{DeserializeBytes, MlsMessageIn};
|
||||
use tracing::info;
|
||||
|
||||
use crate::{
|
||||
error::UserError,
|
||||
group::GroupAction,
|
||||
protos::de_mls::messages::v1::BatchProposalsMessage,
|
||||
state_machine::GroupState,
|
||||
user::{User, UserAction},
|
||||
};
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct PendingBatches {
|
||||
inner: Arc<RwLock<HashMap<String, BatchProposalsMessage>>>,
|
||||
}
|
||||
|
||||
impl PendingBatches {
|
||||
pub(crate) async fn store(&self, group: &str, batch: BatchProposalsMessage) {
|
||||
self.inner.write().await.insert(group.to_string(), batch);
|
||||
}
|
||||
|
||||
pub(crate) async fn take(&self, group: &str) -> Option<BatchProposalsMessage> {
|
||||
self.inner.write().await.remove(group)
|
||||
}
|
||||
|
||||
pub(crate) async fn contains(&self, group: &str) -> bool {
|
||||
self.inner.read().await.contains_key(group)
|
||||
}
|
||||
}
|
||||
|
||||
impl User {
|
||||
/// Process batch proposals message from the steward.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `batch_msg`: The batch proposals message to process
|
||||
/// - `group_name`: The name of the group these proposals are for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `UserAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Processes all MLS proposals in the batch
|
||||
/// - Applies the commit message to complete the group update
|
||||
/// - Transitions group to Working state after successful processing
|
||||
///
|
||||
/// ## State Requirements:
|
||||
/// - Group must be in Waiting state to process batch proposals
|
||||
/// - If not in correct state, stores proposals for later processing
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various MLS processing errors
|
||||
pub(crate) async fn process_batch_proposals_message(
|
||||
&mut self,
|
||||
batch_msg: BatchProposalsMessage,
|
||||
group_name: &str,
|
||||
) -> Result<UserAction, UserError> {
|
||||
// Get the group lock
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let initial_state = group.read().await.get_state().await;
|
||||
if initial_state != GroupState::Waiting {
|
||||
info!(
|
||||
"[process_batch_proposals_message]: Cannot process batch proposals in {initial_state} state, storing for later processing"
|
||||
);
|
||||
// Store the batch proposals for later processing
|
||||
self.pending_batch_proposals
|
||||
.store(group_name, batch_msg)
|
||||
.await;
|
||||
return Ok(UserAction::DoNothing);
|
||||
}
|
||||
|
||||
// Process all proposals before the commit
|
||||
for proposal_bytes in batch_msg.mls_proposals {
|
||||
let (mls_message_in, _) = MlsMessageIn::tls_deserialize_bytes(&proposal_bytes)?;
|
||||
let protocol_message = mls_message_in.try_into_protocol_message()?;
|
||||
|
||||
let _res = group
|
||||
.write()
|
||||
.await
|
||||
.process_protocol_msg(protocol_message, &self.provider)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Then process the commit message
|
||||
let (mls_message_in, _) = MlsMessageIn::tls_deserialize_bytes(&batch_msg.commit_message)?;
|
||||
let protocol_message = mls_message_in.try_into_protocol_message()?;
|
||||
|
||||
let res = group
|
||||
.write()
|
||||
.await
|
||||
.process_protocol_msg(protocol_message, &self.provider)
|
||||
.await?;
|
||||
|
||||
group.write().await.start_working().await;
|
||||
|
||||
match res {
|
||||
GroupAction::GroupAppMsg(msg) => Ok(UserAction::SendToApp(msg)),
|
||||
GroupAction::LeaveGroup => Ok(UserAction::LeaveGroup(group_name.to_string())),
|
||||
GroupAction::DoNothing => Ok(UserAction::DoNothing),
|
||||
GroupAction::GroupProposal(proposal) => {
|
||||
self.process_consensus_proposal(proposal, group_name).await
|
||||
}
|
||||
GroupAction::GroupVote(vote) => self.process_consensus_vote(vote, group_name).await,
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to process a batch proposals message that was deferred earlier.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group whose stored batch should be retried
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `Some(UserAction)` if a stored batch was processed, `None` otherwise
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Checks for a cached `BatchProposalsMessage` and processes it immediately if present
|
||||
/// - Removes the stored batch once processing succeeds
|
||||
///
|
||||
/// ## Usage:
|
||||
/// Call after transitioning into `Waiting` so any deferred steward batch can be replayed.
|
||||
pub(crate) async fn process_stored_batch_proposals(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
) -> Result<Option<UserAction>, UserError> {
|
||||
if self.pending_batch_proposals.contains(group_name).await {
|
||||
if let Some(batch_msg) = self.pending_batch_proposals.take(group_name).await {
|
||||
info!(
|
||||
"[process_stored_batch_proposals]: Processing stored batch proposals for group {}",
|
||||
group_name
|
||||
);
|
||||
let action = self
|
||||
.process_batch_proposals_message(batch_msg, group_name)
|
||||
.await?;
|
||||
Ok(Some(action))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
279
src/user/steward.rs
Normal file
279
src/user/steward.rs
Normal file
@@ -0,0 +1,279 @@
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::{
|
||||
error::UserError,
|
||||
protos::{
|
||||
consensus::v1::{UpdateRequest, VotePayload},
|
||||
de_mls::messages::v1::AppMessage,
|
||||
},
|
||||
user::{User, UserAction},
|
||||
};
|
||||
use ds::transport::OutboundPacket;
|
||||
|
||||
impl User {
|
||||
/// Check if the user is a steward for the given group.
|
||||
/// ## Returns:
|
||||
/// - Serialized `UiUpdateRequest` representing the removal request for UI consumption
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to check
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `true` if the user is a steward for the group, `false` otherwise
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if the group does not exist
|
||||
pub async fn is_user_steward_for_group(&self, group_name: &str) -> Result<bool, UserError> {
|
||||
let group = {
|
||||
let groups = self.groups.read().await;
|
||||
groups
|
||||
.get(group_name)
|
||||
.cloned()
|
||||
.ok_or_else(|| UserError::GroupNotFoundError)?
|
||||
};
|
||||
let is_steward = group.read().await.is_steward().await;
|
||||
Ok(is_steward)
|
||||
}
|
||||
|
||||
/// Check if the MLS group is initialized for the given group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to check
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `true` if the MLS group is initialized for the group, `false` otherwise
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if the group does not exist
|
||||
pub async fn is_user_mls_group_initialized_for_group(
|
||||
&self,
|
||||
group_name: &str,
|
||||
) -> Result<bool, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let is_initialized = group.read().await.is_mls_group_initialized();
|
||||
Ok(is_initialized)
|
||||
}
|
||||
|
||||
/// Get the current epoch proposals for the given group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to get the proposals for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - A vector of `GroupUpdateRequest` representing the current epoch proposals
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if the group does not exist
|
||||
pub async fn get_current_epoch_proposals(
|
||||
&self,
|
||||
group_name: &str,
|
||||
) -> Result<Vec<crate::steward::GroupUpdateRequest>, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let proposals = group.read().await.get_current_epoch_proposals().await;
|
||||
Ok(proposals)
|
||||
}
|
||||
|
||||
/// Prepare a steward announcement message for a group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to prepare the message for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Waku message containing the steward announcement
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `GroupError::StewardNotSet` if no steward is configured
|
||||
pub async fn prepare_steward_msg(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
) -> Result<OutboundPacket, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let msg_to_send = group.write().await.generate_steward_message().await?;
|
||||
Ok(msg_to_send)
|
||||
}
|
||||
|
||||
/// Start a new steward epoch for the given group. It includes validation of the current state
|
||||
/// and the number of proposals. If there are no proposals, it will stay in the Working state and return 0.
|
||||
/// If there are proposals, it will change the state to Waiting and return the number of proposals.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to start steward epoch for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Number of proposals that will be voted on (0 if no proposals)
|
||||
///
|
||||
/// ## State Transitions:
|
||||
/// - **With proposals**: Working → Waiting (returns proposal count)
|
||||
/// - **No proposals**: Working → Working (stays in Working, returns 0)
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `GroupError::InvalidStateTransition` if the group is not in the Working state
|
||||
/// - `GroupError::StewardNotSet` if no steward is configured
|
||||
pub async fn start_steward_epoch(&mut self, group_name: &str) -> Result<usize, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let proposal_count = group
|
||||
.write()
|
||||
.await
|
||||
.start_steward_epoch_with_validation()
|
||||
.await?;
|
||||
|
||||
if proposal_count == 0 {
|
||||
info!("[start_steward_epoch]: No proposals to vote on, skipping steward epoch");
|
||||
} else {
|
||||
info!("[start_steward_epoch]: Started steward epoch with {proposal_count} proposals");
|
||||
}
|
||||
|
||||
Ok(proposal_count)
|
||||
}
|
||||
/// Start voting for the given group, returning the proposal ID.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to start voting for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Tuple of (proposal_id, UserAction) for steward actions
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Starts voting phase in the group
|
||||
/// - Creates consensus proposal for voting
|
||||
/// - Sends voting proposal to frontend
|
||||
///
|
||||
/// ## State Transitions:
|
||||
/// - **Waiting → Voting**: If proposals found and steward starts voting
|
||||
/// - **Waiting → Working**: If no proposals found (edge case fix)
|
||||
///
|
||||
/// ## Edge Case Handling:
|
||||
/// If no proposals are found during voting phase (rare edge case where proposals
|
||||
/// disappear between epoch start and voting), transitions back to Working state
|
||||
/// to prevent getting stuck in Waiting state.
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `UserError::NoProposalsFound` if no proposals exist
|
||||
/// - `ConsensusError::SystemTimeError` if the system time creation fails
|
||||
pub async fn get_proposals_for_steward_voting(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
) -> Result<(u32, UserAction), UserError> {
|
||||
info!(
|
||||
"[get_proposals_for_steward_voting]: Getting proposals for steward voting in group {group_name}"
|
||||
);
|
||||
|
||||
let group = self.group_ref(group_name).await?;
|
||||
|
||||
// If this is the steward, create proposal with vote and send to group
|
||||
if group.read().await.is_steward().await {
|
||||
let proposals = group
|
||||
.read()
|
||||
.await
|
||||
.get_proposals_for_voting_epoch_as_ui_update_requests()
|
||||
.await;
|
||||
if !proposals.is_empty() {
|
||||
group.write().await.start_voting().await?;
|
||||
|
||||
// Get group members for expected voters count
|
||||
let members = group.read().await.members_identity().await?;
|
||||
let participant_ids: Vec<Vec<u8>> = members.into_iter().collect();
|
||||
let expected_voters_count = participant_ids.len() as u32;
|
||||
|
||||
// Create consensus proposal
|
||||
let proposal = self
|
||||
.consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
uuid::Uuid::new_v4().to_string(),
|
||||
proposals.clone(),
|
||||
self.identity.identity_string().into(),
|
||||
expected_voters_count,
|
||||
3600, // 1 hour expiration
|
||||
true, // liveness criteria
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!(
|
||||
"[get_proposals_for_steward_voting]: Created consensus proposal with ID {} and {} expected voters",
|
||||
proposal.proposal_id, expected_voters_count
|
||||
);
|
||||
|
||||
// Send voting proposal to frontend
|
||||
let voting_proposal: AppMessage = VotePayload {
|
||||
group_id: group_name.to_string(),
|
||||
proposal_id: proposal.proposal_id,
|
||||
group_requests: proposal.group_requests.clone(),
|
||||
timestamp: proposal.timestamp,
|
||||
}
|
||||
.into();
|
||||
|
||||
Ok((proposal.proposal_id, UserAction::SendToApp(voting_proposal)))
|
||||
} else {
|
||||
error!("[get_proposals_for_steward_voting]: No proposals found");
|
||||
Err(UserError::NoProposalsFound)
|
||||
}
|
||||
} else {
|
||||
// Not steward, do nothing
|
||||
info!("[get_proposals_for_steward_voting]: Not steward, doing nothing");
|
||||
Ok((0, UserAction::DoNothing))
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a remove proposal into the `steward::current_epoch_proposals` vector for the given group.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to add the proposal to
|
||||
/// - `identity`: The identity string of the member to remove
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `GroupError::InvalidIdentity` if the identity is invalid
|
||||
pub async fn add_remove_proposal(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
identity: String,
|
||||
) -> Result<UpdateRequest, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let request = group.write().await.store_remove_proposal(identity).await?;
|
||||
Ok(request)
|
||||
}
|
||||
|
||||
/// Apply proposals for the given group, returning the batch message(s).
|
||||
/// - Creates MLS proposals for all pending group updates
|
||||
/// - Commits all proposals to the MLS group
|
||||
/// - Generates batch proposals message and welcome message if needed
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `group_name`: The name of the group to apply proposals for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - Vector of Waku messages containing batch proposals and welcome messages
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Group must be initialized with MLS group
|
||||
/// - User must be steward for the group
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `UserError::MlsGroupNotInitialized` if MLS group not initialized
|
||||
/// - `GroupError::StewardNotSet` if no steward is configured
|
||||
/// - `GroupError::EmptyProposals` if no proposals exist
|
||||
/// - `GroupError::InvalidStateTransition` if the group is not in the Waiting state
|
||||
pub async fn apply_proposals(
|
||||
&mut self,
|
||||
group_name: &str,
|
||||
) -> Result<Vec<OutboundPacket>, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
|
||||
if !group.read().await.is_mls_group_initialized() {
|
||||
return Err(UserError::MlsGroupNotInitialized);
|
||||
}
|
||||
|
||||
let messages = group
|
||||
.write()
|
||||
.await
|
||||
.create_batch_proposals_message(&self.provider, self.identity.signer())
|
||||
.await?;
|
||||
info!("[apply_proposals]: Applied proposals for group {group_name}");
|
||||
Ok(messages)
|
||||
}
|
||||
}
|
||||
308
src/user/waku.rs
Normal file
308
src/user/waku.rs
Normal file
@@ -0,0 +1,308 @@
|
||||
use openmls::prelude::{DeserializeBytes, MlsMessageBodyIn, MlsMessageIn};
|
||||
use prost::Message;
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
use ds::{
|
||||
transport::{InboundPacket, OutboundPacket},
|
||||
APP_MSG_SUBTOPIC, WELCOME_SUBTOPIC,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
error::UserError,
|
||||
group::GroupAction,
|
||||
message::MessageType,
|
||||
protos::de_mls::messages::v1::{
|
||||
app_message, welcome_message, AppMessage, ConversationMessage, ProposalAdded,
|
||||
UserKeyPackage, WelcomeMessage,
|
||||
},
|
||||
user::{User, UserAction},
|
||||
};
|
||||
|
||||
impl User {
|
||||
/// Process messages from the welcome subtopic.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `msg`: The Waku message to process
|
||||
/// - `group_name`: The name of the group this message is for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `UserAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Message Types Handled:
|
||||
/// - **GroupAnnouncement**: Steward announcements for group joining
|
||||
/// - **UserKeyPackage**: Encrypted key packages from new members
|
||||
/// - **InvitationToJoin**: MLS welcome messages for group joining
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - For group announcements: Generates and sends key package
|
||||
/// - For user key packages: Decrypts and stores invite proposals (steward only)
|
||||
/// - For invitations: Processes MLS welcome and joins group
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `UserError::MessageVerificationFailed` if announcement verification fails
|
||||
/// - Various MLS and encryption errors
|
||||
pub async fn process_welcome_subtopic(
|
||||
&mut self,
|
||||
msg: InboundPacket,
|
||||
group_name: &str,
|
||||
) -> Result<UserAction, UserError> {
|
||||
// Get the group lock first
|
||||
let group = self.group_ref(group_name).await?;
|
||||
|
||||
let is_steward = {
|
||||
let group = group.read().await;
|
||||
group.is_steward().await
|
||||
};
|
||||
let is_kp_shared = {
|
||||
let group = group.read().await;
|
||||
group.is_kp_shared()
|
||||
};
|
||||
let is_mls_group_initialized = {
|
||||
let group = group.read().await;
|
||||
group.is_mls_group_initialized()
|
||||
};
|
||||
|
||||
let received_msg = WelcomeMessage::decode(msg.payload.as_slice())?;
|
||||
if let Some(payload) = &received_msg.payload {
|
||||
match payload {
|
||||
welcome_message::Payload::GroupAnnouncement(group_announcement) => {
|
||||
if is_steward || is_kp_shared {
|
||||
Ok(UserAction::DoNothing)
|
||||
} else {
|
||||
info!(
|
||||
"[process_welcome_subtopic]: User received group announcement msg for group {group_name}"
|
||||
);
|
||||
if !group_announcement.verify()? {
|
||||
return Err(UserError::MessageVerificationFailed);
|
||||
}
|
||||
|
||||
let new_kp = self.identity.generate_key_package(&self.provider)?;
|
||||
let encrypted_key_package = group_announcement.encrypt(new_kp)?;
|
||||
group.write().await.set_kp_shared(true);
|
||||
|
||||
let welcome_msg: WelcomeMessage = UserKeyPackage {
|
||||
encrypt_kp: encrypted_key_package,
|
||||
}
|
||||
.into();
|
||||
let packet = OutboundPacket::new(
|
||||
welcome_msg.encode_to_vec(),
|
||||
WELCOME_SUBTOPIC,
|
||||
group_name,
|
||||
group.read().await.app_id(),
|
||||
);
|
||||
Ok(UserAction::Outbound(packet))
|
||||
}
|
||||
}
|
||||
welcome_message::Payload::UserKeyPackage(user_key_package) => {
|
||||
if is_steward {
|
||||
info!(
|
||||
"[process_welcome_subtopic]: Steward received key package for the group {group_name}"
|
||||
);
|
||||
let key_package = group
|
||||
.write()
|
||||
.await
|
||||
.decrypt_steward_msg(user_key_package.encrypt_kp.clone())
|
||||
.await?;
|
||||
|
||||
let request = group
|
||||
.write()
|
||||
.await
|
||||
.store_invite_proposal(Box::new(key_package))
|
||||
.await?;
|
||||
|
||||
// Send notification to UI about the new proposal
|
||||
let proposal_added_msg: AppMessage = ProposalAdded {
|
||||
group_id: group_name.to_string(),
|
||||
request: Some(request),
|
||||
}
|
||||
.into();
|
||||
|
||||
Ok(UserAction::SendToApp(proposal_added_msg))
|
||||
} else {
|
||||
Ok(UserAction::DoNothing)
|
||||
}
|
||||
}
|
||||
welcome_message::Payload::InvitationToJoin(invitation_to_join) => {
|
||||
if is_steward || is_mls_group_initialized {
|
||||
Ok(UserAction::DoNothing)
|
||||
} else {
|
||||
// Release the lock before calling join_group
|
||||
drop(group);
|
||||
|
||||
// Parse the MLS message to get the welcome
|
||||
let (mls_in, _) = MlsMessageIn::tls_deserialize_bytes(
|
||||
&invitation_to_join.mls_message_out_bytes,
|
||||
)?;
|
||||
|
||||
let welcome = match mls_in.extract() {
|
||||
MlsMessageBodyIn::Welcome(welcome) => welcome,
|
||||
_ => return Err(UserError::FailedToExtractWelcomeMessage),
|
||||
};
|
||||
|
||||
if welcome.secrets().iter().any(|egs| {
|
||||
let hash_ref = egs.new_member().as_slice().to_vec();
|
||||
self.identity.is_key_package_exists(&hash_ref)
|
||||
}) {
|
||||
self.join_group(welcome).await?;
|
||||
let app_msg = ConversationMessage {
|
||||
message: format!(
|
||||
"User {} joined to the group",
|
||||
self.identity.identity_string()
|
||||
)
|
||||
.as_bytes()
|
||||
.to_vec(),
|
||||
sender: "SYSTEM".to_string(),
|
||||
group_name: group_name.to_string(),
|
||||
}
|
||||
.into();
|
||||
let msg = self.build_group_message(app_msg, group_name).await?;
|
||||
Ok(UserAction::Outbound(msg))
|
||||
} else {
|
||||
Ok(UserAction::DoNothing)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Err(UserError::EmptyWelcomeMessageError)
|
||||
}
|
||||
}
|
||||
|
||||
/// Process messages from the application message subtopic.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `msg`: The Waku message to process
|
||||
/// - `group_name`: The name of the group this message is for
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `UserAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Message Types Handled:
|
||||
/// - **BatchProposalsMessage**: Batch proposals from steward
|
||||
/// - **MLS Protocol Messages**: Encrypted group messages
|
||||
/// - **Application Messages**: Various app-level messages
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Processes batch proposals and applies them to the group
|
||||
/// - Handles MLS protocol messages through the group
|
||||
/// - Routes consensus proposals and votes to appropriate handlers
|
||||
///
|
||||
/// ## Preconditions:
|
||||
/// - Group must be initialized with MLS group
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - Various MLS processing errors
|
||||
pub async fn process_app_subtopic(
|
||||
&mut self,
|
||||
msg: InboundPacket,
|
||||
group_name: &str,
|
||||
) -> Result<UserAction, UserError> {
|
||||
let group = self.group_ref(group_name).await?;
|
||||
|
||||
if !group.read().await.is_mls_group_initialized() {
|
||||
return Ok(UserAction::DoNothing);
|
||||
}
|
||||
|
||||
// Try to parse as AppMessage first
|
||||
// This one required for commit messages as they are sent as AppMessage
|
||||
// without group encryption
|
||||
if let Ok(app_message) = AppMessage::decode(msg.payload.as_slice()) {
|
||||
match app_message.payload {
|
||||
Some(app_message::Payload::BatchProposalsMessage(batch_msg)) => {
|
||||
info!(
|
||||
"[process_app_subtopic]: Processing batch proposals message for group {group_name}"
|
||||
);
|
||||
// Release the lock before calling self methods
|
||||
return self
|
||||
.process_batch_proposals_message(batch_msg, group_name)
|
||||
.await;
|
||||
}
|
||||
_ => {
|
||||
error!(
|
||||
"[process_app_subtopic]: Cannot process another app message here: {:?}",
|
||||
app_message.payload.unwrap().message_type()
|
||||
);
|
||||
return Err(UserError::InvalidAppMessageType);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to MLS protocol message
|
||||
let (mls_message_in, _) = MlsMessageIn::tls_deserialize_bytes(msg.payload.as_slice())?;
|
||||
let mls_message = mls_message_in.try_into_protocol_message()?;
|
||||
|
||||
let group = self.group_ref(group_name).await?;
|
||||
let res = group
|
||||
.write()
|
||||
.await
|
||||
.process_protocol_msg(mls_message, &self.provider)
|
||||
.await?;
|
||||
|
||||
// Handle the result outside of any lock scope
|
||||
match res {
|
||||
GroupAction::GroupAppMsg(msg) => {
|
||||
info!("[process_app_subtopic]: sending to app");
|
||||
Ok(UserAction::SendToApp(msg))
|
||||
}
|
||||
GroupAction::LeaveGroup => {
|
||||
info!("[process_app_subtopic]: leaving group");
|
||||
Ok(UserAction::LeaveGroup(group_name.to_string()))
|
||||
}
|
||||
GroupAction::DoNothing => {
|
||||
info!("[process_app_subtopic]: doing nothing");
|
||||
Ok(UserAction::DoNothing)
|
||||
}
|
||||
GroupAction::GroupProposal(proposal) => {
|
||||
info!("[process_app_subtopic]: processing consensus proposal");
|
||||
self.process_consensus_proposal(proposal, group_name).await
|
||||
}
|
||||
GroupAction::GroupVote(vote) => {
|
||||
info!("[process_app_subtopic]: processing consensus vote");
|
||||
self.process_consensus_vote(vote, group_name).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process incoming Waku messages and route them to appropriate handlers.
|
||||
///
|
||||
/// ## Parameters:
|
||||
/// - `msg`: The Waku message to process
|
||||
///
|
||||
/// ## Returns:
|
||||
/// - `UserAction` indicating what action should be taken
|
||||
///
|
||||
/// ## Message Routing:
|
||||
/// - **Welcome Subtopic**: Routes to `process_welcome_subtopic()`
|
||||
/// - **App Message Subtopic**: Routes to `process_app_subtopic()`
|
||||
/// - **Unknown Topics**: Returns error
|
||||
///
|
||||
/// ## Effects:
|
||||
/// - Processes messages based on content topic
|
||||
/// - Skips messages from the same app instance
|
||||
/// - Routes to appropriate subtopic handlers
|
||||
///
|
||||
/// ## Errors:
|
||||
/// - `UserError::GroupNotFoundError` if group doesn't exist
|
||||
/// - `UserError::UnknownContentTopicType` for unsupported topics
|
||||
/// - Various processing errors from subtopic handlers
|
||||
pub async fn process_inbound_packet(
|
||||
&mut self,
|
||||
msg: InboundPacket,
|
||||
) -> Result<UserAction, UserError> {
|
||||
let group_name = msg.group_id.clone();
|
||||
let group = self.group_ref(&group_name).await?;
|
||||
if msg.app_id == group.read().await.app_id() {
|
||||
debug!("[process_waku_message]: Message is from the same app, skipping");
|
||||
return Ok(UserAction::DoNothing);
|
||||
}
|
||||
|
||||
let ct_name = msg.subtopic.clone();
|
||||
match ct_name.as_str() {
|
||||
WELCOME_SUBTOPIC => self.process_welcome_subtopic(msg, &group_name).await,
|
||||
APP_MSG_SUBTOPIC => self.process_app_subtopic(msg, &group_name).await,
|
||||
_ => Err(UserError::UnknownContentTopicType(ct_name)),
|
||||
}
|
||||
}
|
||||
}
|
||||
246
src/user_actor.rs
Normal file
246
src/user_actor.rs
Normal file
@@ -0,0 +1,246 @@
|
||||
use kameo::message::{Context, Message};
|
||||
|
||||
use ds::transport::{InboundPacket, OutboundPacket};
|
||||
|
||||
use crate::{
|
||||
consensus::ConsensusEvent,
|
||||
error::UserError,
|
||||
protos::de_mls::messages::v1::{BanRequest, ConversationMessage},
|
||||
user::{User, UserAction},
|
||||
};
|
||||
|
||||
impl Message<InboundPacket> for User {
|
||||
type Reply = Result<UserAction, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: InboundPacket,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.process_inbound_packet(msg).await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CreateGroupRequest {
|
||||
pub group_name: String,
|
||||
pub is_creation: bool,
|
||||
}
|
||||
|
||||
impl Message<CreateGroupRequest> for User {
|
||||
type Reply = Result<(), UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: CreateGroupRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.create_group(&msg.group_name, msg.is_creation).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct StewardMessageRequest {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<StewardMessageRequest> for User {
|
||||
type Reply = Result<OutboundPacket, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: StewardMessageRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.prepare_steward_msg(&msg.group_name).await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LeaveGroupRequest {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<LeaveGroupRequest> for User {
|
||||
type Reply = Result<(), UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: LeaveGroupRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.leave_group(&msg.group_name).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SendGroupMessage {
|
||||
pub message: Vec<u8>,
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<SendGroupMessage> for User {
|
||||
type Reply = Result<OutboundPacket, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: SendGroupMessage,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let app_msg = ConversationMessage {
|
||||
message: msg.message,
|
||||
sender: self.identity_string(),
|
||||
group_name: msg.group_name.clone(),
|
||||
}
|
||||
.into();
|
||||
self.build_group_message(app_msg, &msg.group_name).await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BuildBanMessage {
|
||||
pub ban_request: BanRequest,
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<BuildBanMessage> for User {
|
||||
type Reply = Result<UserAction, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: BuildBanMessage,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.process_ban_request(msg.ban_request, &msg.group_name)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
// New state machine message types
|
||||
pub struct StartStewardEpochRequest {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<StartStewardEpochRequest> for User {
|
||||
type Reply = Result<usize, UserError>; // Returns number of proposals
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: StartStewardEpochRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.start_steward_epoch(&msg.group_name).await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GetGroupMembersRequest {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<GetGroupMembersRequest> for User {
|
||||
type Reply = Result<Vec<String>, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: GetGroupMembersRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.get_group_members(&msg.group_name).await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GetProposalsForStewardVotingRequest {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<GetProposalsForStewardVotingRequest> for User {
|
||||
type Reply = Result<UserAction, UserError>; // Returns proposal_id
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: GetProposalsForStewardVotingRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let (_, action) = self
|
||||
.get_proposals_for_steward_voting(&msg.group_name)
|
||||
.await?;
|
||||
Ok(action)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct UserVoteRequest {
|
||||
pub group_name: String,
|
||||
pub proposal_id: u32,
|
||||
pub vote: bool,
|
||||
}
|
||||
|
||||
impl Message<UserVoteRequest> for User {
|
||||
type Reply = Result<Option<OutboundPacket>, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: UserVoteRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let action = self
|
||||
.process_user_vote(msg.proposal_id, msg.vote, &msg.group_name)
|
||||
.await?;
|
||||
match action {
|
||||
UserAction::Outbound(outbound_packet) => Ok(Some(outbound_packet)),
|
||||
UserAction::DoNothing => Ok(None),
|
||||
_ => Err(UserError::InvalidUserAction(
|
||||
"Vote action must result in Waku message".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Consensus event message handler
|
||||
pub struct ConsensusEventMessage {
|
||||
pub group_name: String,
|
||||
pub event: ConsensusEvent,
|
||||
}
|
||||
|
||||
impl Message<ConsensusEventMessage> for User {
|
||||
type Reply = Result<Vec<OutboundPacket>, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: ConsensusEventMessage,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.handle_consensus_event(&msg.group_name, msg.event)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct IsStewardStatusRequest {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<IsStewardStatusRequest> for User {
|
||||
type Reply = Result<bool, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: IsStewardStatusRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let is_steward = self.is_user_steward_for_group(&msg.group_name).await?;
|
||||
Ok(is_steward)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GetCurrentEpochProposalsRequest {
|
||||
pub group_name: String,
|
||||
}
|
||||
|
||||
impl Message<GetCurrentEpochProposalsRequest> for User {
|
||||
type Reply = Result<Vec<crate::steward::GroupUpdateRequest>, UserError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: GetCurrentEpochProposalsRequest,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
let proposals = self.get_current_epoch_proposals(&msg.group_name).await?;
|
||||
Ok(proposals)
|
||||
}
|
||||
}
|
||||
95
src/user_app_instance.rs
Normal file
95
src/user_app_instance.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
// src/user_app_instance.rs
|
||||
use alloy::signers::local::PrivateKeySigner;
|
||||
use kameo::actor::ActorRef;
|
||||
use std::{str::FromStr, sync::Arc};
|
||||
use tokio::sync::broadcast::Sender;
|
||||
use tracing::{error, info};
|
||||
|
||||
use ds::{
|
||||
topic_filter::TopicFilter,
|
||||
transport::{DeliveryService, InboundPacket},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
consensus::ConsensusService, error::UserError, group_registry::GroupRegistry, user::User,
|
||||
user_actor::ConsensusEventMessage, LocalSigner,
|
||||
};
|
||||
|
||||
pub const STEWARD_EPOCH: u64 = 15;
|
||||
|
||||
pub struct AppState<DS: DeliveryService> {
|
||||
pub delivery: DS,
|
||||
pub pubsub: Sender<InboundPacket>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CoreCtx<DS: DeliveryService> {
|
||||
pub app_state: Arc<AppState<DS>>,
|
||||
pub groups: Arc<GroupRegistry>,
|
||||
pub topics: Arc<TopicFilter>,
|
||||
pub consensus: Arc<ConsensusService>,
|
||||
}
|
||||
|
||||
impl<DS: DeliveryService> CoreCtx<DS> {
|
||||
pub fn new(app_state: Arc<AppState<DS>>) -> Self {
|
||||
Self {
|
||||
app_state,
|
||||
groups: Arc::new(GroupRegistry::new()),
|
||||
topics: Arc::new(TopicFilter::new()),
|
||||
consensus: Arc::new(ConsensusService::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a `User` from an Ethereum private key, spawn it as a kameo actor,
/// and wire its consensus events to the delivery service.
///
/// ## Parameters:
/// - `eth_private_key`: hex-encoded Ethereum private key used both as the
///   signer and as the user's identity seed
/// - `app_state`: shared application state holding the delivery service
/// - `consensus_service`: consensus service the new user registers with
///
/// ## Returns:
/// - The spawned actor reference and the user's address string
///
/// ## Errors:
/// - `UserError` if the key fails to parse or `User::new` fails
pub async fn create_user_instance<DS: DeliveryService>(
    eth_private_key: String,
    app_state: Arc<AppState<DS>>,
    consensus_service: &ConsensusService,
) -> Result<(ActorRef<User>, String), UserError> {
    let signer = PrivateKeySigner::from_str(&eth_private_key)?;
    let user_address = signer.address_string();
    // Create user
    let user = User::new(&eth_private_key, consensus_service)?;

    // Set up consensus event forwarding before spawning the actor
    // (subscribing first guarantees no event is missed between spawn and
    // subscription).
    let consensus_events = user.subscribe_to_consensus_events();

    let user_ref = kameo::spawn(user);
    // Clones moved into the forwarding task below.
    let app_state_consensus = app_state.clone();
    let user_ref_consensus = user_ref.clone();
    let mut consensus_events_receiver = consensus_events;
    // Background task: forward each consensus event to the user actor and
    // push any resulting commit messages to the delivery service. The loop
    // ends when the broadcast sender is dropped (recv returns Err).
    tokio::spawn(async move {
        info!("Starting consensus event forwarding loop (user-only)");
        while let Ok((group_name, event)) = consensus_events_receiver.recv().await {
            let result = user_ref_consensus
                .ask(ConsensusEventMessage {
                    group_name: group_name.clone(),
                    event,
                })
                .await;

            match result {
                Ok(commit_messages) => {
                    if !commit_messages.is_empty() {
                        info!(
                            "Sending {} commit messages to Waku for group {}",
                            commit_messages.len(),
                            group_name
                        );
                        for msg in commit_messages {
                            // Delivery failures are logged but do not stop
                            // the forwarding loop (best-effort send).
                            if let Err(e) = app_state_consensus.delivery.send(msg).await {
                                error!("Error sending commit message to delivery service: {e}");
                            }
                        }
                    }
                }
                Err(e) => {
                    error!("Error forwarding consensus event: {e}");
                }
            }
        }
        info!("Consensus forwarding loop ended");
    });

    Ok((user_ref, user_address))
}
|
||||
133
src/ws_actor.rs
133
src/ws_actor.rs
@@ -1,133 +0,0 @@
|
||||
use axum::extract::ws::{Message as WsMessage, WebSocket};
|
||||
use futures::{stream::SplitSink, SinkExt};
|
||||
use kameo::{
|
||||
message::{Context, Message},
|
||||
Actor,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::MessageToPrint;
|
||||
|
||||
/// This actor is used to handle messages from web socket
|
||||
#[derive(Debug, Actor)]
|
||||
pub struct WsActor {
|
||||
/// This is the sender of the open web socket connection
|
||||
pub ws_sender: SplitSink<WebSocket, WsMessage>,
|
||||
/// This variable is used to check if the user has connected to the ws, if not, we parce message as ConnectMessage
|
||||
pub is_initialized: bool,
|
||||
}
|
||||
|
||||
impl WsActor {
|
||||
pub fn new(ws_sender: SplitSink<WebSocket, WsMessage>) -> Self {
|
||||
Self {
|
||||
ws_sender,
|
||||
is_initialized: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This enum is used to represent the actions that can be performed on the web socket
|
||||
/// Connect - this action is used to return connection data to the user
|
||||
/// UserMessage - this action is used to handle message from web socket and return it to the user
|
||||
/// RemoveUser - this action is used to remove a user from the group
|
||||
/// DoNothing - this action is used for test purposes (return empty action if message is not valid)
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum WsAction {
|
||||
Connect(ConnectMessage),
|
||||
UserMessage(UserMessage),
|
||||
RemoveUser(String, String),
|
||||
DoNothing,
|
||||
}
|
||||
|
||||
/// This struct is used to represent the message from the user that we got from web socket
|
||||
#[derive(Deserialize, Debug, PartialEq, Serialize)]
|
||||
pub struct UserMessage {
|
||||
pub message: String,
|
||||
pub group_id: String,
|
||||
}
|
||||
|
||||
/// This struct is used to represent the connection data that web socket sends to the user
|
||||
#[derive(Deserialize, Debug, PartialEq)]
|
||||
pub struct ConnectMessage {
|
||||
/// This is the private key of the user that we will use to authenticate the user
|
||||
pub eth_private_key: String,
|
||||
/// This is the id of the group that the user is joining
|
||||
pub group_id: String,
|
||||
/// This is the flag that indicates if the user should create a new group or subscribe to an existing one
|
||||
pub should_create: bool,
|
||||
}
|
||||
|
||||
/// This struct is used to represent the raw message from the web socket.
|
||||
/// It is used to handle the message from the web socket and return it to the user
|
||||
/// We can parse it to the ConnectMessage or UserMessage
|
||||
/// if it starts with "/ban" it will be parsed to RemoveUser, otherwise it will be parsed to UserMessage
|
||||
#[derive(Deserialize, Debug, PartialEq)]
|
||||
pub struct RawWsMessage {
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
impl Message<RawWsMessage> for WsActor {
|
||||
type Reply = Result<WsAction, WsError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: RawWsMessage,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
if !self.is_initialized {
|
||||
let connect_message = serde_json::from_str(&msg.message)?;
|
||||
self.is_initialized = true;
|
||||
return Ok(WsAction::Connect(connect_message));
|
||||
}
|
||||
match serde_json::from_str(&msg.message) {
|
||||
Ok(UserMessage { message, group_id }) => {
|
||||
if message.starts_with("/") {
|
||||
let mut tokens = message.split_whitespace();
|
||||
match tokens.next() {
|
||||
Some("/ban") => {
|
||||
let user_to_ban = tokens.next();
|
||||
if user_to_ban.is_none() {
|
||||
return Err(WsError::InvalidMessage);
|
||||
} else {
|
||||
let user_to_ban = user_to_ban.unwrap().to_lowercase();
|
||||
return Ok(WsAction::RemoveUser(
|
||||
user_to_ban.to_string(),
|
||||
group_id.clone(),
|
||||
));
|
||||
}
|
||||
}
|
||||
_ => return Err(WsError::InvalidMessage),
|
||||
}
|
||||
}
|
||||
Ok(WsAction::UserMessage(UserMessage { message, group_id }))
|
||||
}
|
||||
Err(_) => Err(WsError::InvalidMessage),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This impl is used to send messages to the websocket
|
||||
impl Message<MessageToPrint> for WsActor {
|
||||
type Reply = Result<(), WsError>;
|
||||
|
||||
async fn handle(
|
||||
&mut self,
|
||||
msg: MessageToPrint,
|
||||
_ctx: Context<'_, Self, Self::Reply>,
|
||||
) -> Self::Reply {
|
||||
self.ws_sender
|
||||
.send(WsMessage::Text(msg.to_string()))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum WsError {
|
||||
#[error("Invalid message")]
|
||||
InvalidMessage,
|
||||
#[error("Malformed json")]
|
||||
MalformedJson(#[from] serde_json::Error),
|
||||
#[error("Failed to send message")]
|
||||
SendMessageError(#[from] axum::Error),
|
||||
}
|
||||
359
tests/consensus_multi_group_test.rs
Normal file
359
tests/consensus_multi_group_test.rs
Normal file
@@ -0,0 +1,359 @@
|
||||
use alloy::signers::local::PrivateKeySigner;
|
||||
use de_mls::consensus::{compute_vote_hash, ConsensusEvent, ConsensusService};
|
||||
use de_mls::protos::consensus::v1::Vote;
|
||||
use de_mls::LocalSigner;
|
||||
use prost::Message;
|
||||
use std::time::Duration;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[tokio::test]
async fn test_basic_consensus_service() {
    // Create consensus service
    let consensus_service = ConsensusService::new();

    let group_name = "test_group";
    // Threshold is 2n/3 of expected voters: with 3 voters, 2 votes suffice.
    let expected_voters_count = 3;

    // Random key acts as the proposal owner (steward) for this test.
    let signer = PrivateKeySigner::random();
    let proposal_owner_address = signer.address();
    let proposal_owner = proposal_owner_address.to_string().as_bytes().to_vec();

    // Create a proposal
    let proposal = consensus_service
        .create_proposal(
            group_name,
            "Test Proposal".to_string(),
            vec![],
            proposal_owner,
            expected_voters_count,
            300,
            true,
        )
        .await
        .expect("Failed to create proposal");

    // The owner casts the first (steward) vote; rebind to the updated
    // proposal so its vote list is visible below.
    let proposal = consensus_service
        .vote_on_proposal(group_name, proposal.proposal_id, true, signer)
        .await
        .expect("Failed to vote on proposal");

    // Verify proposal was created
    let active_proposals = consensus_service.get_active_proposals(group_name).await;
    assert_eq!(active_proposals.len(), 1);
    assert_eq!(active_proposals[0].proposal_id, proposal.proposal_id);

    // Verify group statistics
    let group_stats = consensus_service.get_group_stats(group_name).await;
    assert_eq!(group_stats.total_sessions, 1);
    assert_eq!(group_stats.active_sessions, 1);

    // Verify consensus threshold calculation
    // With 3 expected voters, we need 2n/3 = 2 votes for consensus
    // Initially we have 1 vote (steward), so we don't have sufficient votes
    assert!(
        !consensus_service
            .has_sufficient_votes(group_name, proposal.proposal_id)
            .await
    );

    let signer_2 = PrivateKeySigner::random();
    let proposal_owner_2 = signer_2.address_bytes();
    // Add 1 more vote (total 2 votes)
    let mut vote = Vote {
        // NOTE(review): u128 -> u32 cast truncates the UUID; assumed
        // acceptable for a test-only vote id.
        vote_id: Uuid::new_v4().as_u128() as u32,
        vote_owner: proposal_owner_2,
        proposal_id: proposal.proposal_id,
        timestamp: std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("Failed to get current time")
            .as_secs(),
        vote: true,
        parent_hash: Vec::new(),
        received_hash: proposal.votes[0].vote_hash.clone(), // Reference steward's vote hash
        // Hash and signature are filled in after construction, since the
        // hash covers the other fields.
        vote_hash: Vec::new(),
        signature: Vec::new(),
    };

    // Compute vote hash
    vote.vote_hash = compute_vote_hash(&vote);
    // Sign the encoded vote (hash included, signature still empty).
    let vote_bytes = vote.encode_to_vec();
    vote.signature = signer_2
        .local_sign_message(&vote_bytes)
        .await
        .expect("Failed to sign vote");

    consensus_service
        .process_incoming_vote(group_name, vote)
        .await
        .expect("Failed to process vote");

    // Now we should have sufficient votes (2 out of 3 expected voters)
    assert!(
        consensus_service
            .has_sufficient_votes(group_name, proposal.proposal_id)
            .await
    );
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multi_group_consensus_service() {
|
||||
// Create consensus service with max 10 sessions per group
|
||||
let consensus_service = ConsensusService::new_with_max_sessions(10);
|
||||
|
||||
// Test group 1
|
||||
let group1_name = "test_group_1";
|
||||
let group1_members_count = 3;
|
||||
let signer_1 = PrivateKeySigner::random();
|
||||
let proposal_owner_1 = signer_1.address_bytes();
|
||||
|
||||
// Test group 2
|
||||
let group2_name = "test_group_2";
|
||||
let group2_members_count = 3;
|
||||
let signer_2 = PrivateKeySigner::random();
|
||||
let proposal_owner_2 = signer_2.address_bytes();
|
||||
|
||||
// Create proposals for group 1
|
||||
let proposal_1 = consensus_service
|
||||
.create_proposal(
|
||||
group1_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner_1,
|
||||
group1_members_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let _proposal_1 = consensus_service
|
||||
.vote_on_proposal(group1_name, proposal_1.proposal_id, true, signer_1)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
let proposal_2 = consensus_service
|
||||
.create_proposal(
|
||||
group2_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner_2.clone(),
|
||||
group2_members_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let _proposal_2 = consensus_service
|
||||
.vote_on_proposal(group2_name, proposal_2.proposal_id, true, signer_2.clone())
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
// Create proposal for group 2
|
||||
let proposal_3 = consensus_service
|
||||
.create_proposal(
|
||||
group2_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner_2,
|
||||
group2_members_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let _proposal_3 = consensus_service
|
||||
.vote_on_proposal(group2_name, proposal_3.proposal_id, true, signer_2)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
// Verify proposals are created for both groups
|
||||
let group1_proposals = consensus_service.get_active_proposals(group1_name).await;
|
||||
let group2_proposals = consensus_service.get_active_proposals(group2_name).await;
|
||||
|
||||
assert_eq!(group1_proposals.len(), 1);
|
||||
assert_eq!(group2_proposals.len(), 2);
|
||||
|
||||
// Verify group statistics
|
||||
let group1_stats = consensus_service.get_group_stats(group1_name).await;
|
||||
let group2_stats = consensus_service.get_group_stats(group2_name).await;
|
||||
|
||||
assert_eq!(group1_stats.total_sessions, 1);
|
||||
assert_eq!(group1_stats.active_sessions, 1);
|
||||
assert_eq!(group2_stats.total_sessions, 2);
|
||||
assert_eq!(group2_stats.active_sessions, 2);
|
||||
|
||||
// Verify overall statistics
|
||||
let overall_stats = consensus_service.get_overall_stats().await;
|
||||
assert_eq!(overall_stats.total_sessions, 3);
|
||||
assert_eq!(overall_stats.active_sessions, 3);
|
||||
|
||||
// Verify active groups
|
||||
let active_groups = consensus_service.get_active_groups().await;
|
||||
assert_eq!(active_groups.len(), 2);
|
||||
assert!(active_groups.contains(&group1_name.to_string()));
|
||||
assert!(active_groups.contains(&group2_name.to_string()));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_consensus_threshold_calculation() {
|
||||
let consensus_service = ConsensusService::new();
|
||||
let mut consensus_events = consensus_service.subscribe_to_events();
|
||||
|
||||
let group_name = "test_group_threshold";
|
||||
let expected_voters_count = 5;
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
|
||||
// Create a proposal
|
||||
let proposal = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner,
|
||||
expected_voters_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let proposal = consensus_service
|
||||
.vote_on_proposal(group_name, proposal.proposal_id, true, signer)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
// With 5 expected voters, we need 2n/3 = 3.33... -> 4 votes for consensus
|
||||
// Initially we have 1 vote (steward), so we don't have sufficient votes
|
||||
assert!(
|
||||
!consensus_service
|
||||
.has_sufficient_votes(group_name, proposal.proposal_id)
|
||||
.await
|
||||
);
|
||||
|
||||
for _ in 0..4 {
|
||||
let signer = PrivateKeySigner::random();
|
||||
let vote_owner = signer.address_bytes();
|
||||
let mut vote = Vote {
|
||||
vote_id: Uuid::new_v4().as_u128() as u32,
|
||||
vote_owner: vote_owner.clone(),
|
||||
proposal_id: proposal.proposal_id,
|
||||
timestamp: std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("Failed to get current time")
|
||||
.as_secs(),
|
||||
vote: true,
|
||||
parent_hash: Vec::new(),
|
||||
received_hash: proposal.votes[0].vote_hash.clone(), // Reference previous vote's hash
|
||||
vote_hash: Vec::new(),
|
||||
signature: Vec::new(),
|
||||
};
|
||||
|
||||
// Compute vote hash
|
||||
vote.vote_hash = compute_vote_hash(&vote);
|
||||
let vote_bytes = vote.encode_to_vec();
|
||||
vote.signature = signer
|
||||
.local_sign_message(&vote_bytes)
|
||||
.await
|
||||
.expect("Failed to sign vote");
|
||||
|
||||
let result = consensus_service
|
||||
.process_incoming_vote(group_name, vote.clone())
|
||||
.await;
|
||||
|
||||
result.expect("Failed to process vote");
|
||||
}
|
||||
|
||||
// With 4 out of 5 votes, we should have sufficient votes for consensus
|
||||
assert!(
|
||||
consensus_service
|
||||
.has_sufficient_votes(group_name, proposal.proposal_id)
|
||||
.await
|
||||
);
|
||||
|
||||
// Subscribe to consensus events and wait for natural consensus
|
||||
let proposal_id = proposal.proposal_id;
|
||||
let group_name_clone = group_name;
|
||||
|
||||
// Wait for consensus event with timeout
|
||||
let timeout_duration = Duration::from_secs(15);
|
||||
let consensus_result = tokio::time::timeout(timeout_duration, async {
|
||||
while let Ok((event_group_name, event)) = consensus_events.recv().await {
|
||||
if event_group_name == group_name_clone {
|
||||
match event {
|
||||
ConsensusEvent::ConsensusReached {
|
||||
proposal_id: event_proposal_id,
|
||||
result,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus reached for proposal {proposal_id}: {result}");
|
||||
return Ok(result);
|
||||
}
|
||||
}
|
||||
ConsensusEvent::ConsensusFailed {
|
||||
proposal_id: event_proposal_id,
|
||||
reason,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus failed for proposal {proposal_id}: {reason}");
|
||||
return Err(format!("Consensus failed: {reason}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err("Event channel closed".to_string())
|
||||
})
|
||||
.await
|
||||
.expect("Timeout waiting for consensus event")
|
||||
.expect("Consensus should succeed");
|
||||
|
||||
// Should have consensus result based on 2n/3 threshold
|
||||
assert!(consensus_result); // All votes were true, so result should be true
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_remove_group_sessions() {
|
||||
let consensus_service = ConsensusService::new();
|
||||
|
||||
let group_name = "test_group_remove";
|
||||
let expected_voters_count = 2;
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
|
||||
// Create a proposal
|
||||
let proposal = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner,
|
||||
expected_voters_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let _proposal = consensus_service
|
||||
.vote_on_proposal(group_name, proposal.proposal_id, true, signer)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
// Verify proposal exists
|
||||
let group_stats = consensus_service.get_group_stats(group_name).await;
|
||||
assert_eq!(group_stats.total_sessions, 1);
|
||||
|
||||
// Remove group sessions
|
||||
consensus_service.remove_group_sessions(group_name).await;
|
||||
|
||||
// Verify group sessions are removed
|
||||
let group_stats_after = consensus_service.get_group_stats(group_name).await;
|
||||
assert_eq!(group_stats_after.total_sessions, 0);
|
||||
|
||||
// Verify group is not in active groups
|
||||
let active_groups = consensus_service.get_active_groups().await;
|
||||
assert!(!active_groups.contains(&group_name.to_string()));
|
||||
}
|
||||
599
tests/consensus_realtime_test.rs
Normal file
599
tests/consensus_realtime_test.rs
Normal file
@@ -0,0 +1,599 @@
|
||||
use alloy::signers::local::PrivateKeySigner;
|
||||
use de_mls::consensus::{compute_vote_hash, ConsensusEvent, ConsensusService};
|
||||
use de_mls::protos::consensus::v1::Vote;
|
||||
use de_mls::LocalSigner;
|
||||
use prost::Message;
|
||||
use std::time::Duration;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_realtime_consensus_waiting() {
|
||||
// Create consensus service
|
||||
let consensus_service = ConsensusService::new();
|
||||
|
||||
let group_name = "test_group_realtime";
|
||||
let expected_voters_count = 3;
|
||||
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
|
||||
// Create a proposal
|
||||
let proposal = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner,
|
||||
expected_voters_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let proposal = consensus_service
|
||||
.vote_on_proposal(group_name, proposal.proposal_id, true, signer)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
println!("Created proposal with ID: {}", proposal.proposal_id);
|
||||
|
||||
// Subscribe to consensus events
|
||||
let mut consensus_events = consensus_service.subscribe_to_events();
|
||||
let proposal_id = proposal.proposal_id;
|
||||
|
||||
// Start a background task that waits for consensus events
|
||||
let group_name_clone = group_name;
|
||||
let consensus_waiter = tokio::spawn(async move {
|
||||
println!("Starting consensus event waiter for proposal {proposal_id:?}");
|
||||
|
||||
// Wait for consensus event with timeout
|
||||
let timeout_duration = Duration::from_secs(10);
|
||||
match tokio::time::timeout(timeout_duration, async {
|
||||
while let Ok((event_group_name, event)) = consensus_events.recv().await {
|
||||
if event_group_name == group_name_clone {
|
||||
match event {
|
||||
ConsensusEvent::ConsensusReached {
|
||||
proposal_id: event_proposal_id,
|
||||
result,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus reached for proposal {proposal_id}: {result}");
|
||||
return Ok(result);
|
||||
}
|
||||
}
|
||||
ConsensusEvent::ConsensusFailed {
|
||||
proposal_id: event_proposal_id,
|
||||
reason,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus failed for proposal {proposal_id}: {reason}");
|
||||
return Err(format!("Consensus failed: {reason}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err("Event channel closed".to_string())
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(result) => {
|
||||
println!("Consensus event waiter result: {result:?}");
|
||||
result
|
||||
}
|
||||
Err(_) => {
|
||||
println!("Consensus event waiter timed out");
|
||||
Err("Timeout waiting for consensus".to_string())
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Wait a bit to ensure the waiter is running
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Add votes to reach consensus
|
||||
let mut previous_vote_hash = proposal.votes[0].vote_hash.clone(); // Start with steward's vote hash
|
||||
|
||||
for i in 1..expected_voters_count {
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
let mut vote = Vote {
|
||||
vote_id: Uuid::new_v4().as_u128() as u32,
|
||||
vote_owner: proposal_owner,
|
||||
proposal_id: proposal.proposal_id,
|
||||
timestamp: std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("Failed to get current time")
|
||||
.as_secs(),
|
||||
vote: true,
|
||||
parent_hash: Vec::new(),
|
||||
received_hash: previous_vote_hash.clone(), // Reference previous vote's hash
|
||||
vote_hash: Vec::new(),
|
||||
signature: Vec::new(),
|
||||
};
|
||||
|
||||
// Compute vote hash
|
||||
vote.vote_hash = compute_vote_hash(&vote);
|
||||
let vote_bytes = vote.encode_to_vec();
|
||||
vote.signature = signer
|
||||
.local_sign_message(&vote_bytes)
|
||||
.await
|
||||
.expect("Failed to sign vote");
|
||||
|
||||
println!("Adding vote {} for proposal {}", i, proposal.proposal_id);
|
||||
consensus_service
|
||||
.process_incoming_vote(group_name, vote.clone())
|
||||
.await
|
||||
.expect("Failed to process vote");
|
||||
|
||||
// Update previous vote hash for next iteration
|
||||
previous_vote_hash = vote.vote_hash.clone();
|
||||
|
||||
// Small delay between votes
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
}
|
||||
|
||||
// Wait for consensus result
|
||||
let consensus_result = consensus_waiter
|
||||
.await
|
||||
.expect("Consensus waiter task failed");
|
||||
|
||||
// Verify consensus was reached
|
||||
assert!(consensus_result.is_ok());
|
||||
let result = consensus_result.unwrap();
|
||||
assert!(result); // Should be true (yes votes)
|
||||
|
||||
println!("Test completed successfully - consensus reached!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_consensus_timeout() {
|
||||
// Create consensus service
|
||||
let consensus_service = ConsensusService::new();
|
||||
|
||||
let group_name = "test_group_timeout";
|
||||
let expected_voters_count = 5;
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
|
||||
// Need 4 votes for consensus
|
||||
// Create a proposal
|
||||
let proposal = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner,
|
||||
expected_voters_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let proposal = consensus_service
|
||||
.vote_on_proposal(group_name, proposal.proposal_id, true, signer)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
println!("Created proposal with ID: {}", proposal.proposal_id);
|
||||
|
||||
// Subscribe to consensus events for timeout test
|
||||
let mut consensus_events = consensus_service.subscribe_to_events();
|
||||
let proposal_id = proposal.proposal_id;
|
||||
|
||||
// Start consensus event waiter with timeout
|
||||
let group_name_clone = group_name;
|
||||
let consensus_waiter = tokio::spawn(async move {
|
||||
println!("Starting consensus event waiter with timeout for proposal {proposal_id:?}");
|
||||
|
||||
// Wait for consensus event - should timeout and trigger liveness criteria
|
||||
let timeout_duration = Duration::from_secs(12); // Wait longer than consensus timeout (10s)
|
||||
match tokio::time::timeout(timeout_duration, async {
|
||||
while let Ok((event_group_name, event)) = consensus_events.recv().await {
|
||||
if event_group_name == group_name_clone {
|
||||
match event {
|
||||
ConsensusEvent::ConsensusReached { proposal_id: event_proposal_id, result } => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus reached for proposal {proposal_id}: {result} (via timeout/liveness criteria)");
|
||||
return Ok(result);
|
||||
}
|
||||
}
|
||||
ConsensusEvent::ConsensusFailed { proposal_id: event_proposal_id, reason } => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus failed for proposal {proposal_id}: {reason}");
|
||||
return Err(format!("Consensus failed: {reason}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err("Event channel closed".to_string())
|
||||
}).await {
|
||||
Ok(result) => result,
|
||||
Err(_) => Err("Test timeout waiting for consensus event".to_string())
|
||||
}
|
||||
});
|
||||
|
||||
// Don't add any additional votes - should timeout and apply liveness criteria
|
||||
|
||||
// Wait for consensus result
|
||||
let consensus_result = consensus_waiter
|
||||
.await
|
||||
.expect("Consensus waiter task failed");
|
||||
|
||||
// Verify timeout occurred and liveness criteria was applied
|
||||
// With liveness_criteria_yes = true, should return Ok(true)
|
||||
assert!(consensus_result.is_ok());
|
||||
let result = consensus_result.unwrap();
|
||||
assert!(result); // Should be true due to liveness criteria
|
||||
|
||||
println!("Test completed successfully - timeout occurred and liveness criteria applied!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_consensus_with_mixed_votes() {
|
||||
// Create consensus service
|
||||
let consensus_service = ConsensusService::new();
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
|
||||
let group_name = "test_group_mixed";
|
||||
let expected_voters_count = 3;
|
||||
|
||||
// Create a proposal
|
||||
let proposal = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner,
|
||||
expected_voters_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let proposal = consensus_service
|
||||
.vote_on_proposal(group_name, proposal.proposal_id, true, signer)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
println!("Created proposal with ID: {}", proposal.proposal_id);
|
||||
|
||||
// Subscribe to consensus events
|
||||
let mut consensus_events = consensus_service.subscribe_to_events();
|
||||
let proposal_id = proposal.proposal_id;
|
||||
|
||||
// Start a background task that waits for consensus events
|
||||
let group_name_clone = group_name;
|
||||
let consensus_waiter = tokio::spawn(async move {
|
||||
println!("Starting consensus event waiter for proposal {proposal_id:?}");
|
||||
|
||||
// Wait for consensus event with timeout
|
||||
let timeout_duration = Duration::from_secs(15); // Allow time for votes to be processed
|
||||
match tokio::time::timeout(timeout_duration, async {
|
||||
while let Ok((event_group_name, event)) = consensus_events.recv().await {
|
||||
if event_group_name == group_name_clone {
|
||||
match event {
|
||||
ConsensusEvent::ConsensusReached {
|
||||
proposal_id: event_proposal_id,
|
||||
result,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus reached for proposal {proposal_id}: {result}");
|
||||
return Ok(result);
|
||||
}
|
||||
}
|
||||
ConsensusEvent::ConsensusFailed {
|
||||
proposal_id: event_proposal_id,
|
||||
reason,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus failed for proposal {proposal_id}: {reason}");
|
||||
return Err(format!("Consensus failed: {reason}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err("Event channel closed".to_string())
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(result) => {
|
||||
println!("Consensus event waiter result: {result:?}");
|
||||
result
|
||||
}
|
||||
Err(_) => {
|
||||
println!("Consensus event waiter timed out");
|
||||
Err("Timeout waiting for consensus".to_string())
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Wait a bit to ensure the waiter is running
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Add mixed votes: one yes, one no
|
||||
let votes = vec![(2, false), (3, false)];
|
||||
let mut previous_vote_hash = proposal.votes[0].vote_hash.clone(); // Start with steward's vote hash
|
||||
|
||||
for (i, vote_value) in votes {
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
let mut vote = Vote {
|
||||
vote_id: Uuid::new_v4().as_u128() as u32,
|
||||
vote_owner: proposal_owner,
|
||||
proposal_id: proposal.proposal_id,
|
||||
timestamp: std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("Failed to get current time")
|
||||
.as_secs(),
|
||||
vote: vote_value,
|
||||
parent_hash: Vec::new(),
|
||||
received_hash: previous_vote_hash.clone(), // Reference previous vote's hash
|
||||
vote_hash: Vec::new(),
|
||||
signature: Vec::new(),
|
||||
};
|
||||
|
||||
// Compute vote hash
|
||||
vote.vote_hash = compute_vote_hash(&vote);
|
||||
let vote_bytes = vote.encode_to_vec();
|
||||
vote.signature = signer
|
||||
.local_sign_message(&vote_bytes)
|
||||
.await
|
||||
.expect("Failed to sign vote");
|
||||
|
||||
println!(
|
||||
"Adding vote {} (value: {}) for proposal {}",
|
||||
i, vote_value, proposal.proposal_id
|
||||
);
|
||||
consensus_service
|
||||
.process_incoming_vote(group_name, vote.clone())
|
||||
.await
|
||||
.expect("Failed to process vote");
|
||||
|
||||
// Update previous vote hash for next iteration
|
||||
previous_vote_hash = vote.vote_hash.clone();
|
||||
|
||||
// Small delay between votes
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
}
|
||||
|
||||
// Wait for consensus result
|
||||
let consensus_result = consensus_waiter
|
||||
.await
|
||||
.expect("Consensus waiter task failed");
|
||||
|
||||
// Verify consensus was reached
|
||||
assert!(consensus_result.is_ok());
|
||||
let result = consensus_result.unwrap();
|
||||
// With 2 no votes and 1 yes vote, consensus should be no (false)
|
||||
// However, if it times out, liveness criteria (true) will be applied
|
||||
println!("Mixed votes test result: {result}");
|
||||
// Don't assert specific result since it depends on timing vs. liveness criteria
|
||||
|
||||
println!("Test completed successfully - consensus reached with mixed votes!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rfc_vote_chain_validation() {
|
||||
use de_mls::consensus::compute_vote_hash;
|
||||
use de_mls::LocalSigner;
|
||||
|
||||
// Create consensus service
|
||||
let consensus_service = ConsensusService::new();
|
||||
|
||||
let group_name = "test_rfc_validation";
|
||||
let expected_voters_count = 3;
|
||||
|
||||
let signer1 = PrivateKeySigner::random();
|
||||
let signer2 = PrivateKeySigner::random();
|
||||
let _signer3 = PrivateKeySigner::random();
|
||||
|
||||
// Create first proposal with steward vote
|
||||
let proposal = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
signer1.address_bytes(),
|
||||
expected_voters_count,
|
||||
300,
|
||||
true,
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let proposal = consensus_service
|
||||
.vote_on_proposal(group_name, proposal.proposal_id, true, signer1)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
println!("Created proposal with ID: {}", proposal.proposal_id);
|
||||
|
||||
// Create second vote from different voter
|
||||
let mut vote2 = Vote {
|
||||
vote_id: Uuid::new_v4().as_u128() as u32,
|
||||
vote_owner: signer2.address_bytes(),
|
||||
proposal_id: proposal.proposal_id,
|
||||
timestamp: std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("Failed to get current time")
|
||||
.as_secs(),
|
||||
vote: true,
|
||||
parent_hash: Vec::new(), // Different voter, no parent
|
||||
received_hash: proposal.votes[0].vote_hash.clone(), // Should be hash of first vote
|
||||
vote_hash: Vec::new(),
|
||||
signature: Vec::new(),
|
||||
};
|
||||
|
||||
// Compute vote hash and signature
|
||||
vote2.vote_hash = compute_vote_hash(&vote2);
|
||||
let vote2_bytes = vote2.encode_to_vec();
|
||||
vote2.signature = signer2
|
||||
.local_sign_message(&vote2_bytes)
|
||||
.await
|
||||
.expect("Failed to sign vote");
|
||||
|
||||
// Create proposal with two votes from different voters
|
||||
let mut test_proposal = proposal.clone();
|
||||
test_proposal.votes.push(vote2.clone());
|
||||
|
||||
// Validate the proposal - should pass RFC validation
|
||||
let validation_result = consensus_service.validate_proposal(&test_proposal);
|
||||
assert!(
|
||||
validation_result.is_ok(),
|
||||
"RFC validation should pass: {validation_result:?}"
|
||||
);
|
||||
|
||||
// Test invalid vote chain (wrong received_hash)
|
||||
let mut invalid_proposal = test_proposal.clone();
|
||||
invalid_proposal.votes[1].received_hash = vec![0; 32]; // Wrong hash
|
||||
|
||||
let invalid_result = consensus_service.validate_proposal(&invalid_proposal);
|
||||
assert!(
|
||||
invalid_result.is_err(),
|
||||
"Invalid vote chain should be rejected"
|
||||
);
|
||||
|
||||
println!("RFC vote chain validation test completed successfully!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_event_driven_timeout() {
|
||||
// Create consensus service
|
||||
let consensus_service = ConsensusService::new();
|
||||
|
||||
let group_name = "test_group_event_timeout";
|
||||
let expected_voters_count = 3;
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
|
||||
// Create a proposal with only one vote (steward vote) - should timeout and apply liveness criteria
|
||||
let proposal = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal".to_string(),
|
||||
vec![],
|
||||
proposal_owner,
|
||||
expected_voters_count,
|
||||
300,
|
||||
true, // liveness criteria = true
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal");
|
||||
|
||||
let proposal = consensus_service
|
||||
.vote_on_proposal(group_name, proposal.proposal_id, true, signer)
|
||||
.await
|
||||
.expect("Failed to vote on proposal");
|
||||
|
||||
println!(
|
||||
"Created proposal with ID: {} - waiting for timeout",
|
||||
proposal.proposal_id
|
||||
);
|
||||
|
||||
// Subscribe to consensus events
|
||||
let mut consensus_events = consensus_service.subscribe_to_events();
|
||||
let proposal_id = proposal.proposal_id;
|
||||
let group_name_clone = group_name;
|
||||
|
||||
// Wait for consensus event (should timeout after 10 seconds and apply liveness criteria)
|
||||
let timeout_duration = Duration::from_secs(12); // Wait longer than consensus timeout (10s)
|
||||
let consensus_result = tokio::time::timeout(timeout_duration, async {
|
||||
while let Ok((event_group_name, event)) = consensus_events.recv().await {
|
||||
if event_group_name == group_name_clone {
|
||||
match event {
|
||||
ConsensusEvent::ConsensusReached {
|
||||
proposal_id: event_proposal_id,
|
||||
result,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
println!("Consensus reached for proposal {proposal_id}: {result} (via timeout/liveness criteria)");
|
||||
return result;
|
||||
}
|
||||
}
|
||||
ConsensusEvent::ConsensusFailed {
|
||||
proposal_id: event_proposal_id,
|
||||
reason,
|
||||
} => {
|
||||
if event_proposal_id == proposal_id {
|
||||
panic!("Consensus failed for proposal {proposal_id}: {reason}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
panic!("Event channel closed unexpectedly");
|
||||
})
|
||||
.await
|
||||
.expect("Timeout waiting for consensus event");
|
||||
|
||||
// Should be true due to liveness criteria
|
||||
assert!(consensus_result);
|
||||
|
||||
println!("Test completed successfully - event-driven timeout worked!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_liveness_criteria_functionality() {
|
||||
// Create consensus service
|
||||
let consensus_service = ConsensusService::new();
|
||||
|
||||
let group_name = "test_group_liveness";
|
||||
let expected_voters_count = 3;
|
||||
let signer = PrivateKeySigner::random();
|
||||
let proposal_owner = signer.address_bytes();
|
||||
|
||||
// Test liveness criteria = false
|
||||
let proposal_false = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal False".to_string(),
|
||||
vec![],
|
||||
proposal_owner.clone(),
|
||||
expected_voters_count,
|
||||
300,
|
||||
false, // liveness criteria = false
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal with liveness_criteria_yes = false");
|
||||
|
||||
// Test liveness criteria getter
|
||||
let liveness_false = consensus_service
|
||||
.get_proposal_liveness_criteria(group_name, proposal_false.proposal_id)
|
||||
.await;
|
||||
assert_eq!(liveness_false, Some(false));
|
||||
|
||||
// Test liveness criteria = true
|
||||
let proposal_true = consensus_service
|
||||
.create_proposal(
|
||||
group_name,
|
||||
"Test Proposal True".to_owned(),
|
||||
vec![],
|
||||
proposal_owner,
|
||||
expected_voters_count,
|
||||
300,
|
||||
true, // liveness criteria = true
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create proposal with liveness_criteria_yes = true");
|
||||
|
||||
// Test liveness criteria getter
|
||||
let liveness_true = consensus_service
|
||||
.get_proposal_liveness_criteria(group_name, proposal_true.proposal_id)
|
||||
.await;
|
||||
assert_eq!(liveness_true, Some(true));
|
||||
|
||||
// Test non-existent proposal
|
||||
let liveness_none = consensus_service
|
||||
.get_proposal_liveness_criteria("nonexistent", 99999)
|
||||
.await;
|
||||
assert_eq!(liveness_none, None);
|
||||
|
||||
println!("Test completed successfully - liveness criteria functionality verified!");
|
||||
}
|
||||
271
tests/state_machine_test.rs
Normal file
271
tests/state_machine_test.rs
Normal file
@@ -0,0 +1,271 @@
|
||||
use de_mls::{error::GroupError, group::Group, state_machine::GroupState};
|
||||
use mls_crypto::identity::random_identity;
|
||||
use mls_crypto::openmls_provider::MlsProvider;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_state_machine_transitions() {
|
||||
let crypto = MlsProvider::default();
|
||||
let mut id_steward = random_identity().expect("Failed to create identity");
|
||||
|
||||
let mut group = Group::new(
|
||||
"test_group",
|
||||
true,
|
||||
Some(&crypto),
|
||||
Some(id_steward.signer()),
|
||||
Some(&id_steward.credential_with_key()),
|
||||
)
|
||||
.expect("Failed to create group");
|
||||
|
||||
// Initial state should be Working
|
||||
assert_eq!(group.get_state().await, GroupState::Working);
|
||||
|
||||
// Test start_steward_epoch_with_validation
|
||||
let proposal_count = group
|
||||
.start_steward_epoch_with_validation()
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
assert_eq!(proposal_count, 0); // No proposals initially
|
||||
assert_eq!(group.get_state().await, GroupState::Working); // Should stay in Working
|
||||
|
||||
// Add some proposals
|
||||
let kp_user = id_steward
|
||||
.generate_key_package(&crypto)
|
||||
.expect("Failed to generate key package");
|
||||
let _ = group
|
||||
.store_invite_proposal(Box::new(kp_user))
|
||||
.await
|
||||
.expect("Failed to store proposal");
|
||||
|
||||
// Now start steward epoch with proposals
|
||||
let proposal_count = group
|
||||
.start_steward_epoch_with_validation()
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
assert_eq!(proposal_count, 1); // Should have 1 proposal
|
||||
assert_eq!(group.get_state().await, GroupState::Waiting);
|
||||
|
||||
// Test start_voting_with_validation
|
||||
group.start_voting().await.expect("Failed to start voting");
|
||||
assert_eq!(group.get_state().await, GroupState::Voting);
|
||||
|
||||
// Test complete_voting with success
|
||||
group
|
||||
.complete_voting(true)
|
||||
.await
|
||||
.expect("Failed to complete voting");
|
||||
assert_eq!(group.get_state().await, GroupState::ConsensusReached);
|
||||
|
||||
// Test start_waiting_after_consensus
|
||||
group
|
||||
.start_waiting_after_consensus()
|
||||
.await
|
||||
.expect("Failed to start waiting after consensus");
|
||||
assert_eq!(group.get_state().await, GroupState::Waiting);
|
||||
|
||||
// Test apply_proposals_and_complete
|
||||
group
|
||||
.handle_yes_vote()
|
||||
.await
|
||||
.expect("Failed to apply proposals");
|
||||
assert_eq!(group.get_state().await, GroupState::Working);
|
||||
assert_eq!(group.get_pending_proposals_count().await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invalid_state_transitions() {
|
||||
let crypto = MlsProvider::default();
|
||||
let mut id_steward = random_identity().expect("Failed to create identity");
|
||||
|
||||
let mut group = Group::new(
|
||||
"test_group",
|
||||
true,
|
||||
Some(&crypto),
|
||||
Some(id_steward.signer()),
|
||||
Some(&id_steward.credential_with_key()),
|
||||
)
|
||||
.expect("Failed to create group");
|
||||
|
||||
// Cannot complete voting from Working state
|
||||
let result = group.complete_voting(true).await;
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(GroupError::InvalidStateTransition { .. })
|
||||
));
|
||||
|
||||
// Cannot apply proposals from Working state
|
||||
let result = group.handle_yes_vote().await;
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(GroupError::InvalidStateTransition { .. })
|
||||
));
|
||||
|
||||
// Start steward epoch - but there are no proposals, so it should stay in Working state
|
||||
let proposal_count = group
|
||||
.start_steward_epoch_with_validation()
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
assert_eq!(proposal_count, 0); // No proposals
|
||||
assert_eq!(group.get_state().await, GroupState::Working); // Should still be in Working state
|
||||
|
||||
// Cannot apply proposals from Working state (even after steward epoch start with no proposals)
|
||||
let result = group.handle_yes_vote().await;
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(GroupError::InvalidStateTransition { .. })
|
||||
));
|
||||
|
||||
// Add a proposal to actually transition to Waiting state
|
||||
let kp_user = id_steward
|
||||
.generate_key_package(&crypto)
|
||||
.expect("Failed to generate key package");
|
||||
let _ = group
|
||||
.store_invite_proposal(Box::new(kp_user))
|
||||
.await
|
||||
.expect("Failed to store proposal");
|
||||
|
||||
// Now start steward epoch with proposals - should transition to Waiting
|
||||
let proposal_count = group
|
||||
.start_steward_epoch_with_validation()
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
assert_eq!(proposal_count, 1); // Should have 1 proposal
|
||||
assert_eq!(group.get_state().await, GroupState::Waiting); // Should now be in Waiting state
|
||||
|
||||
// Can apply proposals from Waiting state (even with no proposals)
|
||||
let result = group.handle_yes_vote().await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_proposal_counting() {
|
||||
let crypto = MlsProvider::default();
|
||||
let id_steward = random_identity().expect("Failed to create identity");
|
||||
let mut id_user = random_identity().expect("Failed to create identity");
|
||||
|
||||
let mut group = Group::new(
|
||||
"test_group",
|
||||
true,
|
||||
Some(&crypto),
|
||||
Some(id_steward.signer()),
|
||||
Some(&id_steward.credential_with_key()),
|
||||
)
|
||||
.expect("Failed to create group");
|
||||
|
||||
// Add some proposals
|
||||
let kp_user = id_user
|
||||
.generate_key_package(&crypto)
|
||||
.expect("Failed to generate key package");
|
||||
|
||||
let _ = group
|
||||
.store_invite_proposal(Box::new(kp_user.clone()))
|
||||
.await
|
||||
.expect("Failed to store proposal");
|
||||
let _ = group
|
||||
.store_remove_proposal("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266".to_string())
|
||||
.await
|
||||
.expect("Failed to put remove proposal");
|
||||
|
||||
// Start steward epoch - should collect proposals
|
||||
let proposal_count = group
|
||||
.start_steward_epoch_with_validation()
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
assert_eq!(proposal_count, 2); // Should have 2 proposals
|
||||
assert_eq!(group.get_state().await, GroupState::Waiting);
|
||||
assert_eq!(group.get_voting_proposals_count().await, 2);
|
||||
|
||||
// Complete the flow
|
||||
group.start_voting().await.expect("Failed to start voting");
|
||||
group
|
||||
.complete_voting(true)
|
||||
.await
|
||||
.expect("Failed to complete voting");
|
||||
group
|
||||
.handle_yes_vote()
|
||||
.await
|
||||
.expect("Failed to apply proposals");
|
||||
|
||||
// Proposals count should be reset
|
||||
assert_eq!(group.get_voting_proposals_count().await, 0);
|
||||
assert_eq!(group.get_pending_proposals_count().await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_steward_validation() {
|
||||
let _crypto = MlsProvider::default();
|
||||
let _id_steward = random_identity().expect("Failed to create identity");
|
||||
|
||||
// Create group without steward
|
||||
let mut group = Group::new(
|
||||
"test_group",
|
||||
false, // No steward
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.expect("Failed to create group");
|
||||
|
||||
// Should fail to start steward epoch without steward
|
||||
let result = group.start_steward_epoch_with_validation().await;
|
||||
assert!(matches!(result, Err(GroupError::StewardNotSet)));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_consensus_result_handling() {
|
||||
let crypto = MlsProvider::default();
|
||||
let id_steward = random_identity().expect("Failed to create identity");
|
||||
|
||||
let mut group = Group::new(
|
||||
"test_group",
|
||||
true,
|
||||
Some(&crypto),
|
||||
Some(id_steward.signer()),
|
||||
Some(&id_steward.credential_with_key()),
|
||||
)
|
||||
.expect("Failed to create group");
|
||||
|
||||
// Start steward epoch and voting
|
||||
group
|
||||
.start_steward_epoch_with_validation()
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
group.start_voting().await.expect("Failed to start voting");
|
||||
|
||||
// Test consensus result handling for steward
|
||||
let result = group.complete_voting(true).await;
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(group.get_state().await, GroupState::ConsensusReached);
|
||||
|
||||
// Test invalid consensus result handling (not in voting state)
|
||||
let result = group.complete_voting(true).await;
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(GroupError::InvalidStateTransition { .. })
|
||||
));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_voting_validation_edge_cases() {
|
||||
let _crypto = MlsProvider::default();
|
||||
let _id_steward = random_identity().expect("Failed to create identity");
|
||||
|
||||
let mut group = Group::new(
|
||||
"test_group",
|
||||
true,
|
||||
Some(&_crypto),
|
||||
Some(_id_steward.signer()),
|
||||
Some(&_id_steward.credential_with_key()),
|
||||
)
|
||||
.expect("Failed to create group");
|
||||
|
||||
// Test starting voting from Working state (should transition to Waiting first)
|
||||
group.start_voting().await.expect("Failed to start voting");
|
||||
assert_eq!(group.get_state().await, GroupState::Voting);
|
||||
|
||||
// Test starting voting from Voting state (should fail)
|
||||
let result = group.start_voting().await;
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(GroupError::InvalidStateTransition { .. })
|
||||
));
|
||||
}
|
||||
@@ -1,224 +1,714 @@
|
||||
use de_mls::{
|
||||
consensus::ConsensusService,
|
||||
protos::de_mls::messages::v1::app_message,
|
||||
state_machine::GroupState,
|
||||
user::{User, UserAction},
|
||||
ws_actor::{RawWsMessage, UserMessage, WsAction},
|
||||
};
|
||||
use ds::transport::{InboundPacket, OutboundPacket};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_admin_message_flow() {
|
||||
let group_name = "new_group".to_string();
|
||||
const EXPECTED_EPOCH_1: u64 = 1;
|
||||
const EXPECTED_EPOCH_2: u64 = 2;
|
||||
|
||||
let alice_priv_key = "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d";
|
||||
let res = User::new(alice_priv_key);
|
||||
assert!(res.is_ok(), "Failed to create user");
|
||||
let mut alice = res.unwrap();
|
||||
assert!(
|
||||
alice.create_group(group_name.clone(), true).await.is_ok(),
|
||||
"Failed to create group"
|
||||
const EXPECTED_MEMBERS_2: usize = 2;
|
||||
const EXPECTED_MEMBERS_3: usize = 3;
|
||||
|
||||
const ALICE_PRIVATE_KEY: &str =
|
||||
"0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
|
||||
// const ALICE_WALLET_ADDRESS: &str = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266";
|
||||
const BOB_PRIVATE_KEY: &str = "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d";
|
||||
// const BOB_WALLET_ADDRESS: &str = "0x70997970C51812dc3A010C7d01b50e0d17dc79C8";
|
||||
const CAROL_PRIVATE_KEY: &str =
|
||||
"0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a";
|
||||
// const CAROL_WALLET_ADDRESS: &str = "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC";
|
||||
|
||||
const GROUP_NAME: &str = "new_group";
|
||||
|
||||
fn outbound_to_inbound(pkt: OutboundPacket) -> InboundPacket {
|
||||
InboundPacket {
|
||||
payload: pkt.payload,
|
||||
subtopic: pkt.subtopic,
|
||||
group_id: pkt.group_id,
|
||||
app_id: pkt.app_id,
|
||||
timestamp: None,
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_two_test_user_with_group(group_name: &str) -> (User, User) {
|
||||
let consensus_service = ConsensusService::new();
|
||||
let mut alice =
|
||||
User::new(ALICE_PRIVATE_KEY, &consensus_service).expect("Failed to create user for Alice");
|
||||
alice
|
||||
.create_group(group_name, true)
|
||||
.await
|
||||
.expect("Failed to create group for Alice");
|
||||
|
||||
let consensus_service = ConsensusService::new();
|
||||
let mut bob =
|
||||
User::new(BOB_PRIVATE_KEY, &consensus_service).expect("Failed to create user for Bob");
|
||||
bob.create_group(group_name, false)
|
||||
.await
|
||||
.expect("Failed to create group for Bob");
|
||||
|
||||
(alice, bob)
|
||||
}
|
||||
|
||||
async fn create_three_test_user_with_group(group_name: &str) -> (User, User, User) {
|
||||
let consensus_service = ConsensusService::new();
|
||||
let mut alice =
|
||||
User::new(ALICE_PRIVATE_KEY, &consensus_service).expect("Failed to create user");
|
||||
alice
|
||||
.create_group(group_name, true)
|
||||
.await
|
||||
.expect("Failed to create group for Alice");
|
||||
|
||||
let consensus_service = ConsensusService::new();
|
||||
let mut bob = User::new(BOB_PRIVATE_KEY, &consensus_service).expect("Failed to create user");
|
||||
bob.create_group(group_name, false)
|
||||
.await
|
||||
.expect("Failed to create group for Bob");
|
||||
|
||||
let consensus_service = ConsensusService::new();
|
||||
let mut carol =
|
||||
User::new(CAROL_PRIVATE_KEY, &consensus_service).expect("Failed to create user");
|
||||
carol
|
||||
.create_group(group_name, false)
|
||||
.await
|
||||
.expect("Failed to create group for Carol");
|
||||
|
||||
(alice, bob, carol)
|
||||
}
|
||||
|
||||
async fn get_group_announcement_message(steward: &mut User, group_name: &str) -> InboundPacket {
|
||||
let pkt = steward
|
||||
.prepare_steward_msg(group_name)
|
||||
.await
|
||||
.expect("Failed to prepare steward message");
|
||||
outbound_to_inbound(pkt)
|
||||
}
|
||||
|
||||
async fn share_group_announcement_for_one_user(
|
||||
steward: &mut User,
|
||||
invite_user: &mut User,
|
||||
group_announcement_message: InboundPacket,
|
||||
) {
|
||||
// Newcomer parse GA message and share his KP to Steward
|
||||
let invite_user_kp_message = match invite_user
|
||||
.process_inbound_packet(group_announcement_message.clone())
|
||||
.await
|
||||
.expect("Failed to process waku message with group announcement")
|
||||
{
|
||||
UserAction::Outbound(msg) => msg,
|
||||
_ => panic!("User action is not SendToWaku"),
|
||||
};
|
||||
let invite_user_kp_inbound = outbound_to_inbound(invite_user_kp_message);
|
||||
|
||||
// Steward parse invite user's KP and add it to the queue of income key packages
|
||||
let _steward_action = steward
|
||||
.process_inbound_packet(invite_user_kp_inbound)
|
||||
.await
|
||||
.expect("Failed to process waku message with invite user's KP");
|
||||
}
|
||||
|
||||
// In this function, we are starting steward epoch without any user in the group
|
||||
// So as result we don't expect another vote from another user
|
||||
// and have `GroupState::Working` after processing steward vote
|
||||
async fn steward_epoch_without_user_in_group(
|
||||
steward: &mut User,
|
||||
group_name: &str,
|
||||
) -> Vec<InboundPacket> {
|
||||
// Set up consensus event subscription before voting
|
||||
let mut consensus_events = steward.subscribe_to_consensus_events();
|
||||
|
||||
// State machine: start steward epoch, voting, complete voting
|
||||
let steward_epoch_proposals = steward
|
||||
.start_steward_epoch(group_name)
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
|
||||
println!("Debug: Steward epoch returned {steward_epoch_proposals} proposals");
|
||||
|
||||
let (proposal_id, action) = steward
|
||||
.get_proposals_for_steward_voting(group_name)
|
||||
.await
|
||||
.expect("Failed to start voting");
|
||||
|
||||
println!("Debug: Proposal ID: {proposal_id}");
|
||||
|
||||
// This message will be printed to the app and allow steward to vote
|
||||
let _steward_voting_proposal_app_message = match action {
|
||||
UserAction::SendToApp(app_msg) => app_msg,
|
||||
_ => panic!("User action is not SendToWaku"),
|
||||
};
|
||||
|
||||
let steward_state = steward
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for steward after making voting proposal");
|
||||
println!("Debug: Steward state after making voting proposal: {steward_state:?}");
|
||||
assert_eq!(steward_state, GroupState::Voting);
|
||||
|
||||
// Now steward can vote
|
||||
steward
|
||||
.process_user_vote(proposal_id, true, group_name)
|
||||
.await
|
||||
.expect("Failed to process steward vote on proposal");
|
||||
|
||||
let mut msgs_to_send: Vec<OutboundPacket> = vec![];
|
||||
// Process any consensus events that were emitted during voting
|
||||
while let Ok((group_name, ev)) = consensus_events.try_recv() {
|
||||
println!(
|
||||
"Debug: Processing consensus event in steward_epoch: {ev:?} for group {group_name}"
|
||||
);
|
||||
|
||||
let wmts = steward
|
||||
.handle_consensus_event(&group_name, ev)
|
||||
.await
|
||||
.expect("Failed to handle consensus event");
|
||||
msgs_to_send.extend(wmts);
|
||||
}
|
||||
|
||||
let steward_state = steward
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for steward after voting");
|
||||
println!("Debug: Steward state after voting: {steward_state:?}");
|
||||
assert_eq!(steward_state, GroupState::Working);
|
||||
|
||||
msgs_to_send.into_iter().map(outbound_to_inbound).collect()
|
||||
}
|
||||
|
||||
// In this function, we are starting steward epoch with users in the group
|
||||
// So as result we expect another vote from another user
|
||||
// and have `GroupState::Working` after processing steward vote
|
||||
async fn steward_epoch_with_user_in_group(
|
||||
steward: &mut User,
|
||||
group_name: &str,
|
||||
) -> (Vec<InboundPacket>, u32) {
|
||||
// Set up consensus event subscription before voting
|
||||
let mut consensus_events = steward.subscribe_to_consensus_events();
|
||||
|
||||
// State machine: start steward epoch, voting, complete voting
|
||||
let steward_epoch_proposals = steward
|
||||
.start_steward_epoch(group_name)
|
||||
.await
|
||||
.expect("Failed to start steward epoch");
|
||||
|
||||
println!("Debug: Steward epoch returned {steward_epoch_proposals} proposals");
|
||||
|
||||
let (proposal_id, action) = steward
|
||||
.get_proposals_for_steward_voting(group_name)
|
||||
.await
|
||||
.expect("Failed to start voting");
|
||||
|
||||
println!("Debug: Proposal ID: {proposal_id}");
|
||||
|
||||
// This message will be printed to the app and allow steward to vote
|
||||
let _steward_voting_proposal_app_message = match action {
|
||||
UserAction::SendToApp(app_msg) => app_msg,
|
||||
_ => panic!("User action is not SendToWaku"),
|
||||
};
|
||||
|
||||
let steward_state = steward
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for steward after making voting proposal");
|
||||
println!("Debug: Steward state after making voting proposal: {steward_state:?}");
|
||||
assert_eq!(steward_state, GroupState::Voting);
|
||||
|
||||
// Now steward can vote
|
||||
let steward_action = steward
|
||||
.process_user_vote(proposal_id, true, group_name)
|
||||
.await
|
||||
.expect("Failed to process steward vote on proposal");
|
||||
|
||||
let mut msgs_to_send: Vec<OutboundPacket> = vec![];
|
||||
// Process any consensus events that were emitted during voting
|
||||
while let Ok((group_name, ev)) = consensus_events.try_recv() {
|
||||
println!(
|
||||
"Debug: Processing consensus event in steward_epoch: {ev:?} for group {group_name}"
|
||||
);
|
||||
|
||||
let wmts = steward
|
||||
.handle_consensus_event(&group_name, ev)
|
||||
.await
|
||||
.expect("Failed to handle consensus event");
|
||||
msgs_to_send.extend(wmts);
|
||||
}
|
||||
|
||||
let steward_state = steward
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for steward after voting");
|
||||
println!("Debug: Steward state after voting: {steward_state:?}");
|
||||
assert_eq!(steward_state, GroupState::Voting);
|
||||
|
||||
let steward_voting_proposal_outbound = match steward_action {
|
||||
UserAction::Outbound(msg) => msg,
|
||||
_ => panic!("User action is not SendToWaku"),
|
||||
};
|
||||
|
||||
// Build the voting proposal message for other users
|
||||
let voting_proposal_inbound = outbound_to_inbound(steward_voting_proposal_outbound);
|
||||
|
||||
(vec![voting_proposal_inbound], proposal_id)
|
||||
}
|
||||
|
||||
async fn user_join_group(user: &mut User, welcome_message: InboundPacket) {
|
||||
let user_res_action = user
|
||||
.process_inbound_packet(welcome_message)
|
||||
.await
|
||||
.expect("Failed to process waku message for user to join the group");
|
||||
|
||||
match user_res_action {
|
||||
UserAction::Outbound(_) => {
|
||||
println!("Debug: user join group");
|
||||
}
|
||||
_ => panic!("User action is not SendToWaku: {user_res_action:?}"),
|
||||
};
|
||||
}
|
||||
|
||||
async fn user_vote_on_proposal(
|
||||
user: &mut User,
|
||||
proposal_message: InboundPacket,
|
||||
vote: bool,
|
||||
group_name: &str,
|
||||
) -> InboundPacket {
|
||||
println!("Debug: user vote on proposal: {proposal_message:?}");
|
||||
let mut consensus_events = user.subscribe_to_consensus_events();
|
||||
let user_action = user
|
||||
.process_inbound_packet(proposal_message)
|
||||
.await
|
||||
.expect("Failed to process waku message for user to vote on proposal");
|
||||
|
||||
let msg = match user_action {
|
||||
UserAction::SendToApp(msg) => {
|
||||
println!("Debug: user got message: {msg:?}");
|
||||
msg
|
||||
}
|
||||
_ => panic!("User action is not SendToApp or DoNothing: {user_action:?}"),
|
||||
};
|
||||
|
||||
let user_state = user
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for user after making voting proposal");
|
||||
println!("Debug: User state after making voting proposal: {user_state:?}");
|
||||
assert_eq!(user_state, GroupState::Voting);
|
||||
|
||||
let proposal_id = match msg.payload {
|
||||
Some(app_message::Payload::VotePayload(vote_payload)) => vote_payload.proposal_id,
|
||||
_ => panic!("User got an unexpected message: {msg:?}",),
|
||||
};
|
||||
|
||||
// after getting voting proposal, user actually should send it into app and get vote result
|
||||
// here we mock it and start to process user vote
|
||||
let user_action = user
|
||||
.process_user_vote(proposal_id, vote, group_name)
|
||||
.await
|
||||
.expect("Failed to process steward vote on proposal");
|
||||
|
||||
let mut msgs_to_send: Vec<OutboundPacket> = vec![];
|
||||
while let Ok((group_name, ev)) = consensus_events.try_recv() {
|
||||
println!(
|
||||
"Debug: Processing consensus event in user_vote_on_proposal: {ev:?} for group {group_name}"
|
||||
);
|
||||
|
||||
let wmts = user
|
||||
.handle_consensus_event(&group_name, ev)
|
||||
.await
|
||||
.expect("Failed to handle consensus event");
|
||||
msgs_to_send.extend(wmts);
|
||||
}
|
||||
|
||||
let user_state = user
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for user after vote");
|
||||
println!("Debug: User state after vote: {user_state:?}");
|
||||
assert_eq!(user_state, GroupState::Waiting);
|
||||
|
||||
let msg = match user_action {
|
||||
UserAction::Outbound(msg) => msg,
|
||||
_ => panic!("User action is not SendToWaku: {user_action:?}"),
|
||||
};
|
||||
|
||||
outbound_to_inbound(msg)
|
||||
}
|
||||
|
||||
async fn process_and_handle_trigger_event_message(
|
||||
user: &mut User,
|
||||
inbound_message: InboundPacket,
|
||||
expected_group_state: GroupState,
|
||||
) -> Vec<InboundPacket> {
|
||||
let mut consensus_events = user.subscribe_to_consensus_events();
|
||||
|
||||
user.process_inbound_packet(inbound_message)
|
||||
.await
|
||||
.expect("Failed to process waku message");
|
||||
|
||||
let mut msgs_to_send: Vec<OutboundPacket> = vec![];
|
||||
// Process any consensus events that were emitted during voting
|
||||
while let Ok((group_name, ev)) = consensus_events.try_recv() {
|
||||
println!(
|
||||
"Debug: Processing consensus event in steward_epoch: {ev:?} for group {group_name}"
|
||||
);
|
||||
|
||||
let wmts = user
|
||||
.handle_consensus_event(&group_name, ev)
|
||||
.await
|
||||
.expect("Failed to handle consensus event");
|
||||
msgs_to_send.extend(wmts);
|
||||
}
|
||||
|
||||
let waku_msgs_to_send: Vec<InboundPacket> =
|
||||
msgs_to_send.into_iter().map(outbound_to_inbound).collect();
|
||||
|
||||
let user_state = user
|
||||
.get_group_state(GROUP_NAME)
|
||||
.await
|
||||
.expect("Failed to get group state for user after processing trigger event message");
|
||||
println!("Debug: User state after voting: {user_state:?}");
|
||||
assert_eq!(user_state, expected_group_state);
|
||||
|
||||
waku_msgs_to_send
|
||||
}
|
||||
|
||||
async fn check_users_have_same_group_stats(
|
||||
alice: &User,
|
||||
bob: &User,
|
||||
group_name: &str,
|
||||
expected_members: usize,
|
||||
expected_epoch: u64,
|
||||
) {
|
||||
let alice_group_state = alice
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for Alice");
|
||||
assert_eq!(alice_group_state, GroupState::Working);
|
||||
let bob_group_state = bob
|
||||
.get_group_state(group_name)
|
||||
.await
|
||||
.expect("Failed to get group state for Bob");
|
||||
assert_eq!(bob_group_state, GroupState::Working);
|
||||
|
||||
let bob_members = bob
|
||||
.get_group_number_of_members(group_name)
|
||||
.await
|
||||
.expect("Failed to get number of members for Bob");
|
||||
let bob_epoch = bob
|
||||
.get_group_mls_epoch(group_name)
|
||||
.await
|
||||
.expect("Failed to get MLS epoch for Bob");
|
||||
assert_eq!(
|
||||
bob_members, expected_members,
|
||||
"Wrong number of members in the group for Bob"
|
||||
);
|
||||
assert_eq!(
|
||||
bob_epoch, expected_epoch,
|
||||
"Bob group epoch is not {expected_epoch}"
|
||||
);
|
||||
|
||||
let res = alice.get_group(group_name.clone());
|
||||
assert!(res.is_ok(), "Failed to get group");
|
||||
let alice_group = res.unwrap();
|
||||
let alice_members = alice
|
||||
.get_group_number_of_members(group_name)
|
||||
.await
|
||||
.expect("Failed to get number of members for Alice");
|
||||
let alice_epoch = alice
|
||||
.get_group_mls_epoch(group_name)
|
||||
.await
|
||||
.expect("Failed to get MLS epoch for Alice");
|
||||
assert_eq!(
|
||||
alice_group.is_mls_group_initialized(),
|
||||
alice_members, expected_members,
|
||||
"Wrong number of members in the group for Alice"
|
||||
);
|
||||
assert_eq!(
|
||||
alice_epoch, expected_epoch,
|
||||
"Alice group epoch is not {expected_epoch}"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
bob_members, alice_members,
|
||||
"Bob and Alice have different members"
|
||||
);
|
||||
assert_eq!(
|
||||
bob_epoch, alice_epoch,
|
||||
"Bob and Alice have different MLS epochs"
|
||||
);
|
||||
}
|
||||
|
||||
// async fn create_ban_request_message(user: &mut User, group_name: &str) -> WakuMessage {
|
||||
// let ban_request_msg = BanRequest {
|
||||
// user_to_ban: CAROL_WALLET_ADDRESS.to_string(),
|
||||
// requester: ALICE_WALLET_ADDRESS.to_string(), // The current user is the requester
|
||||
// group_name: group_name.to_string(),
|
||||
// };
|
||||
|
||||
// let waku_msg = user
|
||||
// .process_ban_request(ban_request_msg, group_name)
|
||||
// .await
|
||||
// .expect("Failed to process ban request");
|
||||
|
||||
// let waku_msg = waku_msg
|
||||
// .build_waku_message()
|
||||
// .expect("Failed to build waku message");
|
||||
// waku_msg
|
||||
// }
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invite_user_to_group_flow() {
|
||||
let (mut steward, mut user) = create_two_test_user_with_group(GROUP_NAME).await;
|
||||
|
||||
let ga_message = get_group_announcement_message(&mut steward, GROUP_NAME).await;
|
||||
|
||||
share_group_announcement_for_one_user(&mut steward, &mut user, ga_message.clone()).await;
|
||||
|
||||
let steward_epoch_messages =
|
||||
steward_epoch_without_user_in_group(&mut steward, GROUP_NAME).await;
|
||||
|
||||
user_join_group(&mut user, steward_epoch_messages[1].clone()).await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&steward,
|
||||
&user,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_2,
|
||||
EXPECTED_EPOCH_1,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invite_users_to_group_same_epoch_flow() {
|
||||
let (mut steward, mut user, mut user2) = create_three_test_user_with_group(GROUP_NAME).await;
|
||||
|
||||
let ga_message = get_group_announcement_message(&mut steward, GROUP_NAME).await;
|
||||
|
||||
share_group_announcement_for_one_user(&mut steward, &mut user, ga_message.clone()).await;
|
||||
share_group_announcement_for_one_user(&mut steward, &mut user2, ga_message.clone()).await;
|
||||
|
||||
let steward_epoch_messages =
|
||||
steward_epoch_without_user_in_group(&mut steward, GROUP_NAME).await;
|
||||
|
||||
user_join_group(&mut user, steward_epoch_messages[1].clone()).await;
|
||||
user_join_group(&mut user2, steward_epoch_messages[1].clone()).await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&steward,
|
||||
&user,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_3,
|
||||
EXPECTED_EPOCH_1,
|
||||
)
|
||||
.await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&steward,
|
||||
&user2,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_3,
|
||||
EXPECTED_EPOCH_1,
|
||||
)
|
||||
.await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&user,
|
||||
&user2,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_3,
|
||||
EXPECTED_EPOCH_1,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_invite_users_to_group_different_epoch_flow() {
|
||||
let (mut steward, mut bob, mut carol) = create_three_test_user_with_group(GROUP_NAME).await;
|
||||
|
||||
let ga_message = get_group_announcement_message(&mut steward, GROUP_NAME).await;
|
||||
share_group_announcement_for_one_user(&mut steward, &mut bob, ga_message.clone()).await;
|
||||
|
||||
let steward_epoch_messages =
|
||||
steward_epoch_without_user_in_group(&mut steward, GROUP_NAME).await;
|
||||
user_join_group(&mut bob, steward_epoch_messages[1].clone()).await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&steward,
|
||||
&bob,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_2,
|
||||
EXPECTED_EPOCH_1,
|
||||
)
|
||||
.await;
|
||||
|
||||
println!("START NEW EPOCH");
|
||||
println!("--------------------------------");
|
||||
|
||||
let ga_message_2 = get_group_announcement_message(&mut steward, GROUP_NAME).await;
|
||||
share_group_announcement_for_one_user(&mut steward, &mut carol, ga_message_2.clone()).await;
|
||||
|
||||
let (steward_epoch_messages_2, _) =
|
||||
steward_epoch_with_user_in_group(&mut steward, GROUP_NAME).await;
|
||||
|
||||
println!("Debug: steward vote, wait for user vote");
|
||||
println!("--------------------------------");
|
||||
|
||||
let waku_vote_message = user_vote_on_proposal(
|
||||
&mut bob,
|
||||
steward_epoch_messages_2[0].clone(),
|
||||
true,
|
||||
"MLS group is notinitialized"
|
||||
);
|
||||
GROUP_NAME,
|
||||
)
|
||||
.await;
|
||||
|
||||
let bob_priv_key = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
|
||||
let res = User::new(bob_priv_key);
|
||||
assert!(res.is_ok(), "Failed to create user");
|
||||
let mut bob = res.unwrap();
|
||||
assert!(
|
||||
bob.create_group(group_name.clone(), false).await.is_ok(),
|
||||
"Failed to create group"
|
||||
);
|
||||
let waku_msgs_to_send = process_and_handle_trigger_event_message(
|
||||
&mut steward,
|
||||
waku_vote_message,
|
||||
GroupState::Working,
|
||||
)
|
||||
.await;
|
||||
|
||||
let res = bob.get_group(group_name.clone());
|
||||
assert!(res.is_ok(), "Failed to get group");
|
||||
let bob_group = res.unwrap();
|
||||
assert_eq!(
|
||||
bob_group.is_mls_group_initialized(),
|
||||
false,
|
||||
"MLS group is initialized"
|
||||
);
|
||||
bob.process_inbound_packet(waku_msgs_to_send[0].clone())
|
||||
.await
|
||||
.expect("Failed to process waku message");
|
||||
|
||||
let _ = join_group_flow(&mut alice, &mut bob, group_name.clone()).await;
|
||||
user_join_group(&mut carol, waku_msgs_to_send[1].clone()).await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&steward,
|
||||
&bob,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_3,
|
||||
EXPECTED_EPOCH_2,
|
||||
)
|
||||
.await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&steward,
|
||||
&carol,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_3,
|
||||
EXPECTED_EPOCH_2,
|
||||
)
|
||||
.await;
|
||||
|
||||
check_users_have_same_group_stats(
|
||||
&bob,
|
||||
&carol,
|
||||
GROUP_NAME,
|
||||
EXPECTED_MEMBERS_3,
|
||||
EXPECTED_EPOCH_2,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
async fn join_group_flow(alice: &mut User, bob: &mut User, group_name: String) -> UserAction {
|
||||
// Alice send Group Announcement msg to Bob
|
||||
let res = alice.prepare_admin_msg(group_name.clone()).await;
|
||||
assert!(res.is_ok(), "Failed to prepare admin message");
|
||||
let alice_ga_msg = res.unwrap();
|
||||
// #[tokio::test]
|
||||
// async fn test_remove_user_flow_request_from_steward() {
|
||||
// let (mut steward, mut bob, mut carol) = create_three_test_user_with_group(GROUP_NAME).await;
|
||||
|
||||
let res = alice_ga_msg.build_waku_message();
|
||||
assert!(res.is_ok(), "Failed to build waku message");
|
||||
let waku_ga_message = res.unwrap();
|
||||
// let ga_message = get_group_announcement_message(&mut steward, GROUP_NAME).await;
|
||||
|
||||
// Bob receives the Group Announcement msg and send Key Package Share msg to Alice
|
||||
let res = bob.process_waku_msg(waku_ga_message).await;
|
||||
assert!(res.is_ok(), "Failed to process waku message");
|
||||
let user_action = res.unwrap();
|
||||
assert!(user_action.len() == 1, "User action is not a single action");
|
||||
// share_group_announcement_for_one_user(&mut steward, &mut bob, ga_message.clone()).await;
|
||||
// share_group_announcement_for_one_user(&mut steward, &mut carol, ga_message.clone()).await;
|
||||
|
||||
let bob_kp_message = match user_action[0].clone() {
|
||||
UserAction::SendToWaku(msg) => msg,
|
||||
_ => panic!("User action is not SendToWaku"),
|
||||
};
|
||||
let res = bob_kp_message.build_waku_message();
|
||||
assert!(res.is_ok(), "Failed to build waku message");
|
||||
let waku_kp_message = res.unwrap();
|
||||
// let steward_epoch_messages =
|
||||
// steward_epoch_without_user_in_group(&mut steward, GROUP_NAME).await;
|
||||
|
||||
// Alice receives the Key Package Share msg and send Welcome msg to Bob
|
||||
let res = alice.process_waku_msg(waku_kp_message).await;
|
||||
assert!(res.is_ok(), "Failed to process waku message");
|
||||
let user_action_invite = res.unwrap();
|
||||
assert!(
|
||||
user_action_invite.len() == 2,
|
||||
"User action is not a two actions"
|
||||
);
|
||||
// user_join_group(&mut bob, steward_epoch_messages[1].clone()).await;
|
||||
// user_join_group(&mut carol, steward_epoch_messages[1].clone()).await;
|
||||
|
||||
let alice_welcome_message = match user_action_invite[1].clone() {
|
||||
UserAction::SendToWaku(msg) => msg,
|
||||
_ => panic!("User action is not SendToWaku"),
|
||||
};
|
||||
let res = alice_welcome_message.build_waku_message();
|
||||
assert!(res.is_ok(), "Failed to build waku message");
|
||||
let waku_welcome_message = res.unwrap();
|
||||
// check_users_have_same_group_stats(
|
||||
// &steward,
|
||||
// &bob,
|
||||
// GROUP_NAME,
|
||||
// EXPECTED_MEMBERS_3,
|
||||
// EXPECTED_EPOCH_1,
|
||||
// )
|
||||
// .await;
|
||||
|
||||
// Bob receives the Welcome msg and join the group
|
||||
let res = bob.process_waku_msg(waku_welcome_message).await;
|
||||
assert!(res.is_ok(), "Failed to process waku message");
|
||||
let user_action = res.unwrap();
|
||||
assert!(user_action.len() == 1, "User action is not a single action");
|
||||
let bob_group = bob.get_group(group_name.clone()).unwrap();
|
||||
assert!(
|
||||
bob_group.is_mls_group_initialized(),
|
||||
"MLS group is not initialized"
|
||||
);
|
||||
// check_users_have_same_group_stats(
|
||||
// &steward,
|
||||
// &carol,
|
||||
// GROUP_NAME,
|
||||
// EXPECTED_MEMBERS_3,
|
||||
// EXPECTED_EPOCH_1,
|
||||
// )
|
||||
// .await;
|
||||
|
||||
user_action_invite[0].clone()
|
||||
}
|
||||
// check_users_have_same_group_stats(
|
||||
// &bob,
|
||||
// &carol,
|
||||
// GROUP_NAME,
|
||||
// EXPECTED_MEMBERS_3,
|
||||
// EXPECTED_EPOCH_1,
|
||||
// )
|
||||
// .await;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_remove_user_flow() {
|
||||
let group_name = "new_group".to_string();
|
||||
// let ban_request_message = create_ban_request_message(&mut steward, GROUP_NAME).await;
|
||||
|
||||
let alice_priv_key = "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d";
|
||||
let mut alice = User::new(alice_priv_key).unwrap();
|
||||
alice.create_group(group_name.clone(), true).await.unwrap();
|
||||
// let action = bob
|
||||
// .process_waku_message(ban_request_message.clone())
|
||||
// .await
|
||||
// .expect("Failed to process ban request");
|
||||
|
||||
let bob_priv_key = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
|
||||
let mut bob = User::new(bob_priv_key).unwrap();
|
||||
bob.create_group(group_name.clone(), false).await.unwrap();
|
||||
// match action {
|
||||
// UserAction::SendToApp(_) => {
|
||||
// println!("Debug: SendToApp action");
|
||||
// }
|
||||
// _ => panic!("Expected SendToApp action"),
|
||||
// }
|
||||
|
||||
let carol_priv_key = "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a";
|
||||
let mut carol = User::new(carol_priv_key).unwrap();
|
||||
carol.create_group(group_name.clone(), false).await.unwrap();
|
||||
// let action = carol
|
||||
// .process_waku_message(ban_request_message.clone())
|
||||
// .await
|
||||
// .expect("Failed to process ban request");
|
||||
|
||||
let _ = join_group_flow(&mut alice, &mut bob, group_name.clone()).await;
|
||||
let res = bob.get_group(group_name.clone());
|
||||
assert!(res.is_ok(), "Failed to get group");
|
||||
let bob_group = res.unwrap();
|
||||
assert!(
|
||||
bob_group.is_mls_group_initialized(),
|
||||
"MLS group is not initialized"
|
||||
);
|
||||
// match action {
|
||||
// UserAction::SendToApp(_) => {
|
||||
// println!("Debug: SendToApp action");
|
||||
// }
|
||||
// _ => panic!("Expected SendToApp action"),
|
||||
// }
|
||||
|
||||
let commit_action = join_group_flow(&mut alice, &mut carol, group_name.clone()).await;
|
||||
let res = carol.get_group(group_name.clone());
|
||||
assert!(res.is_ok(), "Failed to get group");
|
||||
let carol_group = res.unwrap();
|
||||
assert!(
|
||||
carol_group.is_mls_group_initialized(),
|
||||
"MLS group is not initialized"
|
||||
);
|
||||
let pmt = match commit_action {
|
||||
UserAction::SendToWaku(msg) => msg,
|
||||
_ => panic!("User action is not SendToWaku"),
|
||||
};
|
||||
let commit_message = pmt.build_waku_message();
|
||||
assert!(commit_message.is_ok(), "Failed to build waku message");
|
||||
let waku_commit_message = commit_message.unwrap();
|
||||
// let (steward_epoch_messages, proposal_id) =
|
||||
// steward_epoch_with_user_in_group(&mut steward, GROUP_NAME).await;
|
||||
|
||||
let res = bob.process_waku_msg(waku_commit_message.clone()).await;
|
||||
assert!(res.is_ok(), "Failed to process waku message");
|
||||
// steward
|
||||
// .set_up_consensus_threshold_for_group(GROUP_NAME, proposal_id, 1f64)
|
||||
// .await
|
||||
// .expect("Can't setup threshold");
|
||||
|
||||
let raw_msg = RawWsMessage {
|
||||
message: serde_json::to_string(&UserMessage {
|
||||
message: "/ban f39fd6e51aad88f6f4ce6ab8827279cfffb92266".to_string(),
|
||||
group_id: group_name.clone(),
|
||||
})
|
||||
.unwrap(),
|
||||
};
|
||||
// println!("Debug: Bob vote");
|
||||
// let waku_vote_message = user_vote_on_proposal(
|
||||
// &mut bob,
|
||||
// steward_epoch_messages[0].clone(),
|
||||
// true,
|
||||
// GROUP_NAME,
|
||||
// )
|
||||
// .await;
|
||||
|
||||
let ws_action = match serde_json::from_str(&raw_msg.message) {
|
||||
Ok(UserMessage { message, group_id }) => {
|
||||
let ws_action = if message.starts_with("/") {
|
||||
let mut tokens = message.split_whitespace();
|
||||
let ws = match tokens.next() {
|
||||
Some("/ban") => {
|
||||
let user_to_ban = tokens.next().unwrap();
|
||||
WsAction::RemoveUser(user_to_ban.to_string(), group_id.clone())
|
||||
}
|
||||
_ => {
|
||||
assert!(false, "Invalid user message");
|
||||
WsAction::DoNothing
|
||||
}
|
||||
};
|
||||
ws
|
||||
} else {
|
||||
WsAction::UserMessage(UserMessage { message, group_id })
|
||||
};
|
||||
ws_action
|
||||
}
|
||||
Err(_) => {
|
||||
assert!(false, "Failed to parse user message");
|
||||
WsAction::DoNothing
|
||||
}
|
||||
};
|
||||
assert_eq!(
|
||||
ws_action,
|
||||
WsAction::RemoveUser(
|
||||
"f39fd6e51aad88f6f4ce6ab8827279cfffb92266".to_string(),
|
||||
group_name.clone()
|
||||
)
|
||||
);
|
||||
// println!("Debug: Carol vote");
|
||||
// let waku_vote_message_2 = user_vote_on_proposal(
|
||||
// &mut carol,
|
||||
// steward_epoch_messages[0].clone(),
|
||||
// true,
|
||||
// GROUP_NAME,
|
||||
// )
|
||||
// .await;
|
||||
|
||||
let pmt = match ws_action {
|
||||
WsAction::RemoveUser(user_to_ban, group_name) => {
|
||||
let res = alice
|
||||
.remove_users_from_group(vec![user_to_ban], group_name.clone())
|
||||
.await;
|
||||
assert!(res.is_ok(), "Failed to remove user from group");
|
||||
res.unwrap()
|
||||
}
|
||||
_ => panic!("User action is not RemoveUser"),
|
||||
};
|
||||
// println!("Debug: steward process bob vote");
|
||||
// let waku_msgs_to_send = process_and_handle_trigger_event_message(
|
||||
// &mut steward,
|
||||
// waku_vote_message,
|
||||
// GroupState::Voting,
|
||||
// )
|
||||
// .await;
|
||||
// println!("Debug: waku_msgs_to_send after bob vote: {waku_msgs_to_send:?}");
|
||||
|
||||
let commit_message = pmt.build_waku_message();
|
||||
assert!(commit_message.is_ok(), "Failed to build waku message");
|
||||
let waku_commit_message = commit_message.unwrap();
|
||||
|
||||
let res = carol.process_waku_msg(waku_commit_message.clone()).await;
|
||||
assert!(res.is_ok(), "Failed to process waku message");
|
||||
let carol_group = carol.get_group(group_name.clone()).unwrap();
|
||||
assert!(
|
||||
carol_group.members_identity().await.len() == 2,
|
||||
"Bob is not removed from the group"
|
||||
);
|
||||
|
||||
let res = bob.process_waku_msg(waku_commit_message.clone()).await;
|
||||
assert!(res.is_ok(), "Failed to process waku message");
|
||||
let user_action = res.unwrap();
|
||||
assert!(user_action.len() == 1, "User action is not a single action");
|
||||
assert_eq!(
|
||||
user_action[0].clone(),
|
||||
UserAction::RemoveGroup(group_name.clone()),
|
||||
"User action is not RemoveGroup"
|
||||
);
|
||||
let res = bob.leave_group(group_name.clone()).await;
|
||||
assert!(res.is_ok(), "Failed to leave group");
|
||||
assert_eq!(bob.if_group_exists(group_name.clone()), false);
|
||||
}
|
||||
// println!("Debug: steward process carol vote");
|
||||
// let waku_msgs_to_send_2 = process_and_handle_trigger_event_message(
|
||||
// &mut steward,
|
||||
// waku_vote_message_2,
|
||||
// GroupState::Working,
|
||||
// )
|
||||
// .await;
|
||||
// println!("Debug: waku_msgs_to_send_2 after carol vote: {waku_msgs_to_send_2:?}");
|
||||
// }
|
||||
|
||||
Reference in New Issue
Block a user