Compare commits
7 Commits
develop
...
waku-execu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
093a2c59ff | ||
|
|
42c304342f | ||
|
|
e366a02af6 | ||
|
|
dcffb907c8 | ||
|
|
c85e701638 | ||
|
|
e21ca88e4c | ||
|
|
dfb4de81ba |
1
.github/CODEOWNERS
vendored
@@ -1 +0,0 @@
|
||||
* @Cofson @fbarbu15
|
||||
1
.github/workflows/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
.DS_Store
|
||||
75
.github/workflows/markdown-lint.yml
vendored
@@ -1,75 +0,0 @@
|
||||
name: markdown-linting
|
||||
|
||||
on:
|
||||
|
||||
pull_request:
|
||||
branches:
|
||||
- '**'
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
- name: Validate Metadata And Markdown Lint
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
BASE="${{ github.event.pull_request.base.sha }}"
|
||||
HEAD="${{ github.event.pull_request.head.sha }}"
|
||||
|
||||
python3 scripts/lint_targets.py --base-sha "$BASE" --head-sha "$HEAD" --output .lint-targets.txt
|
||||
|
||||
if [ ! -s .lint-targets.txt ]; then
|
||||
echo "No non-raw markdown targets."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
mapfile -t files < .lint-targets.txt
|
||||
|
||||
python3 scripts/validate_metadata.py --check
|
||||
npx markdownlint-cli2@0.12.1 --config .markdownlint.yaml "${files[@]}"
|
||||
|
||||
remark:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm install
|
||||
|
||||
- name: Remark Linter
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
BASE="${{ github.event.pull_request.base.sha }}"
|
||||
HEAD="${{ github.event.pull_request.head.sha }}"
|
||||
|
||||
python3 scripts/lint_targets.py --base-sha "$BASE" --head-sha "$HEAD" --output .lint-targets.txt
|
||||
|
||||
if [ ! -s .lint-targets.txt ]; then
|
||||
echo "No non-raw markdown targets."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
mapfile -t files < .lint-targets.txt
|
||||
npx remark --frail --no-stdout --rc-path .remarkrc.mjs "${files[@]}"
|
||||
4
.gitignore
vendored
@@ -1,4 +0,0 @@
|
||||
book
|
||||
node_modules
|
||||
docs/logos-lips.json
|
||||
docs/SUMMARY.md
|
||||
@@ -1,50 +0,0 @@
|
||||
# markdownlint rules intentionally relaxed for broad Markdown compatibility.
|
||||
# Keep semantics-focused checks enabled; disable mostly formatting/style-only checks.
|
||||
|
||||
# Unordered list style consistency.
|
||||
MD004: false
|
||||
|
||||
# Nested list indentation width (allows both 2-space and 4-space nesting).
|
||||
MD007: false
|
||||
|
||||
# Trailing spaces (permits hard line breaks and legacy formatting).
|
||||
MD009: false
|
||||
|
||||
# Multiple consecutive blank lines.
|
||||
MD012: false
|
||||
|
||||
# Line length.
|
||||
MD013: false
|
||||
|
||||
# Require blank lines around headings.
|
||||
MD022: false
|
||||
|
||||
# Duplicate heading text.
|
||||
MD024: false
|
||||
|
||||
# Multiple top-level headings in a document.
|
||||
MD025: false
|
||||
|
||||
# Trailing punctuation in headings.
|
||||
MD026: false
|
||||
|
||||
# Require blank lines around fenced code blocks.
|
||||
MD031: false
|
||||
|
||||
# Require blank lines around lists.
|
||||
MD032: false
|
||||
|
||||
# Inline HTML.
|
||||
MD033: false
|
||||
|
||||
# Emphasis used instead of a heading.
|
||||
MD036: false
|
||||
|
||||
# Code block style (fenced vs indented).
|
||||
MD046: false
|
||||
|
||||
# Files should end with a single newline.
|
||||
MD047: false
|
||||
|
||||
# Emphasis marker style (_ vs *).
|
||||
MD049: false
|
||||
@@ -1,3 +0,0 @@
|
||||
book/
|
||||
node_modules/
|
||||
.remarkrc.mjs
|
||||
@@ -1,79 +0,0 @@
|
||||
// Based on https://github.com/status-im/status-web/blob/07e85e1d9eddc10be47e19c11a3bc5b6d3919c47/apps/status.app/.remarkrc.mjs
|
||||
import remarkGfm from 'remark-gfm'
|
||||
import remarkPresetLintConsistent from 'remark-preset-lint-consistent'
|
||||
import remarkPresetLintRecommended from 'remark-preset-lint-recommended'
|
||||
|
||||
const disabledRecommended = new Set([
|
||||
'remark-lint:list-item-bullet-indent', // Do not force one bullet indentation style.
|
||||
'remark-lint:no-blockquote-without-marker', // Allow plain wrapped quotes in imported docs.
|
||||
'remark-lint:no-undefined-references', // Permit refs resolved by external tooling/contexts.
|
||||
'remark-lint:ordered-list-marker-style', // Allow mixed ordered marker styles.
|
||||
'remark-lint:hard-break-spaces', // Do not enforce trailing-space hard-break style.
|
||||
'remark-lint:final-newline', // Do not require final newline in every file.
|
||||
'remark-lint:no-shortcut-reference-link', // Allow shortcut reference links.
|
||||
'remark-lint:no-shortcut-reference-image', // Allow shortcut reference images.
|
||||
])
|
||||
|
||||
const disabledConsistent = new Set([
|
||||
'remark-lint:code-block-style', // Allow both fenced and indented code blocks.
|
||||
'remark-lint:list-item-content-indent', // Do not enforce one list content indent style.
|
||||
'remark-lint:ordered-list-marker-style', // Allow mixed ordered marker styles.
|
||||
'remark-lint:table-cell-padding', // Do not enforce fixed table cell padding style.
|
||||
'remark-lint:blockquote-indentation', // Allow mixed blockquote indentation styles.
|
||||
])
|
||||
|
||||
const filteredRecommended = {
|
||||
...remarkPresetLintRecommended,
|
||||
plugins: remarkPresetLintRecommended.plugins.filter((plugin) => {
|
||||
const entry = Array.isArray(plugin) ? plugin[0] : plugin
|
||||
const name = typeof entry === 'string' ? entry : entry?.name
|
||||
return !disabledRecommended.has(name)
|
||||
}),
|
||||
}
|
||||
|
||||
const filteredConsistent = {
|
||||
...remarkPresetLintConsistent,
|
||||
plugins: remarkPresetLintConsistent.plugins.filter((plugin) => {
|
||||
const entry = Array.isArray(plugin) ? plugin[0] : plugin
|
||||
const name = typeof entry === 'string' ? entry : entry?.name
|
||||
return !disabledConsistent.has(name)
|
||||
}),
|
||||
}
|
||||
|
||||
/** @type {Array<import('unified').Plugin | import('unified').Preset>} */
|
||||
const plugins = [
|
||||
remarkGfm,
|
||||
filteredConsistent,
|
||||
filteredRecommended,
|
||||
['remark-lint-no-html', false], // Allow inline HTML in legacy/spec docs.
|
||||
['remark-lint-file-extension', false], // Do not enforce file extension policy.
|
||||
['remark-lint-no-literal-urls', false], // Allow raw URLs for readability.
|
||||
['remark-lint-no-paragraph-content-indent', false], // Allow paragraph indentation variants.
|
||||
['remark-lint-maximum-heading-length', false], // Do not cap heading length.
|
||||
['remark-lint-maximum-line-length', false], // Do not cap line length.
|
||||
['remark-lint-ordered-list-marker-value', false], // Do not enforce sequential list markers.
|
||||
['remark-lint-unordered-list-marker-style', false], // Allow mixed unordered list markers.
|
||||
['remark-lint-table-pipe-alignment', false], // Do not enforce table pipe alignment.
|
||||
['remark-lint-heading-style', false], // Allow ATX/setext heading style mix.
|
||||
['remark-lint-first-heading-level', false], // Do not enforce first heading level.
|
||||
['remark-lint-list-item-indent', false], // Allow list item indentation variants.
|
||||
['remark-lint-list-item-spacing', false], // Allow flexible blank lines in lists.
|
||||
['remark-lint-heading-increment', false], // Do not require strict heading level increments.
|
||||
['remark-lint-no-duplicate-headings', false], // Allow duplicate headings across document.
|
||||
['remark-lint-no-duplicate-headings-in-section', false], // Allow duplicate headings in sections.
|
||||
['remark-lint-no-emphasis-as-heading', false], // Allow emphasized lines used as pseudo-headings.
|
||||
['remark-lint-emphasis-marker', false], // Allow mixed emphasis marker styles.
|
||||
]
|
||||
|
||||
/** @type {import('unified').Preset} */
|
||||
export default {
|
||||
settings: {
|
||||
emphasis: '_',
|
||||
bullet: '-',
|
||||
quote: '"',
|
||||
listItemIndent: 'one',
|
||||
rule: '-',
|
||||
incrementListMarker: false,
|
||||
},
|
||||
plugins,
|
||||
}
|
||||
59
Jenkinsfile
vendored
@@ -1,59 +0,0 @@
|
||||
#!/usr/bin/env groovy
|
||||
library 'status-jenkins-lib@v1.9.40'
|
||||
|
||||
pipeline {
|
||||
agent {
|
||||
docker {
|
||||
label 'linuxcontainer'
|
||||
image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0'
|
||||
args '--volume=/nix:/nix ' +
|
||||
'--volume=/etc/nix:/etc/nix ' +
|
||||
'--user jenkins'
|
||||
}
|
||||
}
|
||||
|
||||
options {
|
||||
disableConcurrentBuilds()
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '20',
|
||||
daysToKeepStr: '30',
|
||||
))
|
||||
}
|
||||
|
||||
environment {
|
||||
GIT_COMMITTER_NAME = 'status-im-auto'
|
||||
GIT_COMMITTER_EMAIL = 'auto@status.im'
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Build') {
|
||||
steps { script {
|
||||
nix.develop('make build')
|
||||
jenkins.genBuildMetaJSON('book/build.json')
|
||||
} }
|
||||
}
|
||||
|
||||
stage('Publish') {
|
||||
steps {
|
||||
sshagent(credentials: ['status-im-auto-ssh']) {
|
||||
script {
|
||||
nix.develop("""
|
||||
ghp-import \
|
||||
-b ${deployBranch()} \
|
||||
-c ${deployDomain()} \
|
||||
-p book
|
||||
""", pure: false)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
post {
|
||||
cleanup { cleanWs() }
|
||||
}
|
||||
}
|
||||
|
||||
def isMasterBranch() { GIT_BRANCH ==~ /.*master/ }
|
||||
def deployBranch() { isMasterBranch() ? 'deploy-master' : 'deploy-develop' }
|
||||
def deployDomain() { isMasterBranch() ? 'lip.logos.co' : 'dev-lip.logos.co' }
|
||||
19
Makefile
@@ -1,19 +0,0 @@
|
||||
.PHONY: install lint build serve
|
||||
|
||||
MDBOOK_VERSION ?= 0.4.52
|
||||
|
||||
install:
|
||||
cargo install mdbook --version $(MDBOOK_VERSION)
|
||||
|
||||
serve:
|
||||
python scripts/run_runtime_generators.py
|
||||
mdbook serve -p 3000 -n 0.0.0.0
|
||||
|
||||
build:
|
||||
python scripts/run_runtime_generators.py
|
||||
mdbook build
|
||||
|
||||
lint:
|
||||
python scripts/validate_metadata.py --check
|
||||
npx markdownlint-cli2@0.12.1 "docs/**/*.md" --config .markdownlint.yaml
|
||||
npm run lint:remark
|
||||
78
README.md
@@ -1,74 +1,22 @@
|
||||
# Logos LIP (Logos Improvement Proposals) Index
|
||||
# Vac Request For Comments(RFC)
|
||||
|
||||
NOTE: This repo is still evolving while the LIP process is refined.
|
||||
*NOTE*: This repo is WIP. We are currently restructuring the RFC process.
|
||||
|
||||
This repository contains specifications from the Messaging, Blockchain, Storage,
|
||||
and IFT-TS components of the IFT portfolio. LIPs are Requests for Comments that
|
||||
document protocols, processes, and system interfaces in a consistent, reviewable
|
||||
format.
|
||||
## RFC Process
|
||||
|
||||
## LIP process
|
||||
This repository contains specifications from the [Waku](https://waku.org/), [Nomos](https://nomos.tech/),
|
||||
[Codex](https://codex.storage/), and [Status](https://status.app/) projects that are part of the [IFT portfolio](https://free.technology/).
|
||||
[Vac](https://vac.dev) is an [IFT service](https://free.technology/services) that will manage the RFC process within this repository.
|
||||
The goal of the RFC, [Request for Comments](https://en.wikipedia.org/wiki/Request_for_Comments),
|
||||
process is to standardize technical specifications.
|
||||
Specifications will adhere to [1/COSS](./vac/1/coss.md) by obtaining a rough consensus within each project.
|
||||
|
||||
This repository replaces the old rfc.vac.dev resource. Specs are maintained in
|
||||
Markdown here and progress through statuses such as raw, draft, stable, or
|
||||
deprecated. The process and lifecycle are defined in:
|
||||
|
||||
- 1/COSS: `docs/ift-ts/raw/1/coss.md`
|
||||
**See [rfc.vac.dev](https://rfc.vac.dev) for an easy to browse index of all RFCs.**
|
||||
|
||||
## Contributing
|
||||
|
||||
1. Open a pull request against this repo.
|
||||
2. Add or update the LIP in the appropriate component folder.
|
||||
3. Include status and category metadata in the header table.
|
||||
Please see [1/COSS](https://rfc.vac.dev/spec/1/) for general guidelines and specification lifecycle.
|
||||
|
||||
If you are unsure where a document belongs, open an issue first and we will
|
||||
help route it.
|
||||
Feel free to join the [Vac discord](https://discord.gg/Vy54fEWuqC).
|
||||
|
||||
## Component indexes
|
||||
|
||||
- Messaging: `docs/messaging/README.md`
|
||||
- Blockchain: `docs/blockchain/README.md`
|
||||
- Storage: `docs/storage/README.md`
|
||||
- IFT-TS: `docs/ift-ts/README.md`
|
||||
|
||||
## Local setup
|
||||
|
||||
1. Install mdBook (pick the version that matches your Rust toolchain).
|
||||
2. Install Python dependencies if needed.
|
||||
|
||||
To install mdBook via Make:
|
||||
|
||||
```bash
|
||||
make install
|
||||
```
|
||||
|
||||
## Build and serve
|
||||
|
||||
Run the generators before building or serving:
|
||||
|
||||
```bash
|
||||
python scripts/run_runtime_generators.py
|
||||
mdbook build
|
||||
```
|
||||
|
||||
To serve locally:
|
||||
|
||||
```bash
|
||||
make serve
|
||||
```
|
||||
|
||||
Or run the commands directly:
|
||||
|
||||
```bash
|
||||
python scripts/run_runtime_generators.py
|
||||
mdbook serve -p 3000 -n 0.0.0.0
|
||||
```
|
||||
|
||||
## CI/CD
|
||||
|
||||
- [CI builds](https://ci.infra.status.im/job/website/job/lip.logos.co/) `master` and pushes to `deploy-master` branch, which is hosted at <https://lip.logos.co/>.
|
||||
- [CI builds](https://ci.infra.status.im/job/website/job/dev-lip.logos.co/) `develop` and pushes to `deploy-develop` branch, which is hosted at <https://dev-lip.logos.co.dev/>.
|
||||
|
||||
The hosting is done using [Caddy server with Git plugin for handling GitHub webhooks](https://github.com/status-im/infra-sites/blob/b930491f44b4958957b998d20ca222b1e10c4d67/ansible/vars/sites/caddy_git.yml#L123-L149).
|
||||
|
||||
Information about deployed build can be also found in `/build.json` available on the website.
|
||||
Here's the project board used by core contributors and maintainers: [Projects](https://github.com/orgs/vacp2p/projects/5)
|
||||
|
||||
24
book.toml
@@ -1,24 +0,0 @@
|
||||
[book]
|
||||
title = "Logos LIP"
|
||||
authors = ["Jakub Sokołowski"]
|
||||
language = "en"
|
||||
src = "docs"
|
||||
|
||||
[output.html]
|
||||
default-theme = "ayu"
|
||||
additional-css = [
|
||||
"theme/katex/katex.min.css",
|
||||
"theme/fonts/fonts.css",
|
||||
"theme/FontAwesome/css/font-awesome.css",
|
||||
"custom.css"
|
||||
]
|
||||
additional-js = [
|
||||
"theme/katex/katex.min.js",
|
||||
"theme/katex/auto-render.min.js",
|
||||
"scripts/katex-render.js",
|
||||
"scripts/logos-lips.js"
|
||||
]
|
||||
git-repository-url = "https://github.com/logos-co/logos-lips"
|
||||
|
||||
[preprocessor.math]
|
||||
command = "python3 scripts/mdbook-math.py"
|
||||
3
codex/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Codex RFCs
|
||||
|
||||
Codex specifications related to a decentralised data storage platform.
|
||||
431
custom.css
@@ -1,431 +0,0 @@
|
||||
/* Landing layout */
|
||||
.landing-hero {
|
||||
margin-bottom: 1.5rem;
|
||||
padding: 1.25rem 1.5rem;
|
||||
background: var(--bg);
|
||||
border: 1px solid var(--table-border-color);
|
||||
}
|
||||
|
||||
.landing-hero p {
|
||||
margin: 0.3rem 0 0;
|
||||
color: var(--sidebar-fg);
|
||||
}
|
||||
|
||||
.filter-row {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 0.5rem;
|
||||
align-items: center;
|
||||
margin-bottom: 0.75rem;
|
||||
}
|
||||
|
||||
.filter-row input[type="search"] {
|
||||
padding: 0.5rem 0.65rem;
|
||||
border: 1px solid var(--searchbar-border-color);
|
||||
border-radius: 4px;
|
||||
min-width: 240px;
|
||||
background: var(--searchbar-bg);
|
||||
color: var(--searchbar-fg);
|
||||
}
|
||||
|
||||
.chips {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
.chip {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.4rem;
|
||||
padding: 0.35rem 0.6rem;
|
||||
border: 1px solid var(--table-border-color);
|
||||
border-radius: 999px;
|
||||
background: var(--theme-hover);
|
||||
color: var(--fg);
|
||||
cursor: pointer;
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
.chip.active {
|
||||
background: var(--theme-hover);
|
||||
border-color: var(--sidebar-active);
|
||||
color: var(--sidebar-active);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.quick-links {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
flex-wrap: wrap;
|
||||
margin: 0.5rem 0 1rem 0;
|
||||
}
|
||||
|
||||
.quick-links a {
|
||||
border: 1px solid var(--table-border-color);
|
||||
padding: 0.35rem 0.65rem;
|
||||
border-radius: 4px;
|
||||
background: var(--bg);
|
||||
text-decoration: none;
|
||||
color: var(--fg);
|
||||
}
|
||||
|
||||
.quick-links a:hover {
|
||||
border-color: var(--sidebar-active);
|
||||
color: var(--links);
|
||||
}
|
||||
|
||||
.rfc-table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
margin-top: 0.75rem;
|
||||
}
|
||||
|
||||
.rfc-table th, .rfc-table td {
|
||||
border: 1px solid var(--table-border-color);
|
||||
padding: 0.45rem 0.6rem;
|
||||
}
|
||||
|
||||
.rfc-table .rfc-updated {
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.rfc-table thead {
|
||||
background: var(--table-header-bg);
|
||||
}
|
||||
|
||||
.rfc-table tbody tr:hover {
|
||||
background: var(--theme-hover);
|
||||
}
|
||||
|
||||
.badge {
|
||||
display: inline-block;
|
||||
padding: 0.15rem 0.45rem;
|
||||
border-radius: 4px;
|
||||
font-size: 0.85em;
|
||||
border: 1px solid var(--table-border-color);
|
||||
background: var(--table-alternate-bg);
|
||||
color: var(--fg);
|
||||
}
|
||||
|
||||
.rfc-header {
|
||||
margin: 0.5rem 0 1.5rem 0;
|
||||
padding-bottom: 0.75rem;
|
||||
border-bottom: 1px solid var(--table-border-color);
|
||||
}
|
||||
|
||||
.rfc-badges {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 0.5rem;
|
||||
margin: 0.5rem 0 0.75rem 0;
|
||||
}
|
||||
|
||||
.rfc-badges .badge {
|
||||
font-weight: 600;
|
||||
background: var(--theme-hover);
|
||||
}
|
||||
|
||||
.badge.category-standards,
|
||||
.badge.category-bcp,
|
||||
.badge.category-informational,
|
||||
.badge.category-experimental,
|
||||
.badge.category-other {
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.rfc-meta-table {
|
||||
font-size: 0.95em;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.rfc-meta-table-wrapper {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.rfc-meta-table th {
|
||||
width: 9rem;
|
||||
text-align: left;
|
||||
background: var(--table-header-bg);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.rfc-meta-table td {
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
|
||||
/* Landing polish */
|
||||
main h1 {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.results-row {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: baseline;
|
||||
gap: 1rem;
|
||||
margin: 0.5rem 0 0.75rem 0;
|
||||
color: var(--sidebar-fg);
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
.results-count {
|
||||
color: var(--fg);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.results-hint {
|
||||
color: var(--sidebar-fg);
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
.table-wrap {
|
||||
overflow-x: auto;
|
||||
border: 1px solid var(--table-border-color);
|
||||
border-radius: 6px;
|
||||
background: var(--bg);
|
||||
}
|
||||
|
||||
.table-wrap .rfc-table {
|
||||
margin: 0;
|
||||
border: none;
|
||||
}
|
||||
|
||||
.rfc-table tbody tr:nth-child(even) {
|
||||
background: var(--table-alternate-bg);
|
||||
}
|
||||
|
||||
.rfc-table th[data-sort] {
|
||||
cursor: pointer;
|
||||
user-select: none;
|
||||
}
|
||||
|
||||
.rfc-table th.sorted {
|
||||
color: var(--links);
|
||||
}
|
||||
|
||||
.rfc-table td:first-child a {
|
||||
word-break: break-word;
|
||||
}
|
||||
|
||||
.noscript-note {
|
||||
margin-top: 0.75rem;
|
||||
color: var(--sidebar-fg);
|
||||
}
|
||||
|
||||
@media (max-width: 900px) {
|
||||
.results-row {
|
||||
flex-direction: column;
|
||||
align-items: flex-start;
|
||||
}
|
||||
|
||||
.filter-row input[type="search"] {
|
||||
width: 100%;
|
||||
min-width: 0;
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width: 700px) {
|
||||
#mdbook-menu-bar {
|
||||
flex-wrap: nowrap;
|
||||
}
|
||||
|
||||
#mdbook-menu-bar .right-buttons {
|
||||
margin-left: auto;
|
||||
}
|
||||
|
||||
#mdbook-menu-bar .menu-title {
|
||||
max-width: 40vw;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
.menu-title-link {
|
||||
position: absolute;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
text-decoration: none;
|
||||
color: inherit;
|
||||
}
|
||||
|
||||
.menu-title-link .menu-title {
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.theme-typography,
|
||||
body {
|
||||
font-family: "Open Sans", sans-serif;
|
||||
}
|
||||
|
||||
code, pre, .hljs {
|
||||
font-family: "Source Code Pro", "SFMono-Regular", Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
}
|
||||
|
||||
.site-nav {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.75rem;
|
||||
margin-left: auto;
|
||||
margin-right: 1rem;
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
.site-nav .nav-link {
|
||||
color: var(--fg);
|
||||
text-decoration: none;
|
||||
cursor: pointer;
|
||||
background: transparent;
|
||||
border: 0;
|
||||
font: inherit;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.site-nav .nav-link:hover {
|
||||
color: var(--links);
|
||||
}
|
||||
|
||||
.nav-dropdown {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.nav-dropdown summary {
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
.nav-dropdown summary::-webkit-details-marker {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.nav-dropdown .nav-menu {
|
||||
display: none;
|
||||
position: absolute;
|
||||
right: 0;
|
||||
top: calc(100% + 0.35rem);
|
||||
background: var(--bg);
|
||||
border: 1px solid var(--table-border-color);
|
||||
border-radius: 6px;
|
||||
padding: 0.4rem 0.5rem;
|
||||
min-width: 10rem;
|
||||
z-index: 10;
|
||||
}
|
||||
|
||||
.nav-dropdown[open] .nav-menu {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.35rem;
|
||||
}
|
||||
|
||||
.nav-dropdown .nav-menu a {
|
||||
color: var(--fg);
|
||||
text-decoration: none;
|
||||
padding: 0.2rem 0.2rem;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
.nav-dropdown .nav-menu a:hover {
|
||||
background: var(--theme-hover);
|
||||
color: var(--links);
|
||||
}
|
||||
|
||||
.back-to-top-link {
|
||||
color: var(--links);
|
||||
opacity: 0;
|
||||
pointer-events: none;
|
||||
transition: opacity 0.2s ease;
|
||||
}
|
||||
|
||||
.back-to-top-link.is-visible {
|
||||
opacity: 1;
|
||||
pointer-events: auto;
|
||||
}
|
||||
|
||||
.site-footer {
|
||||
margin: 2.5rem auto 1.25rem auto;
|
||||
padding-top: 0.75rem;
|
||||
border-top: 1px solid var(--table-border-color);
|
||||
color: var(--sidebar-fg);
|
||||
font-size: 0.85em;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
gap: 0.6rem;
|
||||
max-width: var(--content-max-width);
|
||||
}
|
||||
|
||||
.site-footer a {
|
||||
color: var(--links);
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.site-footer a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
.footer-sep {
|
||||
color: var(--sidebar-fg);
|
||||
}
|
||||
|
||||
.on-this-page {
|
||||
margin-left: 16px;
|
||||
border-inline-start: 3px solid var(--sidebar-header-border-color);
|
||||
padding-left: 8px;
|
||||
margin-top: 0.4rem;
|
||||
font-size: 0.92em;
|
||||
}
|
||||
|
||||
.on-this-page::before {
|
||||
content: "On this page";
|
||||
display: block;
|
||||
margin-bottom: 0.35rem;
|
||||
color: var(--sidebar-fg);
|
||||
font-weight: 600;
|
||||
font-size: 0.85em;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
|
||||
.on-this-page > ol {
|
||||
padding-left: 0;
|
||||
}
|
||||
|
||||
@media (max-width: 900px) {
|
||||
.site-nav {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
.chapter-item > .chapter-link-wrapper > a,
|
||||
.chapter-item > a {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.4rem;
|
||||
}
|
||||
|
||||
.section-toggle::before {
|
||||
content: "▸";
|
||||
display: inline-block;
|
||||
font-size: 0.9em;
|
||||
line-height: 1;
|
||||
transition: transform 0.15s ease;
|
||||
}
|
||||
|
||||
.chapter-item.expanded > a .section-toggle::before,
|
||||
.chapter-item.expanded > .chapter-link-wrapper > a .section-toggle::before {
|
||||
transform: rotate(90deg);
|
||||
}
|
||||
|
||||
.chapter-item:not(.expanded) > ol.section {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.chapter-item:not(.expanded) + li > ol.section {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.chapter-item:not(.expanded) > .chapter-link-wrapper > a .section-toggle::before,
|
||||
.chapter-item:not(.expanded) > a .section-toggle::before {
|
||||
transform: rotate(0deg);
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
# Logos LIP Index
|
||||
|
||||
An IETF-style index of Logos-managed LIPs across Storage, Messaging, Blockchain and IFT-TS components. Use the filters below to jump straight to a specification.
|
||||
|
||||
<div class="landing-hero">
|
||||
<div class="filter-row">
|
||||
<input id="rfc-search" type="search" placeholder="Search by number, title, status, component..." aria-label="Search LIPs">
|
||||
<div class="chips" id="status-chips">
|
||||
<span class="chip active" data-status="current" data-label="Current">Current</span>
|
||||
<span class="chip" data-status="all" data-label="All">All</span>
|
||||
<span class="chip" data-status="stable" data-label="Stable">Stable</span>
|
||||
<span class="chip" data-status="draft" data-label="Draft">Draft</span>
|
||||
<span class="chip" data-status="raw" data-label="Raw">Raw</span>
|
||||
<span class="chip" data-status="deprecated" data-label="Deprecated">Deprecated</span>
|
||||
<span class="chip" data-status="deleted" data-label="Deleted">Deleted</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="filter-row">
|
||||
<div class="chips" id="component-chips">
|
||||
<span class="chip active" data-component="all" data-label="All components">All components</span>
|
||||
<span class="chip" data-component="messaging" data-label="Messaging">Messaging</span>
|
||||
<span class="chip" data-component="blockchain" data-label="Blockchain">Blockchain</span>
|
||||
<span class="chip" data-component="storage" data-label="Storage">Storage</span>
|
||||
<span class="chip" data-component="ift-ts" data-label="IFT-TS">IFT-TS</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="filter-row">
|
||||
<div class="chips" id="date-chips">
|
||||
<span class="chip active" data-date="all" data-label="All time">All time</span>
|
||||
<span class="chip" data-date="latest" data-label="Latest" data-count="false">Latest</span>
|
||||
<span class="chip" data-date="last90" data-label="Last 90 days">Last 90 days</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="results-row">
|
||||
<div id="results-count" class="results-count">Loading RFC index...</div>
|
||||
<div class="results-hint">Click a column to sort</div>
|
||||
</div>
|
||||
|
||||
<div id="rfc-table-container" class="table-wrap"></div>
|
||||
|
||||
<noscript>
|
||||
<p class="noscript-note">JavaScript is required to load the RFC index table.</p>
|
||||
</noscript>
|
||||
@@ -1,26 +0,0 @@
|
||||
# About
|
||||
|
||||
The Logos LIP Index collects specifications maintained by IFT-TS across Messaging, Blockchain,
|
||||
and Storage. Each RFC documents a protocol, process, or system in a
|
||||
consistent, reviewable format.
|
||||
|
||||
This site is generated with mdBook from the repository:
|
||||
[logos-co/logos-lips](https://github.com/logos-co/logos-lips).
|
||||
|
||||
## Contributing
|
||||
|
||||
1. Open a pull request against the repo.
|
||||
2. Add or update the RFC in the appropriate component folder.
|
||||
3. Include clear status and category metadata in the RFC header table.
|
||||
|
||||
If you are unsure where a document belongs, open an issue first and we will
|
||||
help route it.
|
||||
|
||||
We keep RFCs in Markdown within this repository; updates happen through
|
||||
pull requests.
|
||||
|
||||
## Links
|
||||
|
||||
- IFT-TS: <https://vac.dev>
|
||||
- IETF RFC Series: <https://www.rfc-editor.org/>
|
||||
- Repository: <https://github.com/logos-co/logos-lips>
|
||||
@@ -1,39 +0,0 @@
|
||||
# Blockchain LIPs
|
||||
|
||||
Logos Blockchain is building a secure, flexible, and
|
||||
scalable infrastructure for developers creating applications for the network state.
|
||||
Published Specifications are currently available here,
|
||||
[Blockchain Specifications](https://nomos-tech.notion.site/project).
|
||||
|
||||
<div class="landing-hero">
|
||||
<div class="filter-row">
|
||||
<input id="rfc-search" type="search" placeholder="Search by number, title, status" aria-label="Search LIPs">
|
||||
<div class="chips" id="status-chips">
|
||||
<span class="chip active" data-status="current" data-label="Current">Current</span>
|
||||
<span class="chip" data-status="all" data-label="All">All</span>
|
||||
<span class="chip" data-status="stable" data-label="Stable">Stable</span>
|
||||
<span class="chip" data-status="draft" data-label="Draft">Draft</span>
|
||||
<span class="chip" data-status="raw" data-label="Raw">Raw</span>
|
||||
<span class="chip" data-status="deprecated" data-label="Deprecated">Deprecated</span>
|
||||
<span class="chip" data-status="deleted" data-label="Deleted">Deleted</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="filter-row">
|
||||
<div class="chips" id="date-chips">
|
||||
<span class="chip active" data-date="all" data-label="All time">All time</span>
|
||||
<span class="chip" data-date="latest" data-label="Latest" data-count="false">Latest</span>
|
||||
<span class="chip" data-date="last90" data-label="Last 90 days">Last 90 days</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="results-row">
|
||||
<div id="results-count" class="results-count">Loading RFC index...</div>
|
||||
<div class="results-hint">Click a column to sort</div>
|
||||
</div>
|
||||
|
||||
<div id="rfc-table-container" class="table-wrap" data-component="blockchain"></div>
|
||||
|
||||
<noscript>
|
||||
<p class="noscript-note">JavaScript is required to load the RFC index table.</p>
|
||||
</noscript>
|
||||
@@ -1,3 +0,0 @@
|
||||
# Blockchain Deprecated Specifications
|
||||
|
||||
Deprecated Blockchain specifications kept for archival and reference purposes.
|
||||
@@ -1,3 +0,0 @@
|
||||
# Blockchain Raw Specifications
|
||||
|
||||
Early-stage Blockchain specifications that have not yet progressed beyond raw status.
|
||||
|
Before Width: | Height: | Size: 359 KiB |
|
Before Width: | Height: | Size: 80 KiB |
|
Before Width: | Height: | Size: 104 KiB |
|
Before Width: | Height: | Size: 111 KiB |
|
Before Width: | Height: | Size: 74 KiB |
|
Before Width: | Height: | Size: 110 KiB |
|
Before Width: | Height: | Size: 58 KiB |
|
Before Width: | Height: | Size: 60 KiB |
@@ -1,275 +0,0 @@
|
||||
# BEDROCK-ANONYMOUS-LEADERS-REWARD
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Bedrock Anonymous Leaders Reward Protocol |
|
||||
| Slug | 85 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Thomas Lavaur <thomaslavaur@status.im> |
|
||||
| Contributors | David Rusu <davidrusu@status.im>, Mehmet Gonen <mehmet@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Frederico Teixeira <frederico@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/bedrock-anonymous-leaders-reward.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/bedrock-anonymous-leaders-reward.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the mechanism for anonymous reward distribution
|
||||
based on voucher commitments, nullifiers, and zero-knowledge (ZK) proofs.
|
||||
Block leaders can claim their rewards
|
||||
without linking them to specific blocks and without revealing their identities.
|
||||
The protocol removes any direct link between block production
|
||||
and the recipient of the reward, preventing self-censorship behaviors.
|
||||
|
||||
**Keywords:** anonymous rewards, voucher commitment, nullifier, zero-knowledge proof,
|
||||
leader reward, Merkle tree, block production, self-censorship
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Definitions
|
||||
|
||||
<!-- markdownlint-disable MD013 -->
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| Voucher | A one-time random secret used to claim a block reward anonymously. |
|
||||
| Voucher Commitment | A cryptographic commitment (zkhash) to a voucher secret. |
|
||||
| Voucher Nullifier | A unique identifier derived from a voucher, prevents double claims. |
|
||||
| Leader Claim Operation | A Mantle Operation allowing a leader to claim their reward. |
|
||||
| Reward Voucher Set | A Merkle tree containing all voucher commitments. |
|
||||
| Voucher Nullifier Set | A searchable database of nullifiers for claimed vouchers. |
|
||||
| Anonymity Set | The set of unclaimed vouchers from which a claim could originate. |
|
||||
|
||||
<!-- markdownlint-enable MD013 -->
|
||||
|
||||
## Background
|
||||
|
||||
In many blockchain designs, leaders receive rewards for producing valid blocks.
|
||||
Traditionally, this reward is linked directly to the block or its producer,
|
||||
potentially opening the door to manipulation or self-censorship,
|
||||
where leaders may avoid including certain transactions or messages
|
||||
out of fear of retaliation or reputational harm.
|
||||
As Nomos protects its nodes
|
||||
and ensures that they do not need to engage in self-censorship,
|
||||
the reward mechanism preserves the anonymity of block leaders
|
||||
while maintaining correctness and preventing double rewards.
|
||||
|
||||
### Design Overview
|
||||
|
||||
The protocol introduces a concept of *vouchers*
|
||||
to unlink the block reward claim from the block itself.
|
||||
Instead of directly crediting themselves in the block,
|
||||
leaders include a commitment (a zkhash in this protocol) to a secret voucher.
|
||||
These commitments are gathered into a Merkle tree.
|
||||
In the first block of an epoch,
|
||||
all vouchers from the previous epoch are added to the voucher Merkle tree,
|
||||
accumulating the vouchers together in a set
|
||||
and guaranteeing a minimal anonymity set.
|
||||
Leaders MAY anonymously claim their reward using a ZK proof later,
|
||||
proving the ownership of their voucher.
|
||||
|
||||
```text
|
||||
┌──────────────┐ ┌─────────────────┐ ┌─────────────────────┐
|
||||
│ Leader block │───▶│ Reward voucher │───▶│ Wait until next │
|
||||
└──────────────┘ └─────────────────┘ │ epoch │
|
||||
└──────────┬──────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────┐ ┌─────────────────┐ ┌─────────────────────┐
|
||||
│ Reward │◀───│ Claim with │◀───│ Voucher added to │
|
||||
│ │ │ ZK proof │ │ Merkle tree │
|
||||
└──────────────┘ └─────────────────┘ └─────────────────────┘
|
||||
```
|
||||
|
||||
By anonymizing the identity of block leaders at the time of reward claiming,
|
||||
the protocol removes any direct link
|
||||
between block production and the recipient of the reward.
|
||||
This is essential to prevent self-censorship behaviors.
|
||||
With anonymous claiming,
|
||||
leaders are free to act honestly according to protocol rules
|
||||
without concern for external consequences,
|
||||
thus improving the overall neutrality and robustness of the network.
|
||||
|
||||
Key properties of the protocol:
|
||||
|
||||
- **Anonymity**: Block rewards are unlinkable to the blocks they originate from
|
||||
(avoiding deanonymization).
|
||||
- **Soundness**: No reward can be claimed twice.
|
||||
|
||||
In parallel, the blockchain maintains the value `leaders_rewards`
|
||||
accumulating the rewards for leaders over time.
|
||||
Each voucher included in the Merkle tree represents the same share of `leaders_rewards`.
|
||||
Just like for voucher inclusion,
|
||||
more rewards are added to this variable on an epoch-by-epoch basis,
|
||||
which guarantees a stable and equal claimable reward for leaders over an epoch.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### Voucher Creation and Inclusion
|
||||
|
||||
When producing a block, a leader performs the following:
|
||||
|
||||
1. Generate a one-time random secret $voucher \xleftarrow{\$} \mathbb{F}_{p}$.
|
||||
|
||||
1. Compute the commitment: `voucher_cm := zkHash(b"LEAD_VOUCHER_CM_V1", voucher)`.
|
||||
|
||||
1. Include the `voucher_cm` in the block header.
|
||||
|
||||
Each `voucher_cm` is added to a Merkle tree of voucher commitments by validators
|
||||
during the execution of the first block of the following epoch,
|
||||
maintained throughout the entire blockchain history by everyone.
|
||||
|
||||
### Claiming the Reward
|
||||
|
||||
Each leader MAY submit a Leader Claim Operation to claim their reward.
|
||||
This Operation includes:
|
||||
|
||||
- The Merkle root of the global voucher set
|
||||
when the Mantle Transaction containing the claim is submitted.
|
||||
- A Proof of Claim.
|
||||
|
||||
This Operation increases the balance of a Mantle Transaction
|
||||
by the leader reward amount,
|
||||
letting the leader move the funds as desired
|
||||
through the Ledger transaction or another Operation.
|
||||
|
||||
> **Note**: This means that a leader MAY use their funds directly,
|
||||
> getting their reward and using them atomically.
|
||||
|
||||
Every leader receives a reward that is independent of the block content
|
||||
to avoid de-anonymization.
|
||||
This means that the fees of the block cannot be collected by the leader directly,
|
||||
or need to be pooled for all the leaders.
|
||||
|
||||
### Leaders Reward Calculation
|
||||
|
||||
At the start of epoch N+1,
|
||||
validators aggregate the leader rewards of epoch N into the `leaders_rewards` variable.
|
||||
The amount of the reward claimable with a voucher
|
||||
corresponds to a share of the `leaders_rewards`.
|
||||
This share is exactly equal to the total value of rewards
|
||||
divided by the size of the anonymity set of leaders, that is:
|
||||
|
||||
<!-- markdownlint-disable MD013 -->
|
||||
|
||||
$$
|
||||
share = \begin{cases}
|
||||
0 & \textbf{if } |voucher\_cm| = |voucher\_nf| \\
|
||||
\frac{leaders\_rewards}{|voucher\_cm| - |voucher\_nf|} & \textbf{if } |voucher\_cm| \neq |voucher\_nf|
|
||||
\end{cases}
|
||||
$$
|
||||
|
||||
<!-- markdownlint-enable MD013 -->
|
||||
|
||||
This amount is stable through an epoch
|
||||
because when a leader withdraws,
|
||||
both the pool value and the number of unclaimed vouchers decrease proportionally,
|
||||
so the price per share remains unchanged.
|
||||
However, the share value will vary across epochs if the leader rewards are variable.
|
||||
|
||||
### LEADER_CLAIM Validation
|
||||
|
||||
Nodes validate a `LEADER_CLAIM` Operation by:
|
||||
|
||||
1. Verifying the ZK proof.
|
||||
|
||||
1. Checking that `voucher_nf` is not already in the voucher nullifier set.
|
||||
|
||||
1. Executing the reward logic:
|
||||
- Add the `voucher_nf` to the voucher nullifier set
|
||||
to prevent claiming the same reward more than once.
|
||||
- Increase the balance of the Mantle Transaction by the share amount.
|
||||
- Decrease the value of the `leaders_rewards` by the same amount.
|
||||
|
||||
## Design Details
|
||||
|
||||
### Unlinking Block Rewards from Proposals
|
||||
|
||||
Each reward voucher is a cryptographic commitment derived from a voucher secret.
|
||||
This commitment, when included in the block header,
|
||||
reveals no information about the block producer's identity
|
||||
or the actual secret voucher.
|
||||
It is computationally infeasible to reverse the commitment
|
||||
to retrieve the voucher secret.
|
||||
|
||||
Crucially, when the leader reward is claimed and the voucher nullifier revealed,
|
||||
a third party cannot link this nullifier to the initial voucher commitment.
|
||||
A reward is claimable if its reward voucher is in the reward voucher set
|
||||
and its voucher nullifier is not in the voucher nullifier set.
|
||||
|
||||
The reward voucher set is maintained as a Merkle tree of depth 32,
|
||||
and validators are required to hold the frontier of this Merkle tree (an MMR) in memory
|
||||
to continue appending to the set.
|
||||
The voucher nullifier set is maintained as a searchable database.
|
||||
|
||||
### ZK Proof of Membership
|
||||
|
||||
When claiming a reward, the leader provides a ZK proof
|
||||
that they know a leaf in the global Merkle tree of reward vouchers
|
||||
and the preimage of that leaf.
|
||||
Crucially, the ZK proof does not reveal which leaf is being proven.
|
||||
The verifier only learns that *some* valid leaf exists in the tree
|
||||
for which the prover knows the secret voucher.
|
||||
This property ensures that the claim cannot be linked
|
||||
to any specific block header or reward voucher commitment.
|
||||
|
||||
### Preventing Double Claims Without Breaking Privacy
|
||||
|
||||
To prevent double claiming, the leader derives a voucher nullifier.
|
||||
This nullifier is unique to the voucher
|
||||
but reveals nothing about the original reward voucher or block.
|
||||
It acts as a one-way identifier
|
||||
that allows nodes to track whether a voucher has already been claimed,
|
||||
without compromising the anonymity of the claim.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Anonymity Guarantees
|
||||
|
||||
The protocol provides anonymity through the following mechanisms:
|
||||
|
||||
- Voucher commitments reveal no information about the block producer's identity.
|
||||
- ZK proofs do not reveal which leaf in the Merkle tree is being claimed.
|
||||
- Nullifiers cannot be linked back to voucher commitments.
|
||||
|
||||
The anonymity set size is determined by the number of unclaimed vouchers.
|
||||
Implementations SHOULD ensure a sufficient anonymity set size
|
||||
before allowing claims to prevent timing-based deanonymization attacks.
|
||||
|
||||
### Double Claim Prevention
|
||||
|
||||
The nullifier mechanism ensures that each voucher can only be claimed once.
|
||||
Nodes MUST verify that a nullifier is not in the voucher nullifier set
|
||||
before accepting a `LEADER_CLAIM` Operation.
|
||||
|
||||
### Reward Independence
|
||||
|
||||
Leaders receive a reward independent of block content
|
||||
to prevent correlation attacks based on block fees or transaction patterns.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [BEDROCK-MANTLE-SPECIFICATION][mantle] - Mantle Transaction and Operation specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [Anonymous Leaders Reward Protocol][origin-ref] - Original specification document
|
||||
|
||||
[mantle]: https://nomos-tech.notion.site/v1-1-Mantle-Specification-269261aa09df80dda501f568697930fd
|
||||
[origin-ref]: https://nomos-tech.notion.site/Anonymous-Leaders-Reward-Protocol-206261aa09df8120a49ffa49c71ba70d
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,288 +0,0 @@
|
||||
# BEDROCK-ARCHITECTURE-OVERVIEW
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Bedrock Architecture Overview |
|
||||
| Slug | 146 |
|
||||
| Status | raw |
|
||||
| Category | Informational |
|
||||
| Editor | David Rusu <davidrusu@status.im> |
|
||||
| Contributors | Álvaro Castro-Castilla <alvaro@status.im>, Daniel Kashepava <danielkashepava@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-30** — [`0ef87b1`](https://github.com/logos-co/logos-lips/blob/0ef87b1ba9491c854e48c8dfd7574d34ec69c704/docs/blockchain/raw/bedrock-architecture-overview.md) — New RFC: CODEX-MANIFEST (#191)
|
||||
- **2026-01-30** — [`5c123d6`](https://github.com/logos-co/logos-lips/blob/5c123d6b676be36053d5d9b9d67bb757138c2ace/docs/blockchain/raw/bedrock-architecture-overview.md) — Nomos/raw/bedrock architecture overview raw (#257)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
Bedrock enables high-performance Sovereign Rollups
|
||||
to leverage the security guarantees of Nomos.
|
||||
Sovereign Rollups build on Nomos through Bedrock Mantle,
|
||||
Bedrock's minimal execution layer which in turn runs on Cryptarchia,
|
||||
the Nomos consensus protocol.
|
||||
Taken together, Bedrock provides a private, highly scalable,
|
||||
and resilient substrate for high-performance decentralized applications.
|
||||
|
||||
**Keywords:** Bedrock, Sovereign Rollups, Mantle, Cryptarchia, channels,
|
||||
NomosDA, Blend Network, consensus, privacy
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119][rfc-2119].
|
||||
|
||||
### Definitions
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| Bedrock | The foundational layer of Nomos composed of Cryptarchia and Bedrock Mantle. |
|
||||
| Mantle | The minimal execution layer of Nomos for Sovereign Rollups. |
|
||||
| Cryptarchia | The Nomos consensus protocol, a Private Proof of Stake (PPoS) protocol. |
|
||||
| Sovereign Rollup | A virtual chain overlaid on top of the Nomos blockchain. |
|
||||
| Channel | A permissioned, ordered log of messages forming a virtual chain. |
|
||||
| Inscription | A channel message stored permanently in the ledger. |
|
||||
| Blob | A channel message with only a commitment stored on-chain; data stored in NomosDA. |
|
||||
| NomosDA | The Data Availability layer providing temporary storage for Blob data. |
|
||||
| Blend Network | A privacy-preserving network layer for routing block proposals. |
|
||||
|
||||
## Background
|
||||
|
||||
Bedrock is composed of Cryptarchia and Bedrock Mantle.
|
||||
Bedrock is in turn supported by the Bedrock Services: Blend Network and NomosDA.
|
||||
Together they provide an interface for building high-performance Sovereign Rollups
|
||||
that leverage the security and resilience of Nomos.
|
||||
|
||||
```text
|
||||
┌─────────────┐ ┌──────────────────────┐ ┌─────────────────────────────────────────┐
|
||||
│ Clients │ │ Sovereign Zone │ │ Nomos Blockchain │
|
||||
├─────────────┤ ├──────────────────────┤ ├───────────────────┬─────────────────────┤
|
||||
│ │ │ │ │ Bedrock │ Services │
|
||||
│ Alice ──┼──>│ DeFi Exchange SZ │ │ ┌─────────┐ │ ┌───────────────┐ │
|
||||
│ │ │ │ │ │ Mantle │<───┼──│ Blend Network │ │
|
||||
│ Bob ──┼──>│ Land Registry SZ │──>│ └────┬────┘ │ └───────────────┘ │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ Charlie ──┼──>│ Prediction Market SZ │ │ v │ │
|
||||
│ │ │ │ │ ┌─────────────┐ │ │
|
||||
└─────────────┘ └──────────────────────┘ │ │ Cryptarchia │ │ │
|
||||
│ └──────┬──────┘ │ │
|
||||
└─────────┼─────────┴─────────────────────┘
|
||||
│
|
||||
┌────────────┼────────────┐
|
||||
│ │ │
|
||||
v v v
|
||||
┌────────┐ ┌────────┐ ┌────────┐
|
||||
│ Node_1 │ │ Node_2 │ │ Node_3 │
|
||||
└────────┘ └────────┘ └────────┘
|
||||
```
|
||||
|
||||
## Bedrock Mantle
|
||||
|
||||
Mantle forms the minimal execution layer of Nomos.
|
||||
Mantle Transactions consist of a sequence of Operations
|
||||
together with a Ledger Transaction used for paying fees and transferring funds.
|
||||
|
||||
```text
|
||||
┌─────────────────────────────────┐
|
||||
│ Mantle Transaction │
|
||||
├─────────────────────────────────┤
|
||||
│ Operations │
|
||||
│ ┌───────────────────────────┐ │
|
||||
│ │ CHANNEL_DEPOSIT(...) │ │
|
||||
│ ├───────────────────────────┤ │
|
||||
│ │ CHANNEL_INSCRIBE(...) │ │
|
||||
│ ├───────────────────────────┤ │
|
||||
│ │ ... │ │
|
||||
│ └───────────────────────────┘ │
|
||||
├─────────────────────────────────┤
|
||||
│ Ledger Transaction │
|
||||
└─────────────────────────────────┘
|
||||
```
|
||||
|
||||
Sovereign Rollups make use of Mantle Transactions
|
||||
when posting their updates to Nomos.
|
||||
This is done through the use of Mantle Channels and Channel Operations.
|
||||
|
||||
### Mantle Channels
|
||||
|
||||
Mantle Channels are lightweight virtual chains
|
||||
overlaid on top of the Nomos blockchain.
|
||||
Sovereign Rollups are built on top of these channels,
|
||||
allowing them to outsource the hard parts of running a decentralized service to Nomos,
|
||||
namely ordering and replicating state updates.
|
||||
|
||||
Channels are permissioned, ordered logs of messages.
|
||||
These messages are signed by the Channel owner
|
||||
and come in two types: Inscriptions or Blobs.
|
||||
Inscriptions store the message data permanently in-ledger,
|
||||
while Blobs store only a commitment to the message data permanently.
|
||||
The actual message data is stored temporarily in NomosDA,
|
||||
just long enough for interested parties to fetch a copy for themselves.
|
||||
|
||||
```text
|
||||
┌───────────┐ ┌──────────────────┐ ┌───────────┐
|
||||
│ Channel A │ │ Nomos Blockchain │ │ Channel B │
|
||||
├───────────┤ ├──────────────────┤ ├───────────┤
|
||||
│ A_3 │ │ Block_6 <─┼───│ B_4 │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ v │ │ v │ │ v │
|
||||
│ A_2 │ │ Block_5 │ │ B_3 │
|
||||
│ │ │ │ │ │ │ │ │
|
||||
│ v │──>│ v │ │ v │
|
||||
│ A_1 │ │ Block_4 <─┼───│ B_2 │
|
||||
│ │ │ │ │ │ │ │
|
||||
│ │ │ v │ │ v │
|
||||
│ │ │ Block_3 │ │ B_1 │
|
||||
│ │ │ │ │ │ │
|
||||
│ │ │ v │ │ │
|
||||
│ │ │ Block_2 │ │ │
|
||||
│ │ │ │ │ │ │
|
||||
│ │ │ v │ │ │
|
||||
│ │ │ Block_1 │ │ │
|
||||
└───────────┘ └──────────────────┘ └───────────┘
|
||||
```
|
||||
|
||||
Channels A and B form virtual chains on top of the Nomos blockchain.
|
||||
Channel messages are included in blocks on the Nomos blockchain
|
||||
in such a way that they respect the ordering of channel messages.
|
||||
For example, $B_4$ MUST come after $B_3$ in the Nomos blockchain.
|
||||
|
||||
### Transient Blobs
|
||||
|
||||
The fact that Blobs are stored only temporarily in NomosDA
|
||||
allows Nomos to provide cheap, temporary storage for Sovereign Rollups
|
||||
without incurring long-term scalability concerns.
|
||||
The network can serve a large amount of data
|
||||
without the risk of bloating with obsolete data after years of operations.
|
||||
|
||||
At the same time, the transient nature of Blobs
|
||||
shifts the burden of long-term replication from the Nomos Network
|
||||
to the parties interested in that Blob data—
|
||||
that is, the Sovereign Rollup operators, their clients,
|
||||
and other interested parties (archival nodes, block explorers, etc.).
|
||||
So long as at least one party holds a copy of a Blob
|
||||
and is willing to provide it to the network,
|
||||
the Sovereign Rollup can continue to be verified
|
||||
by checking provided Blobs against their corresponding on-chain Blob commitments,
|
||||
which are stored permanently on the Nomos blockchain.
|
||||
|
||||
## Cryptarchia
|
||||
|
||||
Bedrock Mantle is powered by [Cryptarchia][cryptarchia],
|
||||
a highly scalable, permissionless consensus protocol
|
||||
optimized for privacy and resilience.
|
||||
Cryptarchia is a Private Proof of Stake (PPoS) consensus protocol
|
||||
with properties very similar to Bitcoin.
|
||||
Just like in Bitcoin, where a miner's hashing power is not revealed when they win a block,
|
||||
Cryptarchia ensures privacy for block proposers
|
||||
by breaking the link between a proposal and its proposer.
|
||||
Unlike Bitcoin, Nomos extends block proposer confidentiality to the network layer
|
||||
by routing proposals through the Blend Network,
|
||||
making network analysis attacks prohibitively expensive.
|
||||
|
||||
## Sovereign Rollups
|
||||
|
||||
Sovereign Rollups bridge the gap between traditional server-based applications
|
||||
and decentralized, permissionless applications.
|
||||
|
||||
Sovereign Rollups alleviate the contention
|
||||
caused by decentralized applications competing
|
||||
for the limited resources of a single-threaded VM (e.g., EVM in Ethereum)
|
||||
while still remaining auditable and fault tolerant.
|
||||
This is achieved through shifting transaction ordering and execution
|
||||
off of the main chain into Sovereign Rollup nodes,
|
||||
with Sovereign Rollup nodes posting only a state diff or batch of transactions
|
||||
to Nomos as an opaque data Blob.
|
||||
|
||||
### Transaction Flow
|
||||
|
||||
```text
|
||||
┌─────────────┐ ┌─────────────────────────────────────────────┐ ┌──────────────────┐
|
||||
│ Clients │ │ Sovereign Zone │ │ Nomos Blockchain │
|
||||
├─────────────┤ ├──────────────┬──────────────────────────────┤ ├──────────────────┤
|
||||
│ │ │ Sequencer │ Inscription │ │ │
|
||||
│ Alice ──┼──>│ │ ┌──────────────────────┐ │ │ │
|
||||
│ │ │ Orders Txs │ │ tx_Alice │ │ │ │
|
||||
│ Bob ──┼──>│ │ │ ├──────────────────────┤ │ │ Bedrock Mantle │
|
||||
│ │ │ v │ │ tx_Bob │──>│──>│ │
|
||||
│ Charlie ──┼──>│ Bundle Txs │ ├──────────────────────┤ │ │ │
|
||||
│ │ │ │ │ tx_Charlie │ │ │ │
|
||||
│ │ │ │ └──────────────────────┘ │ │ │
|
||||
└─────────────┘ └──────────────┴──────────────────────────────┘ └──────────────────┘
|
||||
```
|
||||
|
||||
The following sequence describes the flow of transactions
|
||||
from clients through a Sovereign Rollup to finality on Nomos:
|
||||
|
||||
1. Clients submit transactions to the Sovereign Rollup.
|
||||
|
||||
1. The Sovereign Rollup orders, executes,
|
||||
and bundles the transactions into a Blob.
|
||||
|
||||
1. The Sovereign Rollup submits the Blob Mantle Transaction
|
||||
along with DA Shares to NomosDA.
|
||||
|
||||
1. NomosDA begins replicating the Blob
|
||||
and forwards the Blob Mantle Transaction to the Nomos Mempool.
|
||||
|
||||
1. A leader includes the transaction in the next block via Cryptarchia.
|
||||
|
||||
1. NomosDA observes the Blob inclusion on-chain (Blob confirmed).
|
||||
|
||||
1. The client observes their transaction
|
||||
in a Sovereign Rollup Blob included in Nomos (weak confirmation).
|
||||
|
||||
1. The block finalizes after being buried by 2160 blocks.
|
||||
|
||||
1. The client observes the Sovereign Rollup Blob finalized (finality).
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant C as Clients
|
||||
participant SZ as Sovereign Zone
|
||||
box Nomos Blockchain
|
||||
participant Mempool as Nomos Mempool
|
||||
participant Cryptarchia
|
||||
end
|
||||
C->>SZ: Alice's Tx
|
||||
C->>SZ: Bob's Tx
|
||||
C->>SZ: Charlie's Tx
|
||||
SZ-->>SZ: Order, Execute and Bundle Txs into a Blob
|
||||
SZ->>Mempool: Blob Mantle Transaction
|
||||
Mempool->>Cryptarchia: Leader includes transaction in next block
|
||||
Cryptarchia-->>Cryptarchia: Block finalizes after being buried by 2160 blocks
|
||||
Cryptarchia->>C: Client observes the SR Blob finalized (finality)
|
||||
```
|
||||
|
||||
### Architecture Benefits
|
||||
|
||||
Sovereign Rollups form a virtual chain overlaid on top of the Nomos blockchain.
|
||||
This architecture allows application developers
|
||||
to easily spin up high-performance applications
|
||||
while taking advantage of the security of Nomos
|
||||
to distribute the application state widely for auditing and resilience purposes.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [NOMOS-CRYPTARCHIA-V1-PROTOCOL][cryptarchia] - Cryptarchia consensus protocol specification
|
||||
- [BEDROCK-V1.1-MANTLE-SPECIFICATION][mantle] - Mantle Transaction and Operation specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [Bedrock Architecture Overview][origin-ref] - Original architecture overview document
|
||||
|
||||
[rfc-2119]: https://www.ietf.org/rfc/rfc2119.txt
|
||||
[cryptarchia]: ../raw/nomos-cryptarchia-v1-protocol.md
|
||||
[mantle]: ../raw/bedrock-v1.1-mantle-specification.md
|
||||
[origin-ref]: https://nomos-tech.notion.site/Bedrock-Architecture-Overview-1fd261aa09df8112918df709898a8fa3
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,446 +0,0 @@
|
||||
# BEDROCK-GENESIS-BLOCK
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Bedrock Genesis Block Specification |
|
||||
| Slug | 90 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | David Rusu <davidrusu@status.im> |
|
||||
| Contributors | Hong-Sheng Zhou, Thomas Lavaur <thomaslavaur@status.im>, Marcin Pawlowski <marcin@status.im>, Mehmet Gonen <mehmet@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Daniel Sanchez Quiros <danielsq@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/bedrock-genesis-block.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/bedrock-genesis-block.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the Genesis Block for the Bedrock chain,
|
||||
including the initial bedrock service providers, NMO token distribution,
|
||||
and protocol parameters.
|
||||
The Genesis Block is the root of trust for all subsequent protocol operations
|
||||
and must be constructed in a way that is deterministic, verifiable,
|
||||
and robust against long-range or bootstrap attacks.
|
||||
|
||||
**Keywords:** genesis block, token distribution, epoch nonce, service providers,
|
||||
Cryptarchia initialization, ledger state
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Definitions
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| Genesis Block | The first block in the Bedrock chain establishing the initial state. |
|
||||
| NMO | The native token of the Nomos network. |
|
||||
| Epoch Nonce | The source of randomness for the Cryptarchia lottery. |
|
||||
| Service Provider | A node participating in DA or Blend network services. |
|
||||
| Ledger Transaction | A transaction that modifies the token ledger state. |
|
||||
| Mantle Transaction | A transaction containing operations and a ledger transaction. |
|
||||
|
||||
## Background
|
||||
|
||||
The block body is a single Mantle Transaction
|
||||
containing a Ledger Transaction distributing the notes to initial token holders.
|
||||
The Bedrock Services are initialized through `SDP_DECLARE` Operations
|
||||
embedded in the Mantle Transaction's Operations list
|
||||
and protocol initializing constants are encoded through a `CHANNEL_INSCRIBE` Operation
|
||||
also embedded in the Operations list.
|
||||
|
||||
Not all protocol constants are encoded in the Genesis block.
|
||||
The principle used to decide
|
||||
whether a value should be in the Genesis block or not
|
||||
is whether it is a value that is derived from blockchain activity
|
||||
or whether it is updated through a protocol update (hard / soft fork).
|
||||
For example, the epoch nonce is updated through normal blockchain Operations
|
||||
and therefore it should be specified in the Genesis block.
|
||||
Gas constants are only changed through protocol updates and hard forks
|
||||
and therefore they will be hardcoded in the node implementation.
|
||||
|
||||
## Genesis Block Data Structure
|
||||
|
||||
The Genesis Block is composed of the Genesis Block Header
|
||||
and the Genesis Mantle Transaction
|
||||
(there is a single transaction in the genesis block).
|
||||
The Mantle Transaction contains all information necessary
|
||||
for initializing Bedrock Services and Cryptarchia state,
|
||||
as well as distributing the initial tokens to stakeholders.
|
||||
|
||||
### Initial Token Distribution
|
||||
|
||||
Initial tokens will be distributed through a Ledger Transaction
|
||||
containing zero inputs and one output note for each initial stakeholder.
|
||||
Note that since the Ledger is transparent,
|
||||
the initial stake allocation is visible to everyone.
|
||||
Those wishing to hide their initial stake may opt to subdivide their note
|
||||
into a few different notes of equal value.
|
||||
|
||||
In order to participate in the Cryptarchia lottery,
|
||||
stakeholders must generate their note keys in accordance with
|
||||
the Proof of Leadership protocol specified at
|
||||
[Proof of Leadership Specification - Protocol][pol-protocol].
|
||||
|
||||
The initial state of the Ledger will be derived
|
||||
through normal execution of this Ledger Transaction,
|
||||
that is, each output's note ID will be added to the unspent notes set.
|
||||
|
||||
#### Initial Token Distribution Example
|
||||
|
||||
```python
|
||||
STAKE_DISTRIBUTION_TX = LedgerTx(
|
||||
inputs=[],
|
||||
outputs=[
|
||||
Note(value=1000, public_key=STAKE_HOLDER_0_PK),
|
||||
Note(value=2000, public_key=STAKE_HOLDER_1_PK),
|
||||
Note(value=1500, public_key=STAKE_HOLDER_2_PK),
|
||||
# ...
|
||||
]
|
||||
)
|
||||
```
|
||||
|
||||
### Initial Service Declarations
|
||||
|
||||
Data Availability (DA) and Blend Network MUST initialize their set of providers.
|
||||
This is done through a set of `SDP_DECLARE` Operations
|
||||
in the Genesis Mantle Transaction.
|
||||
|
||||
Both Blend and DA enforce a minimal network size for the service to be active.
|
||||
Thus, in order to have active Blend and DA services at Genesis,
|
||||
there MUST be at least as many declarations for each service
|
||||
in the Genesis block to meet each service's minimal network size:
|
||||
|
||||
- **DA** — [NomosDA Specification - Minimum Network Size][nomosda-min-size]
|
||||
- **Blend** — [Blend Protocol - Minimal Network Size][blend-min-size]
|
||||
|
||||
#### Initial Service Declarations Example
|
||||
|
||||
```python
|
||||
DA_DECLARATIONS = [
|
||||
Declaration(
|
||||
msg=DeclarationMessage(
|
||||
ServiceType.DA,
|
||||
["ip://1.1.1.1:3000"],
|
||||
PROVIDER_ID_0,
|
||||
ZK_ID_0
|
||||
),
|
||||
locked_note_id=STAKE_DISTRIBUTION_TX.output_note_id(0)
|
||||
),
|
||||
# ... 40 total declarations
|
||||
]
|
||||
|
||||
BLEND_DECLARATIONS = [
|
||||
Declaration(
|
||||
msg=DeclarationMessage(
|
||||
ServiceType.BLEND,
|
||||
["ip://1.1.1.1:3000"],
|
||||
PROVIDER_ID_0,
|
||||
ZK_ID_0
|
||||
),
|
||||
locked_note_id=STAKE_DISTRIBUTION_TX.output_note_id(0)
|
||||
),
|
||||
# ... 32 total declarations
|
||||
]
|
||||
|
||||
SERVICE_DECLARATIONS = DA_DECLARATIONS + BLEND_DECLARATIONS
|
||||
```
|
||||
|
||||
### Cryptarchia Parameters
|
||||
|
||||
Cryptarchia is initialized with the following parameters:
|
||||
|
||||
- `genesis_time`: ISO 8601 encoded timestamp.
|
||||
|
||||
Cryptarchia uses slots as a measure of time offset from some start time.
|
||||
This timestamp must be agreed upon by all nodes in order to have a common clock.
|
||||
|
||||
- `chain_id`: string.
|
||||
|
||||
It is useful to differentiate testnets from mainnet.
|
||||
To avoid confusion, the chain ID is placed in the Genesis block
|
||||
to guarantee that the networks are disjoint.
|
||||
|
||||
- `genesis_epoch_nonce`: 32 bytes, hex encoded.
|
||||
|
||||
The initial source of randomness for the Cryptarchia lottery.
|
||||
The process for selecting this value is described in detail
|
||||
at [Epoch Nonce Ceremony](#epoch-nonce-ceremony).
|
||||
|
||||
These parameters are encoded in the Genesis block
|
||||
as an inscription sent to the null channel.
|
||||
|
||||
#### Cryptarchia Parameters Example
|
||||
|
||||
```python
|
||||
CRYPTARCHIA_PARAMS = {
|
||||
"chain_id": "nomos-mainnet",
|
||||
"genesis_time": "2026-01-05T19:20:35Z",
|
||||
"genesis_epoch_nonce": "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890",
|
||||
}
|
||||
|
||||
CRYPTARCHIA_INSCRIPTION = Inscribe(
|
||||
channel=bytes(32),
|
||||
inscription=json.dumps(CRYPTARCHIA_PARAMS).encode("utf-8"),
|
||||
parent=bytes(32),
|
||||
signer=Ed25519PublicKey_ZERO,
|
||||
)
|
||||
```
|
||||
|
||||
## Epoch Nonce Ceremony
|
||||
|
||||
The initial epoch nonce value governs the Cryptarchia lottery randomness
|
||||
for the first epoch.
|
||||
It must be revealed AFTER the initial stake distribution has been frozen.
|
||||
This is done to prevent any stakeholders from gaining an unfair advantage
|
||||
from prior knowledge of the lottery randomness.
|
||||
|
||||
The protocol for generating the initial randomness nonce can be found below.
|
||||
|
||||
### Schedule Epoch Nonce Ceremony Event
|
||||
|
||||
The time of the epoch nonce ceremony must be fixed well in advance;
|
||||
let `t` denote the time of the Epoch Nonce Ceremony; broadcast `t` widely.
|
||||
|
||||
The `STAKE_DISTRIBUTION_TX` must be finalized before `t`
|
||||
to ensure a fair Cryptarchia slot lottery.
|
||||
|
||||
### Randomness Collection
|
||||
|
||||
The entropy is collected from multiple randomness sources:
|
||||
|
||||
- **Bitcoin block hash** immediately after time `t`, denoted as r₁.
|
||||
Block hash can be found on `blockchain.com`'s bitcoin block explorer,
|
||||
e.g. [blockchain.com/explorer/blocks/btc/905030][btc-block].
|
||||
|
||||
- **Ethereum block hash** immediately after time `t`, denoted as r₂.
|
||||
Block hash can be found in the `more details` section
|
||||
when viewing a block on etherscan,
|
||||
e.g. [etherscan.io/block/22894116][eth-block].
|
||||
|
||||
- **DRAND beacon value** for the round immediately after `t`, denoted as r₃.
|
||||
Use the `default` beacon, and find the round number corresponding to `t`.
|
||||
[api.drand.sh/v2/beacons/default/rounds/1234][drand-beacon].
|
||||
|
||||
[btc-block]: https://www.blockchain.com/explorer/blocks/btc/905030
|
||||
[eth-block]: https://etherscan.io/block/22894116
|
||||
[drand-beacon]: https://api.drand.sh/v2/beacons/default/rounds/1234
|
||||
|
||||
### Randomness Derivation
|
||||
|
||||
Once all above entropy contributions, i.e., r₁, r₂, r₃ are collected,
|
||||
the initial epoch randomness η_GENESIS is computed as:
|
||||
|
||||
```text
|
||||
η_GENESIS = H(r₁, r₂, r₃)
|
||||
```
|
||||
|
||||
where H is a collision-resistant zkhash function.
|
||||
|
||||
## Genesis Mantle Transaction
|
||||
|
||||
The initial stake distribution, service declarations and Cryptarchia inscription
|
||||
are components of the Genesis Mantle Transaction.
|
||||
This is the single transaction that forms the body of the Genesis block.
|
||||
|
||||
```python
|
||||
GENESIS_MANTLE_TX = MantleTx(
|
||||
ops=[CRYPTARCHIA_INSCRIPTION] + SERVICE_DECLARATIONS,
|
||||
ledger_tx=STAKE_DISTRIBUTION_TX,
|
||||
permanent_storage_gas_price=0,
|
||||
execution_gas_price=0
|
||||
)
|
||||
```
|
||||
|
||||
## Block Header Fields
|
||||
|
||||
The Genesis Block header fields are set to the following values:
|
||||
|
||||
- `bedrock_version`: Protocol version (e.g., 1).
|
||||
- `parent_block`: 0 (as this is the first block).
|
||||
- `slot`: 0 (the Genesis slot).
|
||||
- `block_root`: Block Merkle root over the (single) initial transaction.
|
||||
- `proof_of_leadership`: Stubbed leadership proof.
|
||||
- `leader_voucher`: 0 (as there is no leader block reward for the initial block).
|
||||
- `entropy_contribution`: 0 (no entropy is provided through the initial PoL).
|
||||
- `proof`: Null Groth16Proof, all values are set to zero.
|
||||
- `leader_key`: Null PublicKey.
|
||||
|
||||
### Block Header Fields Example
|
||||
|
||||
```python
|
||||
GENESIS_HEADER = Header(
|
||||
bedrock_version=1,
|
||||
parent_block=bytes(32),
|
||||
slot=0,
|
||||
block_root=block_merkle_root([GENESIS_MANTLE_TX]),
|
||||
proof_of_leadership=ProofOfLeadership(
|
||||
leader_voucher=bytes(32),
|
||||
entropy_contribution=bytes(32),
|
||||
proof=Groth16Proof(G1_ZERO, G2_ZERO, G1_ZERO),
|
||||
leader_key=Ed25519PublicKey_ZERO,
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
## Sample Genesis Block
|
||||
|
||||
```python
|
||||
# distribute NMO to all stakeholders
|
||||
STAKE_DISTRIBUTION_TX = LedgerTx(
|
||||
inputs=[],
|
||||
outputs=[
|
||||
Note(value=1000, public_key=STAKE_HOLDER_0_PK),
|
||||
Note(value=2000, public_key=STAKE_HOLDER_1_PK),
|
||||
Note(value=1500, public_key=STAKE_HOLDER_2_PK),
|
||||
# ...
|
||||
]
|
||||
)
|
||||
|
||||
# set Cryptarchia parameters
|
||||
CRYPTARCHIA_PARAMS = {
|
||||
"chain_id": "nomos-mainnet",
|
||||
"genesis_time": "2026-01-05T19:20:35Z",
|
||||
"genesis_epoch_nonce": "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890",
|
||||
}
|
||||
|
||||
CRYPTARCHIA_INSCRIPTION = Inscribe(
|
||||
channel=bytes(32),
|
||||
inscription=json.dumps(CRYPTARCHIA_PARAMS).encode("utf-8"),
|
||||
parent=bytes(32),
|
||||
signer=Ed25519PublicKey_ZERO,
|
||||
)
|
||||
|
||||
# service declarations
|
||||
DA_DECLARATIONS = [
|
||||
Declaration(
|
||||
msg=DeclarationMessage(ServiceType.DA, ["ip://1.1.1.1:3000"], PROVIDER_ID_0, ZK_ID_0),
|
||||
locked_note_id=STAKE_DISTRIBUTION_TX.output_note_id(0)
|
||||
),
|
||||
# ... more declarations
|
||||
]
|
||||
|
||||
BLEND_DECLARATIONS = [
|
||||
Declaration(
|
||||
msg=DeclarationMessage(ServiceType.BLEND, ["ip://1.1.1.1:3000"], PROVIDER_ID_0, ZK_ID_0),
|
||||
locked_note_id=STAKE_DISTRIBUTION_TX.output_note_id(0)
|
||||
),
|
||||
# ... more declarations
|
||||
]
|
||||
|
||||
SERVICE_DECLARATIONS = DA_DECLARATIONS + BLEND_DECLARATIONS
|
||||
|
||||
# build the genesis Mantle Transaction
|
||||
GENESIS_MANTLE_TX = MantleTx(
|
||||
ops=[CRYPTARCHIA_INSCRIPTION] + SERVICE_DECLARATIONS,
|
||||
ledger_tx=STAKE_DISTRIBUTION_TX,
|
||||
permanent_storage_gas_price=0,
execution_gas_price=0,
|
||||
)
|
||||
|
||||
GENESIS_HEADER = Header(
|
||||
bedrock_version=1,
|
||||
parent_block=bytes(32),
|
||||
slot=0,
|
||||
block_root=block_merkle_root([GENESIS_MANTLE_TX]),
|
||||
proof_of_leadership=ProofOfLeadership(
|
||||
leader_voucher=bytes(32),
|
||||
entropy_contribution=bytes(32),
|
||||
proof=Groth16Proof(G1_ZERO, G2_ZERO, G1_ZERO),
|
||||
leader_key=Ed25519PublicKey_ZERO,
|
||||
)
|
||||
)
|
||||
|
||||
GENESIS_BLOCK = (GENESIS_HEADER, [GENESIS_MANTLE_TX])
|
||||
```
|
||||
|
||||
## Initializing Bedrock
|
||||
|
||||
Bedrock is initialized by executing the Mantle Transaction
|
||||
without validating the Ledger Transaction and Mantle Operations.
|
||||
No validation or execution is done for the Genesis block header;
|
||||
in particular, processing of `proof_of_leadership` is skipped.
|
||||
|
||||
### Mantle Ledger Initialization
|
||||
|
||||
The Ledger Transaction should be executed
|
||||
without checking that the transaction is balanced.
|
||||
However, other validations are checked,
|
||||
e.g. that output note values are positive and smaller than the maximum allowed value.
|
||||
The result of normal transaction execution adds all transaction outputs to the Ledger.
|
||||
|
||||
### Cryptarchia Initialization
|
||||
|
||||
The Mantle Transaction contains an inscription sent to the null channel
|
||||
containing the parameters for initializing Cryptarchia.
|
||||
|
||||
The Cryptarchia slot clock is initialized to `genesis_time`,
|
||||
`LIB` is set to the Genesis block and the epoch state is then initialized:
|
||||
|
||||
#### Initial Epoch State
|
||||
|
||||
Cryptarchia progresses in epochs
|
||||
where the variables governing the lottery are fixed for the duration of an epoch
|
||||
and the activity during that epoch is used
|
||||
to derive the values of those variables for the next epoch.
|
||||
These variables taken together are called the Epoch State.
|
||||
(see [Cryptarchia v1 Protocol Specification - Epoch State][cryptarchia-epoch-state]).
|
||||
|
||||
To initialize the Epoch State, the epoch variables are derived from the genesis block.
|
||||
|
||||
1. η: the epoch nonce is taken directly from the `genesis_epoch_nonce`.
|
||||
|
||||
1. C_LEAD: Eligible leader commitment is set to the Ledger Root
|
||||
over all notes from the initial token distribution.
|
||||
The derivation of this root is specified in
|
||||
[Proof of Leadership Specification - Ledger Root][pol-ledger-root].
|
||||
|
||||
1. D: The initial estimate of total stake
|
||||
will be the total tokens distributed at genesis.
|
||||
|
||||
### Bedrock Services Initialization
|
||||
|
||||
DA and Blend network are initialized through normal Mantle Transaction execution.
|
||||
The `SDP_DECLARE` Operations in the Genesis Mantle Transaction
|
||||
will create the initial set of providers in each service.
|
||||
|
||||
During normal operations, DA/Blend services would wait until a block is deep enough
|
||||
to be finalized,
|
||||
but for the Genesis block, it is considered finalized by definition
|
||||
and so DA/Blend will immediately use the provider set
|
||||
without the usual finalization delay.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Proof of Leadership Specification][pol-protocol] - Protocol for generating note keys
|
||||
- [NomosDA Specification][nomosda-min-size] - Minimum Network Size requirements
|
||||
- [Blend Protocol][blend-min-size] - Minimal Network Size requirements
|
||||
- [Cryptarchia v1 Protocol Specification][cryptarchia-epoch-state] - Epoch State specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [Bedrock Genesis Block][origin-ref] - Original specification document
|
||||
- [Ouroboros Praos](https://eprint.iacr.org/2017/573.pdf) - Ouroboros Praos protocol
|
||||
- [Ouroboros Genesis](https://eprint.iacr.org/2018/378.pdf) - Ouroboros Genesis protocol
|
||||
- [Ouroboros Crypsinous](https://eprint.iacr.org/2018/1132.pdf) - Ouroboros Crypsinous protocol
|
||||
- [Cardano Shelley Genesis File Format](https://cardanocourse.gitbook.io/cardano-course/handbook/protocol-parameters-and-configuration-files/shelley-genesis-file) - Cardano genesis file format
|
||||
- [Cardano CIP-16 Key Serialisation](https://cips.cardano.org/cip/CIP-16) - Cardano key serialisation
|
||||
|
||||
[pol-protocol]: https://nomos-tech.notion.site/Proof-of-Leadership-Specification
|
||||
[pol-ledger-root]: https://nomos-tech.notion.site/Proof-of-Leadership-Specification
|
||||
[nomosda-min-size]: https://nomos-tech.notion.site/NomosDA-Specification
|
||||
[blend-min-size]: https://nomos-tech.notion.site/Blend-Protocol
|
||||
[cryptarchia-epoch-state]: https://nomos-tech.notion.site/Cryptarchia-v1-Protocol-Specification
|
||||
[origin-ref]: https://nomos-tech.notion.site/Bedrock-Genesis-Block-21d261aa09df80bb8dc3c768802eb527
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,572 +0,0 @@
|
||||
# BEDROCK-SERVICE-DECLARATION-PROTOCOL
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Bedrock Service Declaration Protocol |
|
||||
| Slug | 87 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Marcin Pawlowski <marcin@status.im> |
|
||||
| Contributors | Mehmet Gonen <mehmet@status.im>, Daniel Sanchez Quiros <danielsq@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Thomas Lavaur <thomaslavaur@status.im>, Gusto Bacvinka <augustinas@status.im>, David Rusu <davidrusu@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/bedrock-service-declaration-protocol.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/bedrock-service-declaration-protocol.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the Service Declaration Protocol (SDP),
|
||||
a mechanism enabling validators to declare their participation
|
||||
in specific protocols that require a known and agreed-upon list of participants.
|
||||
Examples include Data Availability (DA) and the Blend Network.
|
||||
SDP creates a single repository of identifiers
|
||||
used to establish secure communication between validators and provide services.
|
||||
Before being admitted to the repository,
|
||||
a validator proves that it has locked at least a minimum stake.
|
||||
|
||||
**Keywords:** service declaration, validator, stake, declaration, withdrawal,
|
||||
session, minimum stake, provider, locator, Blend Network, Data Availability
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Definitions
|
||||
|
||||
<!-- markdownlint-disable MD013 -->
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| SDP | Service Declaration Protocol for node participation in Nomos Services. |
|
||||
| Declaration | A message confirming a validator's willingness to provide a specific service. |
|
||||
| Service Type | The type of service being declared (e.g., BN for Blend Network, DA for Data Availability). |
|
||||
| Minimum Stake | The minimum amount of stake a node MUST lock to declare for a service. |
|
||||
| Session | A fixed-length window defined per service via `session_length`. |
|
||||
| Lock Period | The minimum time during which a declaration cannot be withdrawn. |
|
||||
| Inactivity Period | The maximum time during which an activation message MUST be sent. |
|
||||
| Retention Period | The time after which a declaration can be safely deleted. |
|
||||
| Provider ID | An Ed25519 public key used to sign SDP messages and establish secure links. |
|
||||
| ZK ID | A public key used for zero-knowledge operations including rewarding. |
|
||||
| Locator | The network address of a validator following the multiaddr scheme. |
|
||||
| Declaration ID | A unique identifier for a declaration, computed as a hash. |
|
||||
|
||||
<!-- markdownlint-enable MD013 -->
|
||||
|
||||
## Background
|
||||
|
||||
In many protocols, a known and agreed-upon list of participants is required.
|
||||
Examples include Data Availability and the Blend Network.
|
||||
SDP enables nodes to declare their eligibility to serve a specific service
|
||||
and withdraw their declarations.
|
||||
|
||||
### Requirements
|
||||
|
||||
The protocol requirements are:
|
||||
|
||||
- A declaration MUST be backed by confirmation
|
||||
that the sender owns a certain value of stake.
|
||||
- A declaration is valid until it is withdrawn
|
||||
or is not used for a service-specific amount of time.
|
||||
|
||||
### Actions Overview
|
||||
|
||||
The protocol defines the following actions:
|
||||
|
||||
- **Declare**: A node sends a declaration confirming its willingness
|
||||
to provide a specific service, backed by locking a threshold of stake.
|
||||
- **Active**: A node marks that its participation in the protocol is active
|
||||
according to the service-specific activity logic.
|
||||
This action enables the protocol to monitor the node's activity.
|
||||
It is crucial to exclude inactive nodes from the set of active nodes,
|
||||
as it enhances the stability of services.
|
||||
- **Withdraw**: A node withdraws its declaration and stops providing a service.
|
||||
|
||||
### Protocol Flow
|
||||
|
||||
1. A node sends a declaration message for a specific service
|
||||
and proves it has a minimum stake.
|
||||
|
||||
1. The declaration is registered on the blockchain ledger,
|
||||
and the node can commence its service
|
||||
according to the service-specific logic.
|
||||
|
||||
1. After providing the service for a service-specific period, the node confirms its activity.
|
||||
|
||||
1. The node MUST confirm its activity with a service-specific minimum frequency;
|
||||
otherwise, its declaration is inactive.
|
||||
|
||||
1. After the service-specific locking period,
|
||||
the node can send a withdrawal message,
|
||||
and its declaration is removed from the blockchain ledger
|
||||
(after the necessary retention period),
|
||||
meaning the node will no longer provide the service.
|
||||
|
||||
> **Note**: Protocol messages are subject to finality,
|
||||
> meaning messages become part of the immutable ledger after a delay.
|
||||
> The delay is defined by the consensus.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### Service Types
|
||||
|
||||
The following services are defined for service declaration:
|
||||
|
||||
- `BN`: Blend Network service.
|
||||
- `DA`: Data Availability service.
|
||||
|
||||
```python
|
||||
class ServiceType(Enum):
|
||||
BN = "BN" # Blend Network
|
||||
DA = "DA" # Data Availability
|
||||
```
|
||||
|
||||
A declaration can be generated for any of the services above.
|
||||
Any declaration that is not one of the above MUST be rejected.
|
||||
The number of services MAY grow in the future.
|
||||
|
||||
### Minimum Stake
|
||||
|
||||
The minimum stake is a global value
|
||||
defining the minimum stake a node MUST have to perform any service.
|
||||
|
||||
The `MinStake` structure holds the value of the stake `stake_threshold`
|
||||
and the block number at which it was set (`timestamp`).
|
||||
|
||||
```python
|
||||
class MinStake:
|
||||
stake_threshold: StakeThreshold
|
||||
timestamp: BlockNumber
|
||||
```
|
||||
|
||||
The `stake_thresholds` structure aggregates all defined `MinStake` values:
|
||||
|
||||
```python
|
||||
stake_thresholds: list[MinStake]
|
||||
```
|
||||
|
||||
### Service Parameters
|
||||
|
||||
The service parameters structure defines the parameters necessary
|
||||
for handling interaction between the protocol and services.
|
||||
Each service type MUST be mapped to the following parameters:
|
||||
|
||||
- `session_length`: The session length expressed as the number of blocks.
|
||||
Sessions are counted from block `timestamp`.
|
||||
- `lock_period`: The minimum time (as a number of sessions)
|
||||
during which the declaration cannot be withdrawn.
|
||||
This time MUST include the period necessary for finalizing the declaration
|
||||
and provision of a service for at least a single session.
|
||||
It can be expressed as blocks by multiplying by `session_length`.
|
||||
- `inactivity_period`: The maximum time (as a number of sessions)
|
||||
during which an activation message MUST be sent;
|
||||
otherwise, the declaration is considered inactive.
|
||||
It can be expressed as blocks by multiplying by `session_length`.
|
||||
- `retention_period`: The time (as a number of sessions)
|
||||
after which the declaration can be safely deleted
|
||||
by the Garbage Collection mechanism.
|
||||
It can be expressed as blocks by multiplying by `session_length`.
|
||||
- `timestamp`: The block number at which the parameter was set.
|
||||
|
||||
```python
|
||||
class ServiceParameters:
|
||||
session_length: NumberOfBlocks
|
||||
lock_period: NumberOfSessions
|
||||
inactivity_period: NumberOfSessions
|
||||
retention_period: NumberOfSessions
|
||||
timestamp: BlockNumber
|
||||
```
|
||||
|
||||
The `parameters` structure aggregates all defined `ServiceParameters` values:
|
||||
|
||||
```python
|
||||
parameters: list[ServiceParameters]
|
||||
```
|
||||
|
||||
### Session Tracking
|
||||
|
||||
A session is a fixed-length window defined per service
|
||||
via `ServiceParameters.session_length`.
|
||||
The session length MUST be at least `k`, the consensus finality parameter.
|
||||
|
||||
Session numbers start at 0 and are computed as follows:
|
||||
|
||||
```python
|
||||
def get_session_number(current_block_number, service_parameters):
|
||||
return current_block_number // service_parameters.session_length
|
||||
```
|
||||
|
||||
At the start of session `n`,
|
||||
each node takes a snapshot (`get_snapshot_at_block`) of the SDP registry
|
||||
at a specified block height from the finalized part of the chain:
|
||||
|
||||
```python
|
||||
def get_session_snapshot(session_number, service_parameters):
|
||||
if session_number < 2:
|
||||
# Take the genesis block for the first two sessions
|
||||
return get_snapshot_at_block(0)
|
||||
# Take the last block of session (session_number - 2) for the rest
|
||||
return get_snapshot_at_block(
|
||||
(session_number - 1) * service_parameters.session_length - 1
|
||||
)
|
||||
```
|
||||
|
||||
The function `get_snapshot_at_block(block_number)` returns the state
|
||||
of the SDP registry at `block_number`,
|
||||
including state changes made by that block.
|
||||
This snapshot defines the declaration state for the session—
|
||||
each snapshot updates the common view of the registry.
|
||||
Changes to the declaration registry take effect with a two-session delay:
|
||||
messages sent during session `n` are first reflected
|
||||
in the snapshot for session `n+2`.
|
||||
|
||||
Sessions 0 and 1 read the snapshot at block 0,
|
||||
because the chain has not yet progressed far enough
|
||||
to provide a later finalized block.
|
||||
|
||||
### Identifiers
|
||||
|
||||
The following identifiers are used for service-specific cryptographic operations:
|
||||
|
||||
- `provider_id`: Used to sign SDP messages
|
||||
and establish secure links between validators.
|
||||
It is an `Ed25519PublicKey`.
|
||||
- `zk_id`: Used for zero-knowledge operations by the validator,
|
||||
including rewarding (Zero Knowledge Signature Scheme).
|
||||
|
||||
### Locators
|
||||
|
||||
A `Locator` is the address of a validator
|
||||
used to establish secure communication between validators.
|
||||
It follows the multiaddr addressing scheme from libp2p,
|
||||
but it MUST contain only the location part
|
||||
and MUST NOT contain the node identity (`peer_id`).
|
||||
|
||||
The `provider_id` MUST be used as the node identity.
|
||||
Therefore, the `Locator` MUST be completed
|
||||
by adding the `provider_id` at the end of it,
|
||||
making the `Locator` usable in the context of libp2p.
|
||||
|
||||
The length of the `Locator` is restricted to 329 characters.
|
||||
|
||||
The syntax of every `Locator` entry MUST be validated.
|
||||
|
||||
Common formatting of every `Locator` MUST be applied
|
||||
to maintain its unambiguity and make deterministic ID generation work consistently.
|
||||
At a minimum, the `Locator` MUST contain only lowercase letters
|
||||
and every part of the address MUST be explicit (no implicit defaults).
|
||||
|
||||
### Declaration Message
|
||||
|
||||
The construction of the declaration message is as follows:
|
||||
|
||||
```python
|
||||
class DeclarationMessage:
|
||||
service_type: ServiceType
|
||||
locators: list[Locator]
|
||||
provider_id: Ed25519PublicKey
|
||||
locked_note_id: NoteId
|
||||
zk_id: ZkPublicKey
|
||||
```
|
||||
|
||||
The `locators` list length MUST be limited to reduce the potential for abuse.
|
||||
The length of the list MUST NOT be longer than 8.
|
||||
|
||||
The message MUST be signed by the `provider_id` key
|
||||
to prove ownership of the key used for network-level authentication.
|
||||
|
||||
The `locked_note_id` points to a locked note
|
||||
used for minimum stake threshold verification purposes.
|
||||
|
||||
The message MUST also be signed by the `zk_id` key.
|
||||
|
||||
### Declaration Storage
|
||||
|
||||
Only valid declaration messages can be stored on the blockchain ledger.
|
||||
The `DeclarationInfo` structure is defined as follows:
|
||||
|
||||
```python
|
||||
class DeclarationInfo:
|
||||
service: ServiceType
|
||||
provider_id: Ed25519PublicKey
|
||||
locked_note_id: NoteId
|
||||
zk_id: ZkPublicKey
|
||||
locators: list[Locator]
|
||||
created: BlockNumber
|
||||
active: BlockNumber
|
||||
withdrawn: BlockNumber
|
||||
nonce: Nonce
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- `service`: The service type of the declaration.
|
||||
- `provider_id`: An `Ed25519PublicKey` used to sign the message by the validator.
|
||||
- `locked_note_id`: A `NoteId` used for minimum stake threshold verification.
|
||||
- `zk_id`: Used for zero-knowledge operations including rewarding.
|
||||
- `locators`: A copy of the locators from the `DeclarationMessage`.
|
||||
- `created`: The block number of the block that contained the declaration.
|
||||
- `active`: The latest block number for which the active message was sent
|
||||
(set to `created` by default).
|
||||
- `withdrawn`: The block number for which the service declaration was withdrawn
|
||||
(set to 0 by default).
|
||||
- `nonce`: MUST be set to 0 for the declaration message
|
||||
and MUST increase monotonically with every subsequent message sent for the `declaration_id`.
|
||||
|
||||
The `declaration_id` (of type `DeclarationId`)
|
||||
is the unique identifier of `DeclarationInfo`,
|
||||
calculated as a hash of the concatenation of
|
||||
`service`, `provider_id`, `zk_id`, and `locators`.
|
||||
The hash function implementation is blake2b using 256 bits of output:
|
||||
|
||||
```python
|
||||
declaration_id = Hash(service || provider_id || zk_id || locators)
|
||||
```
|
||||
|
||||
The `declaration_id` is not stored as part of `DeclarationInfo`
|
||||
but is used to index it.
|
||||
All `DeclarationInfo` references are stored in `declarations`
|
||||
and are indexed by `declaration_id`:
|
||||
|
||||
```python
|
||||
declarations: list[declaration_id]
|
||||
```
|
||||
|
||||
### Active Message
|
||||
|
||||
The construction of the active message is as follows:
|
||||
|
||||
```python
|
||||
class ActiveMessage:
|
||||
declaration_id: DeclarationId
|
||||
nonce: Nonce
|
||||
metadata: Metadata
|
||||
```
|
||||
|
||||
Where `metadata` is service-specific node activeness metadata.
|
||||
|
||||
The message MUST be signed by the `zk_id` key
|
||||
associated with the `declaration_id`.
|
||||
|
||||
The `nonce` MUST increase monotonically
|
||||
with every subsequent message sent for the `declaration_id`.
|
||||
|
||||
### Withdraw Message
|
||||
|
||||
The construction of the withdraw message is as follows:
|
||||
|
||||
```python
|
||||
class WithdrawMessage:
|
||||
declaration_id: DeclarationId
|
||||
locked_note_id: NoteId
|
||||
nonce: Nonce
|
||||
```
|
||||
|
||||
The message MUST be signed by the `zk_id` key associated with the `declaration_id`.
|
||||
|
||||
The `locked_note_id` is a `NoteId`
|
||||
that was used for minimum stake threshold verification purposes
|
||||
and will be unlocked after withdrawal.
|
||||
|
||||
The `nonce` MUST increase monotonically
|
||||
with every subsequent message sent for the `declaration_id`.
|
||||
|
||||
### Indexing
|
||||
|
||||
Every event MUST be correctly indexed
|
||||
to enable lighter synchronization of the changes.
|
||||
Events are indexed by `EventType`, `ServiceType`, and `Timestamp`,
|
||||
where `EventType = { "created", "active", "withdrawn" }`
|
||||
corresponds to the type of message:
|
||||
|
||||
```python
|
||||
events = {
|
||||
event_type: {
|
||||
service_type: {
|
||||
timestamp: {
|
||||
declarations: list[declaration_id]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Protocol Actions
|
||||
|
||||
### Declare
|
||||
|
||||
The Declare action associates a validator with a service it wants to provide.
|
||||
It requires sending a valid `DeclarationMessage`,
|
||||
which is then processed and stored.
|
||||
|
||||
The declaration message is considered valid when all of the following are met:
|
||||
|
||||
- The sender meets the stake requirements and its `locked_note_id` is valid.
|
||||
- The `declaration_id` is unique.
|
||||
- The sender knows the secret behind the `provider_id` identifier.
|
||||
- The length of the `locators` list MUST NOT be longer than 8.
|
||||
- The `nonce` increases monotonically.
|
||||
|
||||
If all conditions are fulfilled,
|
||||
the message is stored on the blockchain ledger;
|
||||
otherwise, the message is discarded.
|
||||
|
||||
### Active
|
||||
|
||||
The Active action enables marking the provider as actively providing a service.
|
||||
It requires sending a valid `ActiveMessage`,
|
||||
which is relayed to the service-specific node activity logic.
|
||||
|
||||
The Active action updates the `active` value of the `DeclarationInfo`,
|
||||
which also activates inactive (but not expired) providers.
|
||||
|
||||
The SDP active action logic is:
|
||||
|
||||
1. A node sends an `ActiveMessage` transaction.
|
||||
|
||||
1. The `ActiveMessage` is verified by the SDP logic:
|
||||
1. The `declaration_id` returns an existing `DeclarationInfo`.
|
||||
1. The transaction containing `ActiveMessage` is signed by the `zk_id`.
|
||||
1. The `withdrawn` from the `DeclarationInfo` is set to zero.
|
||||
1. The `nonce` increases monotonically.
|
||||
|
||||
1. If any of these conditions fail, discard the message and stop processing.
|
||||
|
||||
1. The message is processed by the service-specific activity logic
|
||||
alongside the `active` value indicating the period
|
||||
since the last active message was sent.
|
||||
The `active` value comes from the `DeclarationInfo`.
|
||||
|
||||
1. If the service-specific activity logic approves the node active message,
|
||||
then the `active` field of the `DeclarationInfo`
|
||||
is set to the current block height.
|
||||
|
||||
### Withdraw
|
||||
|
||||
The Withdraw action enables withdrawal of a service declaration.
|
||||
It requires sending a valid `WithdrawMessage`.
|
||||
The withdrawal cannot happen before the end of the locking period,
|
||||
defined as the number of blocks counted since `created`.
|
||||
This lock period is stored as `lock_period` in the Service Parameters.
|
||||
|
||||
The logic of the withdraw action is:
|
||||
|
||||
1. A node sends a `WithdrawMessage` transaction.
|
||||
|
||||
1. The `WithdrawMessage` is verified by the SDP logic:
|
||||
1. The `declaration_id` returns an existing `DeclarationInfo`.
|
||||
1. The transaction containing `WithdrawMessage` is signed by the `zk_id`.
|
||||
1. The `withdrawn` from `DeclarationInfo` is set to zero.
|
||||
1. The `nonce` increases monotonically.
|
||||
|
||||
1. If any of the above is not correct, discard the message and stop.
|
||||
|
||||
1. Set the `withdrawn` from the `DeclarationInfo` to the current block height.
|
||||
|
||||
1. Unlock the stake (release the `locked_note_id`).
|
||||
|
||||
### Garbage Collection
|
||||
|
||||
The protocol requires a garbage collection mechanism
|
||||
that periodically removes unused `DeclarationInfo` entries.
|
||||
|
||||
The logic of garbage collection is:
|
||||
|
||||
For every `DeclarationInfo` in the `declarations` set,
|
||||
remove the entry if either:
|
||||
|
||||
1. The entry is past the retention period:
|
||||
`withdrawn + (retention_period * session_length) < current_block_height`.
|
||||
|
||||
1. The entry is inactive beyond the inactivity and retention periods:
|
||||
`active + (inactivity_period + retention_period) * session_length < current_block_height`.
|
||||
|
||||
### Query Interface
|
||||
|
||||
The protocol MUST enable querying the blockchain ledger
|
||||
with at least the following queries:
|
||||
|
||||
- `GetAllProviderId(timestamp)`:
|
||||
Returns all `provider_id`s associated with the timestamp.
|
||||
- `GetAllProviderIdSince(timestamp)`:
|
||||
Returns all `provider_id`s since the timestamp.
|
||||
- `GetAllDeclarationInfo(timestamp)`:
|
||||
Returns all `DeclarationInfo` entries associated with the timestamp.
|
||||
- `GetAllDeclarationInfoSince(timestamp)`:
|
||||
Returns all `DeclarationInfo` entries since the timestamp.
|
||||
- `GetDeclarationInfo(provider_id)`:
|
||||
Returns the `DeclarationInfo` entry identified by the `provider_id`.
|
||||
- `GetDeclarationInfo(declaration_id)`:
|
||||
Returns the `DeclarationInfo` entry identified by the `declaration_id`.
|
||||
- `GetAllServiceParameters(timestamp)`:
|
||||
Returns all entries of the `ServiceParameters` store for the timestamp.
|
||||
- `GetAllServiceParametersSince(timestamp)`:
|
||||
Returns all entries of the `ServiceParameters` store since the timestamp.
|
||||
- `GetServiceParameters(service_type, timestamp)`:
|
||||
Returns the service parameter entry for a `service_type` at a timestamp.
|
||||
- `GetMinStake(timestamp)`:
|
||||
Returns the `MinStake` structure at the requested timestamp.
|
||||
- `GetMinStakeSince(timestamp)`:
|
||||
Returns a set of `MinStake` structures since the requested timestamp.
|
||||
|
||||
The query MUST return an error
|
||||
if the retention period for the declaration has passed
|
||||
and the requested information is not available.
|
||||
|
||||
The list of queries MAY be extended.
|
||||
|
||||
Every query MUST return information for a finalized state only.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Stake Requirements
|
||||
|
||||
Validators MUST lock a minimum stake before declaring for a service.
|
||||
This prevents Sybil attacks
|
||||
by ensuring economic commitment to the network.
|
||||
|
||||
### Message Authentication
|
||||
|
||||
All SDP messages MUST be cryptographically signed:
|
||||
|
||||
- `DeclarationMessage` MUST be signed by both `provider_id` and `zk_id`.
|
||||
- `ActiveMessage` MUST be signed by `zk_id`.
|
||||
- `WithdrawMessage` MUST be signed by `zk_id`.
|
||||
|
||||
### Nonce Monotonicity
|
||||
|
||||
The `nonce` MUST increase monotonically for each `declaration_id`
|
||||
to prevent replay attacks.
|
||||
|
||||
### Locator Validation
|
||||
|
||||
The syntax of every `Locator` entry MUST be validated
|
||||
to prevent malformed addresses from being registered.
|
||||
The length restriction of 329 characters
|
||||
and the limit of 8 locators per declaration
|
||||
prevent resource exhaustion attacks.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [BEDROCK-MANTLE-SPECIFICATION][mantle] - Mantle Transaction and Operation specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [Service Declaration Protocol][origin-ref] - Original specification document
|
||||
- [libp2p multiaddr][multiaddr] - Multiaddr addressing scheme
|
||||
|
||||
[mantle]: https://nomos-tech.notion.site/v1-1-Mantle-Specification-269261aa09df80dda501f568697930fd
|
||||
[origin-ref]: https://nomos-tech.notion.site/Service-Declaration-Protocol-1fd261aa09df819ca9f8eb2bdfd4ec1d
|
||||
[multiaddr]: https://github.com/multiformats/multiaddr
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,185 +0,0 @@
|
||||
# BEDROCK-SERVICE-REWARD-DISTRIBUTION
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Bedrock v1.2 Service Reward Distribution Protocol |
|
||||
| Slug | 86 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Thomas Lavaur <thomaslavaur@status.im> |
|
||||
| Contributors | David Rusu <davidrusu@status.im>, Mehmet Gonen <mehmet@status.im>, Marcin Pawlowski <marcin@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/bedrock-service-reward-distribution.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/bedrock-service-reward-distribution.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the Service Reward Distribution Protocol
|
||||
for distributing rewards to validators based on their participation
|
||||
in Nomos services such as Data Availability and Blend Network.
|
||||
The protocol enables deterministic, efficient, and verifiable reward distribution
|
||||
to validators based on their activity within each service.
|
||||
|
||||
**Keywords:** Bedrock, rewards, services, validators, Data Availability,
|
||||
Blend Network, session, activity
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Definitions
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| Session | A fixed number of blocks during which the validator set remains unchanged. |
|
||||
| Service Validator | A node participating in a service (DA or Blend Network). |
|
||||
| Activity Message | A signed message attesting to a validator's participation. |
|
||||
| zk_id | The zero-knowledge identity of a validator from SDP declarations. |
|
||||
| SDP_ACTIVE | A Mantle Operation used to submit activity attestations. |
|
||||
|
||||
## Background
|
||||
|
||||
Nomos relies on multiple services,
|
||||
including the Data Availability and Blend Network,
|
||||
each operated by independent validator sets.
|
||||
For sustainability and fairness,
|
||||
these services must compensate service validators based on their participation.
|
||||
Validators first declare their participation through
|
||||
[Service Declaration Protocol][sdp].
|
||||
|
||||
Each service defines:
|
||||
|
||||
- The session length, a fixed number of blocks
|
||||
during which its validator set remains unchanged.
|
||||
- The validator activity rule that distinguishes
|
||||
between active and inactive validators.
|
||||
- The reward formula for distributing the session's rewards
|
||||
at the end of the session.
|
||||
|
||||
The protocol unfolds over three key phases, aligned with validator sessions:
|
||||
|
||||
1. **Service Activity Tracking** (Session N+1):
|
||||
Service validators submit signed activity messages
|
||||
to attest to their participation in session N through a Mantle Transaction,
|
||||
including an activity message
|
||||
(see [Mantle Specification - SDP_ACTIVE][mantle-sdp-active]).
|
||||
|
||||
1. **Service Reward Derivation** (End of Session N+1):
|
||||
Nodes compute each validator's reward based on validated activity messages
|
||||
and the different service reward policies.
|
||||
|
||||
1. **Service Reward Distribution** (First block of session N+2):
|
||||
Rewards are distributed to validators marked as active for the service.
|
||||
This is done by inserting new notes in the ledger
|
||||
corresponding to the reward amount for each active validator.
|
||||
|
||||
**Core Properties:**
|
||||
|
||||
- Service rewards are distributed to the `zk_id` from validator SDP declarations.
|
||||
- Minimal Block Overhead: rewards are directly added to the ledger
|
||||
without involving Mantle Transactions.
|
||||
|
||||
## Protocol
|
||||
|
||||
### Sessions
|
||||
|
||||
Each service defines its own session length (e.g., 10000 blocks), during which:
|
||||
|
||||
- The service validator set remains static.
|
||||
- Activity criteria and reward policy are fixed.
|
||||
|
||||
### Activity Tracking
|
||||
|
||||
Throughout session N+1, the block proposers integrate Mantle Transactions
|
||||
containing `SDP_ACTIVE` Operations.
|
||||
These transactions originate from service validators
|
||||
and are used to derive their activity
|
||||
according to the service-provided policy.
|
||||
The protocol does not prescribe a unique activity rule:
|
||||
each service defines what qualifies as valid participation,
|
||||
enabling flexibility across different services.
|
||||
|
||||
Service validators are economically incentivized to participate actively
|
||||
since only active validators will be rewarded.
|
||||
Moreover, by decoupling activity submission from reward calculation,
|
||||
the system remains robust to network latency.
|
||||
|
||||
This generalized mechanism accommodates a wide range of services
|
||||
without requiring specialized infrastructure.
|
||||
It enables services to evolve their own activity rules independently
|
||||
while preserving a shared framework for reward distribution.
|
||||
|
||||
### Service Reward Calculation
|
||||
|
||||
At the end of session N+1,
|
||||
service rewards for the validator `n` for the session N
|
||||
are computed by the different services
|
||||
taking as input the rewards of the session:
|
||||
|
||||
```text
|
||||
Rewards^n := serviceReward(n, Rewards_Session)
|
||||
```
|
||||
|
||||
Where `Rewards_Session` are the total rewards of session N.
|
||||
The `Rewards_Session` is determined by the service,
|
||||
which calculates how much each service receives
|
||||
based on fees burnt during session N and the blockchain's state.
|
||||
`Rewards^n` is stored as an array that maps each validator's `zk_id`
|
||||
to their allocated reward.
|
||||
|
||||
### Service Reward Distribution
|
||||
|
||||
Starting immediately after session N+1,
|
||||
service rewards are distributed in the first block of session N+2.
|
||||
The rewards are inserted directly in the ledger
|
||||
without triggering any Mantle validation.
|
||||
|
||||
The note ID is computed using the result of
|
||||
`zkhash(FiniteField(ServiceType, byte_order="little", modulus=p) || session_number)`
|
||||
as the transaction hash.
|
||||
The output number corresponds to the position of the `zk_id`
|
||||
when sorted in ascending order.
|
||||
|
||||
The reward MUST:
|
||||
|
||||
- Transfer the correct reward amount
|
||||
according to [Service Reward Calculation](#service-reward-calculation).
|
||||
- Be sent to the public key `zk_id` of the validator
|
||||
registered during declaration of the service.
|
||||
- Be distributed into a single note if several rewards share the same `zk_id`.
|
||||
- Be executed identically by every node processing the first block of session N+2.
|
||||
This happens by inserting notes in the ledger in ascending order of `zk_id`.
|
||||
|
||||
Nodes indirectly verify the correct inclusion of rewards
|
||||
because all consensus-validating nodes must maintain the same ledger view
|
||||
to derive the latest ledger root,
|
||||
which serves as input for verifying the [Proof of Leadership][pol].
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Service Declaration Protocol][sdp] - Protocol for declaring service participation
|
||||
- [Mantle Specification][mantle-sdp-active] - SDP_ACTIVE operation specification
|
||||
- [Proof of Leadership][pol] - Proof of Leadership specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [v1.2 Service Reward Distribution Protocol][origin-ref] - Original specification document
|
||||
|
||||
[sdp]: https://nomos-tech.notion.site/Service-Declaration-Protocol
|
||||
[mantle-sdp-active]: https://nomos-tech.notion.site/Mantle-Specification
|
||||
[pol]: https://nomos-tech.notion.site/Proof-of-Leadership
|
||||
[origin-ref]: https://nomos-tech.notion.site/v1-2-Service-Reward-Distribution-Protocol-26b261aa09df8032861dddf01182e242
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,542 +0,0 @@
|
||||
# BEDROCK-V1-1-BLOCK-CONSTRUCTION
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Bedrock v1.1 Block Construction, Validation and Execution Specification |
|
||||
| Slug | 93 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Marcin Pawlowski <marcin@status.im> |
|
||||
| Contributors | Thomas Lavaur <thomaslavaur@status.im>, Daniel Sanchez Quiros <danielsq@status.im>, David Rusu <davidrusu@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Mehmet Gonen <mehmet@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/blockchain/raw/bedrock-v1.1-block-construction.md) — chore: add math support (#287)
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/bedrock-v1.1-block-construction.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/bedrock-v1.1-block-construction.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the construction, validation,
|
||||
and execution of block proposals in the Nomos blockchain.
|
||||
It describes how block proposals contain references to transactions
|
||||
rather than complete transactions,
|
||||
compressing the proposal size from up to 1 MB down to 33 kB
|
||||
to save bandwidth necessary to broadcast new blocks.
|
||||
|
||||
**Keywords:** Bedrock, block construction, validation, execution,
|
||||
leader, transaction, Proof of Leadership
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Definitions
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| Leader | A node elected through the leader lottery to construct a new block. |
|
||||
| Block Builder | The leader node that constructs a new block proposal. |
|
||||
| Block Proposer | The leader node that shares the constructed block with other network members. |
|
||||
| Block Proposal | A message structure containing a header and references to transactions. |
|
||||
| Proof of Leadership (PoL) | A proof confirming that a node is indeed the elected leader. |
|
||||
| Transaction Maturity | The assumption that transactions have had enough time to spread across the network. |
|
||||
| Validator | A node that validates and executes block proposals. |
|
||||
|
||||
## High-level Flow
|
||||
|
||||
This section presents a high-level description of the block lifecycle.
|
||||
The main focus of this section is to build an intuition
|
||||
on the block construction, validation, and execution.
|
||||
|
||||
1. A leader is selected. The leader becomes a block builder.
|
||||
|
||||
1. The block builder **constructs** a block proposal.
|
||||
|
||||
1. The block builder selects the latest block (parent)
|
||||
as the reference point for the chain state update.
|
||||
|
||||
1. The block builder constructs references to the deterministically generated
|
||||
Mantle Transactions that execute the [Service Reward Distribution Protocol][service-reward],
|
||||
if such transactions can be constructed.
|
||||
For example, there is no need to distribute rewards
|
||||
when all rewards have already been distributed.
|
||||
|
||||
1. The block builder selects valid Mantle Transactions
|
||||
(as defined in [Mantle Specification][mantle-spec])
|
||||
from its mempool and includes references to them in the proposal.
|
||||
|
||||
1. The block builder populates the block header of the block proposal.
|
||||
|
||||
1. The block proposer sends the block proposal to the Blend network.
|
||||
|
||||
1. The validators receive the block proposal.
|
||||
|
||||
1. The validators **validate** the block proposal.
|
||||
|
||||
1. They validate the block header.
|
||||
|
||||
1. They verify distribution of service rewards through Mantle Transactions
|
||||
as specified in [Service Reward Distribution Protocol][service-reward].
|
||||
This is done by independently deriving the distribution transaction
|
||||
and confirming that it matches the first reference,
|
||||
if there are rewards to be distributed.
|
||||
|
||||
1. They retrieve complete transactions from their mempool
|
||||
that are referred in the block.
|
||||
|
||||
1. They validate each transaction included in the block.
|
||||
|
||||
1. The validators **execute** the block proposal.
|
||||
|
||||
1. They derive the new blockchain state from the previous one
|
||||
by executing transactions as defined in [Mantle Specification][mantle-spec].
|
||||
|
||||
1. They update the different variables that need to be maintained over time.
|
||||
|
||||
## Constructions
|
||||
|
||||
### Hash
|
||||
|
||||
This specification uses two hashing algorithms
|
||||
that have the same output length of 256 bits (32 bytes)
|
||||
that are Poseidon2 and Blake2b.
|
||||
|
||||
### Block Proposal
|
||||
|
||||
A block proposal,
|
||||
instead of containing complete Mantle Transactions of an unlimited size,
|
||||
contains references of fixed size to the transactions.
|
||||
Therefore, the size of the proposal is constant and it is 33129 bytes.
|
||||
|
||||
The following message structure is defined:
|
||||
|
||||
```python
|
||||
class Proposal: # 33129 bytes
|
||||
header: Header # 297 bytes
|
||||
references: References # 32768 bytes
|
||||
signature: Ed25519Signature # 64 bytes
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- `header` is the header of the proposal; defined below: [Header](#header).
|
||||
- `references` is a set of 1024 references to transactions of a `hash` type;
|
||||
the size of the `hash` type is 32 bytes
|
||||
and is the transaction hash as defined in [Mantle Specification - Mantle Transaction][mantle-tx].
|
||||
- `signature` is the signature of the complete `header` using the `leader_key`
|
||||
from the `ProofOfLeadership`;
|
||||
the size of the `Ed25519Signature` type is 64 bytes.
|
||||
|
||||
> **Note**: The length of the `references` list must be preserved
|
||||
> to maintain the message's indistinguishability in the Blend protocol.
|
||||
> Therefore, the list must be padded with zeros when necessary.
|
||||
|
||||
### Header
|
||||
|
||||
```python
|
||||
class Header: # 297 bytes
|
||||
    bedrock_version: byte # 1 byte
|
||||
parent_block: hash # 32 bytes
|
||||
slot: SlotNumber # 8 bytes
|
||||
block_root: hash # 32 bytes
|
||||
proof_of_leadership: ProofOfLeadership # 224 bytes
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- `bedrock_version` is the version of the proposal message structure
|
||||
that supports other protocols defined in [Bedrock Specification][bedrock-spec];
|
||||
the size of it is 1 byte and is fixed to `0x01`.
|
||||
- `parent_block` is the block ID ([Cryptarchia v1 Protocol Specification][cryptarchia-spec])
|
||||
of the parent block, validated and accepted by the block builder.
|
||||
It is used for the derivation of the `AgedLedger` and `LatestLedger` values
|
||||
necessary for validating the PoL;
|
||||
the size of the `hash` is 32 bytes.
|
||||
- `slot` is the consensus slot number;
|
||||
the size of the `SlotNumber` type is 8 bytes.
|
||||
- `block_root` is the root of the Merkle tree constructed from transaction hashes
|
||||
(defined in [Mantle Specification - Mantle Transaction][mantle-tx])
|
||||
used for constructing the `references` list in the `transactions`;
|
||||
the size of the `hash` is 32 bytes.
|
||||
- `proof_of_leadership` is the proof confirming that the sender is the leader;
|
||||
defined below: [Proof of Leadership](#proof-of-leadership).
|
||||
|
||||
### Block References
|
||||
|
||||
```python
|
||||
class References: # 32768 bytes
|
||||
service_reward: list[zkhash] # 1*32 bytes
|
||||
    mempool_transactions: list[zkhash] # (1024-len(service_reward))*32 bytes
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- `service_reward` is a set of up to 1 reference
|
||||
to a reward transaction of a `zkhash` type;
|
||||
the size of the `zkhash` type is 32 bytes
|
||||
and is the transaction hash as defined in
|
||||
[Mantle Specification - Mantle Transaction][mantle-tx].
|
||||
- `mempool_transactions` is a set of up to 1024 references
|
||||
to transactions of a `zkhash` type;
|
||||
the size of the `zkhash` type is 32 bytes
|
||||
and is the transaction hash as defined in
|
||||
[Mantle Specification - Mantle Transaction][mantle-tx].
|
||||
|
||||
The `service_reward` transaction is created deterministically by the leader
|
||||
and is not obtained from the mempool.
|
||||
If this transaction were obtained from the mempool,
|
||||
it could expose the leader's identity as the transaction creator.
|
||||
To protect the leader's identity,
|
||||
only the `service_reward` reference is included in the proposal,
|
||||
and it is derived again by the nodes verifying the block.
|
||||
|
||||
The `service_reward` transaction is a Service Rewards Distribution Transaction
|
||||
that distributes service rewards.
|
||||
It is a Mantle Transaction with no input
|
||||
and up to `service_count x 4` outputs,
|
||||
`service_count` being the number of services (global parameter).
|
||||
The outputs represent the validators rewarded (up to 4 per service).
|
||||
|
||||
If the `service_reward` transaction cannot be created,
|
||||
then nothing is added to the list.
|
||||
Therefore, the `service_reward` list is allowed to have a length of 0.
|
||||
|
||||
### Proof of Leadership
|
||||
|
||||
```python
|
||||
class ProofOfLeadership: # 224 bytes
|
||||
leader_voucher: RewardVoucher # 32 bytes
|
||||
entropy_contribution: zkhash # 32 bytes
|
||||
proof: ProofOfLeadership # 128 bytes
|
||||
leader_key: Ed25519PublicKey # 32 bytes
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- `leader_voucher` is the voucher value
|
||||
used by the leader to retrieve the reward for the proposal;
|
||||
the size of the `RewardVoucher` is 32 bytes.
|
||||
- `entropy_contribution` is the output of the PoL contribution
|
||||
for Cryptarchia entropy;
|
||||
the size of the `zkhash` type is 32 bytes.
|
||||
- `proof` is the proof confirming that the proposal
|
||||
is constructed by the leader;
|
||||
the size of the `ProofOfLeadership` type is 128 bytes
|
||||
(2 compressed G1 and 1 compressed G2 BN256 elements).
|
||||
- `leader_key` is the one-time `Ed25519PublicKey`
|
||||
used for signing the `Proposal`.
|
||||
This binds the content of the proposal with the `ProofOfLeadership`;
|
||||
the size of the `Ed25519PublicKey` type is 32 bytes.
|
||||
|
||||
## Proposal Construction
|
||||
|
||||
This section explains how the block proposal structure presented above
|
||||
is populated by the consensus leader.
|
||||
|
||||
The block proposal is constructed by the leader of the current slot.
|
||||
The node becomes a leader only after successfully generating a valid PoL
|
||||
for a given `(Epoch, Slot)`.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before constructing the proposal, the block builder must:
|
||||
|
||||
1. Select a valid parent block referenced by `ParentBlock`
|
||||
on which they will extend the chain.
|
||||
|
||||
1. Derive the required Ledger state snapshots `AgedLedger` and `LatestLedger`
|
||||
from the state of the chain including the last block.
|
||||
|
||||
1. Select a valid unspent note winning the PoL.
|
||||
|
||||
1. Generate a valid PoL proving leadership eligibility for `(Epoch, Slot)`
|
||||
based on the selected note.
|
||||
Attach the PoL to a one-time Ed25519 public key used to sign the block proposal.
|
||||
|
||||
Only after the PoL is generated can the block proposal be constructed
|
||||
(see [Proof of Leadership Specification][pol-spec]).
|
||||
|
||||
### Construction Procedure
|
||||
|
||||
1. Initialize proposal metadata with the last known state of the blockchain.
|
||||
Set the:
|
||||
|
||||
- `header`:
|
||||
- `bedrock_version`
|
||||
- `parent_block`
|
||||
- `slot`
|
||||
- `block_root`
|
||||
- `proof_of_leadership`:
|
||||
- `leader_voucher`
|
||||
- `entropy_contribution`
|
||||
- `proof`
|
||||
- `leader_key`
|
||||
|
||||
1. Construct the `service_reward` object:
|
||||
|
||||
1. If there are service rewards to be distributed,
|
||||
construct the transaction that distributes
|
||||
the service rewards from previous session
|
||||
and add its reference to the `service_reward` list.
|
||||
This transaction must be computed locally,
|
||||
do not disseminate this transaction.
|
||||
|
||||
1. Construct the `mempool_transactions` object:
|
||||
|
||||
1. Select Mantle transactions:
|
||||
|
||||
- Choose up to `1024-len(service_reward)` valid `SignedMantleTx`
|
||||
from the local mempool.
|
||||
|
||||
- Ensure each transaction:
|
||||
|
||||
- Is valid according to [Mantle Specification][mantle-spec].
|
||||
|
||||
- Has no conflicts with others
|
||||
(e.g., two transactions trying to spend the same note).
|
||||
|
||||
1. Derive references values:
|
||||
|
||||
```python
|
||||
references: list[zkhash] = [mantle_txhash(tx) for tx in service_reward + mempool_transactions]
|
||||
```
|
||||
|
||||
1. Compute the `header.block_root` as the root of the Merkle tree constructed
|
||||
from the `list(service_reward) + mempool_transactions` transactions
|
||||
used to build `references`.
|
||||
|
||||
1. Sign the block proposal header.
|
||||
|
||||
```python
|
||||
signature = Ed25519.sign(leader_secret_key, header)
|
||||
```
|
||||
|
||||
1. Assemble the block proposal.
|
||||
|
||||
```python
|
||||
proposal = Proposal( header, references, signature )
|
||||
```
|
||||
|
||||
The PoL must have been generated beforehand
|
||||
and bound to the same Ledger view as mentioned in the [Prerequisites](#prerequisites).
|
||||
|
||||
The constructed proposal can now be broadcast to the network for validation.
|
||||
|
||||
## Block Proposal Reconstruction
|
||||
|
||||
Given a block proposal, this specification assumes *transaction maturity*.
|
||||
This means that the block proposal must include transactions from the mempool
|
||||
that have had enough time to spread across the network to reach all nodes.
|
||||
This ensures that transactions are widely known and recognized
|
||||
before block reconstruction.
|
||||
|
||||
This transaction maturity assumption holds true
|
||||
because the block proposal must be sent through the Blend Network
|
||||
before it reaches validators and can be reconstructed.
|
||||
The Blend Network introduces significant delay,
|
||||
ensuring that transactions referenced in the proposal
|
||||
have reached all network participants.
|
||||
|
||||
This approach is crucial for maintaining smooth network operation
|
||||
and reducing the risk that proposals get rejected
|
||||
due to transactions being unavailable to some validators.
|
||||
Moreover, by increasing the number of nodes that have seen the transaction,
|
||||
anonymity is also enhanced as the set of nodes with the same view is larger.
|
||||
This may result in increased difficulty—or even practical prevention—of
|
||||
executing deanonymization attacks such as tagging attacks.
|
||||
|
||||
Upon receipt of a block proposal,
|
||||
validators must confirm the presence of all referenced transactions
|
||||
within their local mempool.
|
||||
This verification is an absolute requirement—if even a single referenced transaction
|
||||
is missing from the validator's mempool, the entire proposal must be rejected.
|
||||
This stringent validation protocol ensures only widely-distributed transactions
|
||||
are included in the blockchain,
|
||||
safeguarding against potential network state fragmentation.
|
||||
|
||||
The process works as follows:
|
||||
|
||||
1. Transaction is added to the node mempool.
|
||||
|
||||
1. Node sends the transaction to all its neighbors.
|
||||
|
||||
1. Neighbors add the transaction to their own mempools
|
||||
and propagate it to their neighbors—transaction
|
||||
is gossiped throughout the network.
|
||||
|
||||
1. Block builder selects a transaction from its local mempool,
|
||||
which is guaranteed to be propagated through the network
|
||||
due to steps 1-3.
|
||||
|
||||
1. Block builder constructs a block proposal
|
||||
with references to selected transactions.
|
||||
|
||||
1. Block proposal is sent through the Blend Network,
|
||||
which requires multiple rounds of gossiping.
|
||||
This introduces a delay that ensures the transaction
|
||||
has reached most of the network participants' mempools.
|
||||
|
||||
1. Block proposal is received by validators.
|
||||
|
||||
1. Validators check their local mempools
|
||||
for all referenced transactions from the proposal.
|
||||
|
||||
1. If any transaction is missing, the entire proposal is rejected.
|
||||
|
||||
1. If all transactions are present,
|
||||
the block proposal is reconstructed and proceeds to further validation steps.
|
||||
|
||||
## Block Proposal Validation
|
||||
|
||||
This section defines the procedure followed by a Nomos node
|
||||
to validate a received block proposal.
|
||||
|
||||
Given a `Proposal`, a proposed block consisting of a `header` and `references`.
|
||||
This block proposal is considered valid if the following conditions are met:
|
||||
|
||||
### Block Validation
|
||||
|
||||
The `Proposal` must satisfy the rules defined in
|
||||
[Cryptarchia v1 Protocol Specification - Block Header Validation][cryptarchia-block-validation].
|
||||
|
||||
### Block Proposal Reconstruction Validation
|
||||
|
||||
The `references` must refer to either a `service_reward` transaction
|
||||
that is locally derivable
|
||||
or to existing `mempool_transaction` entries
|
||||
that are retrievable from the node's local mempool.
|
||||
|
||||
### Mempool Transactions Validation
|
||||
|
||||
`mempool_transactions` must refer to a valid sequence
|
||||
of Mantle Transactions from the mempool.
|
||||
Each transaction must be valid according to the rules
|
||||
defined in the [Mantle Specification][mantle-spec].
|
||||
In order to verify ZK proofs,
|
||||
they are batched for verification as explained in
|
||||
[Batch verification of ZK proofs](#batch-verification-of-zk-proofs)
|
||||
to get better performance.
|
||||
|
||||
### Rewards Validation
|
||||
|
||||
1. Check if the first reference matches a deterministically derived
|
||||
Service Rewards Distribution Transaction
|
||||
that distributes previous session service fees
|
||||
as defined in [Service Reward Distribution Protocol][service-reward].
|
||||
It should take no input and output up to `service_count * 4` reward notes
|
||||
distributed to the correct validators.
|
||||
|
||||
1. If the above rewarding transactions cannot be derived,
|
||||
then the first reference must refer to a `mempool_transaction`.
|
||||
|
||||
If any of the above checks fail, the block proposal must be rejected.
|
||||
|
||||
## Block Execution
|
||||
|
||||
This section specifies how a Nomos node executes a valid block proposal
|
||||
to update its local state.
|
||||
|
||||
Given a `ValidBlock` that has successfully passed proposal validation,
|
||||
the node must:
|
||||
|
||||
1. Append the `leader_voucher` contained in the block to the set of reward vouchers
|
||||
**when the following epoch starts**.
|
||||
|
||||
1. Execute the Mantle Transactions included in the block sequentially,
|
||||
using the execution rules defined in the [Mantle Specification][mantle-spec].
|
||||
|
||||
## Annex
|
||||
|
||||
### Batch verification of ZK proofs
|
||||
|
||||
#### Blob Samples
|
||||
|
||||
1. For each sample the verifier follows the classic cryptographic verification procedure
|
||||
as described in [NomosDA Cryptographic Protocol - Verification][nomosda-verification]
|
||||
except the last step, once the verifier has a single commitment C^(i),
|
||||
an aggregated element v^(i) at position u^(i) and one proof π^(i) for each sample.
|
||||
|
||||
1. The verifier draws a random value for each sample r_i ← $F_p$.
|
||||
|
||||
1. The verifier computes:
|
||||
|
||||
1. C' := Σ(i=1 to k) r_i · C^(i)
|
||||
|
||||
1. v' := Σ(i=1 to k) r_i · v^(i)
|
||||
|
||||
1. π' := Σ(i=1 to k) r_i · π^(i)
|
||||
|
||||
1. u' := Σ(i=1 to k) r_i · u^(i) · π^(i)
|
||||
|
||||
1. They test if e(C' - v' · G1 + u', G2) = e(π', τ · G2).
|
||||
|
||||
#### Proofs of Claim
|
||||
|
||||
1. For each proof of Claim the verifier collects
|
||||
the classic Groth16 elements required for verification.
|
||||
It includes the proof π^(i), and the public values x_j^(i)
|
||||
for each proof of claim.
|
||||
|
||||
1. The verifier draws one random value for each proof r_i ← $F_p$.
|
||||
|
||||
1. The verifier computes:
|
||||
|
||||
1. π'_j := Σ(i=1 to k) r_i · π_j^(i) for j ∈ {A, B, C}.
|
||||
|
||||
1. r' := Σ(i=1 to k) r_i
|
||||
|
||||
1. IC := r' · Ψ_0 + Σ(j=1 to l) (Σ(i=1 to k) r_i · x_j^(i)) · Ψ_j
|
||||
|
||||
1. They test if Σ(i=1 to k) e(r_i · π_A^(i), π_B^(i)) = e(r' · [α]_1, [β]_2) + e(IC, [γ]_2) + e(π'_C, [δ]_2).
|
||||
|
||||
> **Note**: This batch verification of Groth16 proofs is the same
|
||||
> as what is described in the Zcash paper, Appendix B.2.
|
||||
|
||||
#### ZkSignatures
|
||||
|
||||
The verifier follows the same procedure as in [Proofs of Claim](#proofs-of-claim)
|
||||
but with the Groth16 proofs of ZkSignatures.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Mantle Specification][mantle-spec] - Mantle transaction specification
|
||||
- [Cryptarchia v1 Protocol Specification][cryptarchia-spec]
|
||||
\- Cryptarchia consensus protocol
|
||||
- [Bedrock Specification][bedrock-spec] - Bedrock protocol specification
|
||||
- [Proof of Leadership Specification][pol-spec]
|
||||
\- Proof of Leadership specification
|
||||
- [Service Reward Distribution Protocol][service-reward]
|
||||
\- Service reward distribution protocol
|
||||
- [NomosDA Cryptographic Protocol][nomosda-verification]
|
||||
\- NomosDA cryptographic verification
|
||||
|
||||
### Informative
|
||||
|
||||
- [v1.1 Block Construction, Validation and Execution Specification][origin-ref]
|
||||
\- Original specification document
|
||||
- Poseidon2 - Hash function
|
||||
- Blake2b - Hash function
|
||||
- Zcash paper, Appendix B.2 - Batch verification of Groth16 proofs
|
||||
|
||||
[mantle-spec]: https://nomos-tech.notion.site/Mantle-Specification
|
||||
[mantle-tx]: https://nomos-tech.notion.site/Mantle-Specification
|
||||
[cryptarchia-spec]: https://nomos-tech.notion.site/Cryptarchia-v1-Protocol-Specification
|
||||
[cryptarchia-block-validation]: https://nomos-tech.notion.site/Cryptarchia-v1-Protocol-Specification
|
||||
[bedrock-spec]: https://nomos-tech.notion.site/Bedrock-Specification
|
||||
[pol-spec]: https://nomos-tech.notion.site/Proof-of-Leadership-Specification
|
||||
[service-reward]: https://nomos-tech.notion.site/Service-Reward-Distribution-Protocol
|
||||
[nomosda-verification]: https://nomos-tech.notion.site/NomosDA-Cryptographic-Protocol
|
||||
[origin-ref]: https://nomos-tech.notion.site/v1-1-Block-Construction-Validation-and-Execution-Specification-269261aa09df807185a9e0764acffe22
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,546 +0,0 @@
|
||||
# CRYPTARCHIA-PROOF-OF-LEADERSHIP
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Cryptarchia Proof of Leadership Specification |
|
||||
| Slug | 83 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Thomas Lavaur <thomas@status.im> |
|
||||
| Contributors | Mehmet <mehmet@status.im>, Giacomo Pasini <giacomo@status.im>, Daniel Sanchez Quiros <daniel@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, David Rusu <david@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/cryptarchia-proof-of-leadership.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/cryptarchia-proof-of-leadership.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
The Proof of Leadership (PoL) enables a leader to produce a zero-knowledge proof
|
||||
attesting to the fact that they have an eligible note (a representation of stake)
|
||||
that has won the leadership lottery.
|
||||
This proof is designed to be as lightweight as possible to generate and verify,
|
||||
to impose minimal restrictions on access to the role of leader
|
||||
and maximize the decentralization of that role.
|
||||
This document specifies the PoL mechanism for Cryptarchia,
|
||||
extending the work presented in the Ouroboros Crypsinous paper
|
||||
with recent cryptographic developments.
|
||||
|
||||
**Keywords:** Cryptarchia, proof-of-leadership, zero-knowledge, consensus,
|
||||
note, stake, lottery, Merkle tree
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
|
||||
"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be
|
||||
interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
## Background
|
||||
|
||||
### Protocol Overview
|
||||
|
||||
The PoL mechanism ensures that a note has legitimately won the leadership election
|
||||
while protecting the leader's privacy.
|
||||
The protocol comprises two parts: setup and PoL generation.
|
||||
|
||||
**Setup:**
|
||||
|
||||
1. Draw uniformly a random seed.
|
||||
2. Construct a Merkle tree composed of slot secrets derived from the seed.
|
||||
3. Use the root of the tree and the starting slot to get the leader's secret key.
|
||||
The starting slot is when the note can start to be used for PoL.
|
||||
4. The leader receives their stake in a note that uses this generated secret key.
|
||||
The leader either transfers this stake to themselves
|
||||
or obtains it from a different user.
|
||||
5. The note becomes eligible for Proof of Stake (PoS) when it has aged sufficiently,
|
||||
and the actual slot number is greater than or equal to
|
||||
the starting slot of the note.
|
||||
|
||||
**PoL generation:**
|
||||
|
||||
1. First, check if the note is winning by simulating the lottery.
|
||||
2. Prove the membership of the note identifier in an old snapshot of the Mantle Ledger,
|
||||
proving its age and its existence.
|
||||
3. Prove the membership of the note identifier in the most recent Mantle ledger,
|
||||
proving it's unspent.
|
||||
4. Prove that the note won the PoS lottery.
|
||||
5. Prove the knowledge of the slot secret for the winning slot.
|
||||
6. The proof is bound to a cryptographic public key used for signing
|
||||
the leader's proposed blocks.
|
||||
|
||||
### Comparison with Original Crypsinous PoL
|
||||
|
||||
This description differs from the original paper proposition,
|
||||
proving that a note is unspent directly
|
||||
instead of delegating the verification to validators.
|
||||
This design choice brings the following tradeoffs:
|
||||
|
||||
**Advantages:**
|
||||
|
||||
1. The ledger isn't required to be private using shielded notes.
|
||||
- Validators don't need to maintain a nullifier list.
|
||||
- Leaders keep their privacy by unlinking their stake, block, and PoL.
|
||||
|
||||
2. There is no leader note evolution mechanism anymore (see the paper for details).
|
||||
- There are no orphan proofs anymore,
|
||||
removing the need to include valid PoL proofs from abandoned forks.
|
||||
- Crypsinous required maintaining a parallel note commitment set
|
||||
integrating evolving notes over time.
|
||||
This requirement is removed now.
|
||||
- The derivation of the slot secret and its Merkle proof
|
||||
can be done locally without connection to the Nomos chain.
|
||||
|
||||
**Disadvantages:**
|
||||
|
||||
1. The PoL cannot be computed far in advance
|
||||
because the leader MUST know the latest ledger state of Mantle.
|
||||
|
||||
## Protocol
|
||||
|
||||
### Protection Against Adaptive Adversaries
|
||||
|
||||
The Ouroboros Crypsinous paper integrates protection against adaptive adversaries:
|
||||
|
||||
> The design has several subtleties since a critical consideration in the PoS setting
|
||||
> is tolerating adaptive corruptions:
|
||||
> this ensures that even if the adversary can corrupt parties
|
||||
> in the course of the protocol execution in an adaptive manner,
|
||||
> it does not gain any non-negligible advantage by e.g., re-issuing past PoS blocks.
|
||||
> (p. 2)
|
||||
|
||||
To avoid a leaked note being reused to maliciously regenerate past PoLs,
|
||||
this specification adopts the solution proposed in the paper
|
||||
using slightly different parameters.
|
||||
|
||||
The solution proposed in the paper is as follows:
|
||||
|
||||
> We solve the former issue, by adding a cheap key-erasure scheme
|
||||
> into the NIZK for leadership proofs.
|
||||
> Specifically, **parties have a Merkle tree of secret keys**,
|
||||
> the root of which is hashed to create the corresponding public key.
|
||||
> The **Merkle tree roots acts like a Zerocash coin secret key**,
|
||||
> and can be used to spend coins.
|
||||
> For leadership however, parties also must prove knowledge of a path
|
||||
> in the Merkle tree to a leaf at the index of the slot they are claiming to lead.
|
||||
> **After a slot passes, honest parties erase their preimages**
|
||||
> of this part of that path in the tree.
|
||||
> As the size of this tree is linear with the number of slots,
|
||||
> we allow parties to keep it small, by restricting its size.
|
||||
> (p. 5)
|
||||
|
||||
The paper proposed a tree of depth 24.
|
||||
|
||||
- This implies that the note is usable for PoS for only 194 days approximately
|
||||
(because 1 slot is 1 second).
|
||||
- After this period, the note MUST be refreshed to include new randomness.
|
||||
For simplicity, the refresh mechanism is designed
|
||||
as a classical transaction modifying the nullifier secret key.
|
||||
- This solution has good performance:
|
||||
|
||||
> For a reasonable value of $R = 2^{24}$, this is of little practical concern.
|
||||
> Public keys are valid for $2^{24}$ slots
|
||||
> and employing standard space/time trade-offs,
|
||||
> key updates take under 10,000 hashes, with less than 500kB storage requirement.
|
||||
> The most expensive part of the process, key generation,
|
||||
> still takes less than a minute on a modern CPU.
|
||||
> (p. 29)
|
||||
|
||||
The disadvantages of this solution are that:
|
||||
|
||||
1. The public key of the note will change periodically
|
||||
(each time all slot secrets are consumed) for the ones participating in PoL.
|
||||
2. The note will not be reusable directly after refresh
|
||||
as only old enough notes are usable for PoS.
|
||||
|
||||
This specification proposes a tree with a depth of 25,
|
||||
extending the note's eligibility to around 388 days,
|
||||
with a maximum of **two epochs remaining ineligible** not counted in these days.
|
||||
Note that this requirement applies specifically to proving leadership in PoS
|
||||
and is not needed for every note.
|
||||
While any note can be used for PoL,
|
||||
the knowledge of the secret slots behind the public key
|
||||
is only necessary to demonstrate that you are a leader.
|
||||
|
||||
**Setup:** When refreshing their notes, potential leaders will:
|
||||
|
||||
1. Uniformly randomly draw a seed $r_1 \xleftarrow{\$} \mathbb{F}_p$.
|
||||
|
||||
2. Construct a Merkle Tree of root $R$ containing $2^{25}$ slot secrets
|
||||
(that are random numbers).
|
||||
One way to efficiently construct the tree is to:
|
||||
|
||||
- Derive the slot secrets using a zkhash chain:
|
||||
$\forall i \in [2, 2^{25}], r_i := \text{hash}(r_{i-1})$.
|
||||
- More concretely, each leaf is the zkhash of the previous leaf (slot secret).
|
||||
|
||||
- This reduces storage requirements compared to directly randomly drawn
|
||||
independently $2^{25}$ slot secrets.
|
||||
- The first generation of the Merkle tree should be fast enough
|
||||
as it only requires hashing data.
|
||||
A correct implementation that erases data over time
|
||||
could maintain an upper bound in memory usage during the generation
|
||||
of the tree to only $\log_2(2^{25}) = 25$ zk hashes which is 800 bytes.
|
||||
- Leaders are only required to maintain the MMR up to the current slot.
|
||||
This means at minimum,
|
||||
leaders hold only 25 hashes in memory at any point in time.
|
||||
- After the first generation, the wallets optimize their storage
|
||||
by holding only the necessary information to maintain a correct Merkle path,
|
||||
deriving the next one over time using the fact that
|
||||
slot secrets were derived from the previous ones.
|
||||
|
||||
- It guarantees protection against adaptive adversaries.
|
||||
- Thanks to the pseudo-random properties of the hash function,
|
||||
slot secrets are indistinguishable from true randomness.
|
||||
- The one-way property of the hash function guarantees
|
||||
that an adaptive adversary cannot retrieve past slot secrets
|
||||
using a fresher one.
|
||||
|
||||
3. The user chooses a starting slot $sl_{start}$ from which their note
|
||||
will be eligible for PoS.
|
||||
|
||||
The note MUST be on-chain by the start of epoch $ep - 1$
|
||||
to be eligible for PoL in epoch $ep$ because of the age requirement.
|
||||
Based on this, we suggest $sl_{start}$ to not be earlier
|
||||
than the start of the epoch following the one after the transaction is emitted.
|
||||
This prevents the inclusion of unusable slot secrets in the tree
|
||||
(because the note would not be aged enough),
|
||||
optimizing the PoL lifetime of the note.
|
||||
|
||||
4. Finally, they derive their secret key
|
||||
$sk := \text{hash}(\text{NOMOS\_POL\_SK\_V1}||sl_{start}||R)$,
|
||||
binding the starting slot and the Merkle tree of slot secrets
|
||||
to the note secret key.
|
||||
This is verified in Circuit Constraints.
|
||||
|
||||
These four steps are summarized in the following pseudo-code:
|
||||
|
||||
```python
|
||||
def pol_sk_gen(sl_start, seed):
|
||||
frontier_nodes = MMR()
|
||||
path = MerkleProof()
|
||||
|
||||
# Generate 2^25 slot secrets using a hash chain initialized with `seed`.
|
||||
r = zkhash(seed)
|
||||
for i in range(2**25):
|
||||
frontier_nodes.append(r) # Append the slot secret to the MMR
|
||||
path.update(frontier_nodes) # Update Merkle path of this slot secret
|
||||
r = zkhash(r) # Derive the next slot secret
|
||||
|
||||
# Derive the root of the MMR
|
||||
root = frontier_nodes.get_root()
|
||||
|
||||
# Finally, derive the final PoL secret key.
|
||||
# Return the secret key and the Merkle proof of seed.
|
||||
return (zkhash(b"NOMOS_POL_SK_V1" + sl_start + root), path)
|
||||
|
||||
def update_secret_and_path(r, path):
|
||||
r = zkhash(r) # Derive next slot secret
|
||||
path.update(r) # Update the path for the Merkle proof of the new slot secret
|
||||
return (r, path)
|
||||
```
|
||||
|
||||
> Note that the generation of the slot secret tree is not constrained
|
||||
> by proofs or at consensus level and can be adapted by the node
|
||||
> as long as they are able to derive the merkle proof of their slot secret.
|
||||
|
||||
**PoL:** When proving the leadership election,
|
||||
note owners will prove knowledge of the slot secret corresponding to the slot $sl$.
|
||||
|
||||
1. To do that, they will give a Merkle path from the leaf at index $sl - sl_{start}$.
|
||||
2. The root of the tree hashed with $sl_{start}$ MUST be the secret key $sk$,
|
||||
which will be used for public key derivation.
|
||||
|
||||
**Protection against adaptive adversaries:**
|
||||
Since each slot has its own slot secret,
|
||||
requiring wallets to delete slot secrets used for previous slots
|
||||
avoids the risk of corruption that leads to the creation of PoL for previous blocks.
|
||||
|
||||
- The slot secret is derived from the previous one but the opposite is impossible.
|
||||
- An adaptive adversary corrupting the node would not have access to
|
||||
previous slot secrets if correctly deleted.
|
||||
Therefore, an adversary would not be able to generate the PoL for previous slots.
|
||||
|
||||
### Ledger Root
|
||||
|
||||
In order to prove that the winning note exists in the ledger
|
||||
and existed at the start of the previous epoch,
|
||||
every node MUST compute two ledger commitments.
|
||||
These commitments $ledger_{AGED}$ and $ledger_{LATEST}$
|
||||
are Merkle roots constructed over the Note IDs.
|
||||
The trees have a depth of 32 and are populated with note IDs.
|
||||
The value 0 represents an empty leaf.
|
||||
When the set is updated, during insertion,
|
||||
the first empty leaf is replaced with the new note ID,
|
||||
and during deletion, the leaf containing the deleted note ID is replaced with 0.
|
||||
|
||||
The following pseudo-code shows how the tree is managed:
|
||||
|
||||
```python
|
||||
def insert_new_note(note_set: list[NoteId], new_note: NoteId):
|
||||
i = 0
|
||||
while i < len(note_set) and note_set[i] != 0:
|
||||
i += 1
|
||||
if i < len(note_set):
|
||||
note_set[i] = new_note
|
||||
else:
|
||||
note_set.append(new_note)
|
||||
return note_set
|
||||
|
||||
def delete_note(note_set: list[NoteId], note: NoteId):
|
||||
i = 0
|
||||
while i < len(note_set) and note_set[i] != note:
|
||||
i += 1
|
||||
if i == len(note_set):
|
||||
# note not in the set
|
||||
return note_set
|
||||
note_set[i] = 0
|
||||
return note_set
|
||||
|
||||
def empty_tree_root(depth: int):
|
||||
root = 0
|
||||
for i in range(depth):
|
||||
h = hasher() # zk hash
|
||||
h.update(root)
|
||||
h.update(root)
|
||||
root = h.digest()
|
||||
return root
|
||||
|
||||
def get_ledger_root(note_set: list[NoteId]):
|
||||
assert(len(note_set) < 2**32)
|
||||
ledger_root = get_merkle_root(note_set)
|
||||
# return the Merkle root of the set padded with 0 to next power of 2
|
||||
ledger_root_height = len(note_set).bit_length()
|
||||
for height in range(ledger_root_height, 32):
|
||||
h = Hasher() # zk hash
|
||||
h.update(ledger_root)
|
||||
h.update(empty_tree_root(height))
|
||||
ledger_root = h.digest()
|
||||
return ledger_root
|
||||
```
|
||||
|
||||
> The ledger root may not be unique because the note IDs set can cycle.
|
||||
> Indeed, even if it's not possible to insert the same note ID twice,
|
||||
> it's possible to cycle on a previous set state by removing notes.
|
||||
> However, note IDs uniqueness guarantees protection against attacks on note aging.
|
||||
|
||||
### Zero-knowledge Proof Statement
|
||||
|
||||

|
||||
|
||||
#### Circuit Public Inputs
|
||||
|
||||
The prover (the leader) and the verifiers (nodes of the chain)
|
||||
MUST agree on these values:
|
||||
|
||||
1. **The slot number:** $sl$.
|
||||
|
||||
2. **The epoch nonce:** $\eta$.
|
||||
- For details see Cryptarchia v1 Protocol Specification - Epoch Nonce.
|
||||
|
||||
3. **The lottery function constants:**
|
||||
$t_0 = -\frac{\text{VRF\_order} \cdot \ln(1-f)}{\text{inferred\_total\_stake}}$
|
||||
and
|
||||
$t_1 = -\frac{\text{VRF\_order} \cdot \ln^2(1-f)}{2 \cdot \text{inferred\_total\_stake}^2}$.
|
||||
- For details see Lottery Approximation.
|
||||
- These numbers MUST be computed with high precision outside the proof.
|
||||
|
||||
4. **The root of the note Merkle tree when the stake distribution was frozen:**
|
||||
$ledger_{AGED}$.
|
||||
- For details see Cryptarchia v1 Protocol Specification - Epoch State Pseudocode.
|
||||
|
||||
5. **The latest root of the note Merkle tree:** $ledger_{LATEST}$.
|
||||
- Used to ensure the leadership note has not been spent.
|
||||
|
||||
6. **The leader's one-time public key** $P_{LEAD}$
|
||||
represented by 2 public inputs, each of 16 bytes in little endian.
|
||||
This key is needed to sign the proposed block.
|
||||
- For details see Linking the Proof of Leadership to a Block.
|
||||
|
||||
7. **The entropy contribution** $\rho_{LEAD}$ verified to be correctly derived.
|
||||
- This is the epoch nonce entropy contribution.
|
||||
See Cryptarchia v1 Protocol Specification - Epoch Nonce.
|
||||
|
||||
#### Circuit Private Inputs
|
||||
|
||||
The prover has to provide these values, but they remain secret:
|
||||
|
||||
1. **The slot secret and the related information** used for the slot $sl$
|
||||
as described in Protection Against Adaptive Adversaries:
|
||||
- The slot secret $r_{sl}$.
|
||||
- The Merkle path $slot\_secret\_path$ of $r_{sl}$ leading to the root $R$.
|
||||
- The starting secret slot $sl_{start}$.
|
||||
|
||||
2. **The eligible note and its related information** used to derive the $noteID$
|
||||
(the secret key is derived for the previous step):
|
||||
- The note value: $v$.
|
||||
- The note transaction zk hash: $note\_tx\_hash$.
|
||||
- The note outputs number: $note\_output\_number$.
|
||||
|
||||
3. **The proof of membership** of the note identifier in the zone ledgers
|
||||
$ledger_{AGED}$ and $ledger_{LATEST}$.
|
||||
This is done by providing the complementary Merkle nodes
|
||||
and indicating whether they are left (0) or right (1) through boolean selectors:
|
||||
- The aged ledger complementary nodes: $noteid\_aged\_path$.
|
||||
- The aged ledger complementary node selectors: $note\_id\_aged\_selectors$.
|
||||
- The latest ledger complementary nodes: $noteid\_latest\_path$.
|
||||
- The latest ledger complementary node selectors: $note\_id\_latest\_selectors$.
|
||||
|
||||
#### Circuit Constraints
|
||||
|
||||
The proof confirms the following relations:
|
||||
|
||||
1. The derivation of the Merkle tree root $R$
|
||||
using the slot secret $r_{sl}$ as the $sl - sl_{start}$'s leaf
|
||||
of the Merkle tree using the Merkle path.
|
||||
|
||||
This is a proof of knowledge of the secret slot $r_{sl}$
|
||||
guaranteeing protection against adaptive adversaries.
|
||||
|
||||
2. The derivation of $sk = \text{hash}(\text{NOMOS\_POL\_SK\_V1}||sl_{start}||R)$,
|
||||
as documented in Protection Against Adaptive Adversaries.
|
||||
|
||||
3. The computation of the note identifier.
|
||||
|
||||
4. The note identifier is in $ledger_{AGED}$ and $ledger_{LATEST}$.
|
||||
|
||||
5. The computation of the lottery ticket:
|
||||
$ticket := \text{hash}(\text{LEAD\_V1}||\eta||sl||noteID||sk)$ using Poseidon2.
|
||||
|
||||
6. The computation of the threshold: $t := v(t_0 + t_1 \cdot v)$.
|
||||
The ticket MUST be lower than this threshold to win the lottery.
|
||||
|
||||
7. The check that indeed $ticket < t$.
|
||||
|
||||
8. Compute and output the entropy contribution
|
||||
$\rho_{LEAD} := \text{hash}(\text{NOMOS\_NONCE\_CONTRIB\_V1}||sl||noteID||sk)$.
|
||||
|
||||
### Linking the Proof of Leadership to a Block
|
||||
|
||||
The PoL is bound to a public key from an asymmetric signature scheme.
|
||||
This public key $P_{LEAD}$ is given as two public inputs during the PoL proof generation,
|
||||
binding the proof to the key.
|
||||
|
||||
- The public key is represented by two public inputs of 16 bytes
|
||||
to guarantee the support of every possible EdDSA25519 public key.
|
||||
- This public key is later used to verify the signature $\sigma$ of a block
|
||||
when it is dispersed.
|
||||
This ensures that the PoL is tied to a specific block,
|
||||
and only the entity creating the proof can perform this binding.
|
||||
- The key is single-use, as reusing the same one could allow multiple PoLs
|
||||
to be linked to the same identity.
|
||||
An observer could then infer the stake of that identity
|
||||
by observing the frequency at which it emits a PoL.
|
||||
|
||||
## Appendix
|
||||
|
||||
### Lottery Approximation
|
||||
|
||||
- The $\phi_f(\alpha) = 1 - (1-f)^\alpha$ function of Ouroboros Crypsinous
|
||||
cannot be computed in a hand-written circuit
|
||||
as it can only operate on elements of $\mathbb{F}_p$
|
||||
for a certain prime number $p$.
|
||||
- Managing floating point numbers and mathematical functions
|
||||
involving floating points like exponentiations or logarithms in circuits
|
||||
is very inefficient.
|
||||
- Comparing the Taylor expansion of order 1 and 2,
|
||||
the Taylor expansion of order 2 method is used
|
||||
to approximate the Ouroboros Genesis (and Crypsinous) function
|
||||
by the following linear function:
|
||||
- $\stackrel{0}{\sim}$ means nearly equal in the neighborhood of 0
|
||||
- $f$ is the probability that at least one leader wins the lottery on each slot
|
||||
- $x$ is the stake of the proven note
|
||||
|
||||
$$1 - (1-f)^x = 1 - e^{x \ln(1-f)}$$
|
||||
|
||||
$$1 - e^{x \ln(1-f)} \stackrel{0}{\sim} x(-\ln(1-f) - 0.5 \ln^2(1-f)x)$$
|
||||
|
||||
Then the threshold is $stake(t_0 + t_1 \cdot stake)$ with
|
||||
$t_0 := -\frac{\text{VRF\_order} \cdot \ln(1-f)}{\text{inferred\_total\_stake}}$
|
||||
and
|
||||
$t_1 := -\frac{\text{VRF\_order} \cdot \ln^2(1-f)}{2 \cdot \text{inferred\_total\_stake}^2}$.
|
||||
|
||||
Since everything is known by every node except the value of the staked note,
|
||||
we pre-compute $t_0$ and $t_1$ outside of the circuit.
|
||||
|
||||
- The hash function used to derive the lottery ticket is Poseidon2,
  so the VRF_order is $p$, the order of the scalar field of the BN254 elliptic curve.
|
||||
- To compute $t_0$ and $t_1$, we precomputed the constant parts using SageMath
  and real numbers with 512-bit precision.
|
||||
In the implementation, $t_0$ and $t_1$ should then be derived
|
||||
using 256-bit precision integers following:
|
||||
|
||||
| Variable | Formula |
|
||||
| -------- | ------- |
|
||||
| $p$ | `0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001` |
|
||||
| $t_0\_constant$ | `0x1a3fb997fd58374772808c13d1c2ddacb5ab3ea77413f86fd6e0d3d978e5438` |
|
||||
| $t_1\_constant$ | `0x71e790b41991052e30c93934b5612412e7958837bac8b1c524c24d84cc7d0` |
|
||||
| $t_0$ | $\frac{t_0\_constant}{\text{inferred\_total\_stake}}$ |
|
||||
| $t_1$ | $p - \lfloor\frac{t_1\_constant}{\text{inferred\_total\_stake}^2}\rfloor$ |
|
||||
|
||||
### Error Analysis
|
||||
|
||||
- For $f = 0.05$.
|
||||
The error percentage is computed with $100 \cdot \frac{estimation - real\_value}{real\_value}$.
|
||||
- This analysis considers inferred_total_stake to be 23.5B as in Cardano.
|
||||
- Original function: $1 - (1-f)^{\frac{stake}{\text{inferred\_total\_stake}}}$
|
||||
- Taylor expansion of order 1:
|
||||
$-\frac{stake}{\text{inferred\_total\_stake}} \ln(1-f) := stake \cdot t_0$
|
||||
- Taylor expansion of order 2:
|
||||
$\frac{stake}{\text{inferred\_total\_stake}}(-\ln(1-f) - 0.5 \ln^2(1-f)(\frac{stake}{\text{inferred\_total\_stake}})) := stake(t_0 + stake \cdot t_1)$
|
||||
|
||||
| stake (%) | order 1 error | order 2 error |
|
||||
| --------- | ------------- | ------------- |
|
||||
| 5% | 0.13% | -0.0001% |
|
||||
| 10% | 0.26% | -0.0004% |
|
||||
| 15% | 0.39% | -0.0010% |
|
||||
| 20% | 0.51% | -0.0018% |
|
||||
| 25% | 0.64% | -0.0027% |
|
||||
| 30% | 0.77% | -0.0040% |
|
||||
| 35% | 0.90% | -0.0054% |
|
||||
| 40% | 1.03% | -0.0071% |
|
||||
| 45% | 1.16% | -0.0089% |
|
||||
| 50% | 1.29% | -0.0110% |
|
||||
| 55% | 1.42% | -0.0134% |
|
||||
| 60% | 1.55% | -0.0159% |
|
||||
| 65% | 1.68% | -0.0187% |
|
||||
| 70% | 1.81% | -0.0217% |
|
||||
| 75% | 1.94% | -0.0249% |
|
||||
| 80% | 2.07% | -0.0284% |
|
||||
| 85% | 2.20% | -0.0320% |
|
||||
| 90% | 2.33% | -0.0359% |
|
||||
| 95% | 2.46% | -0.0406% |
|
||||
| 100% | 2.59% | -0.0444% |
|
||||
|
||||
### Benchmarks
|
||||
|
||||
The material used for the benchmarks is the following:
|
||||
|
||||
- CPU: 13th Gen Intel(R) Core(TM) i9-13980HX (24 cores / 32 threads)
|
||||
- RAM: 32GB - Speed: 5600 MT/s
|
||||
- Motherboard: Micro-Star International Co., Ltd. MS-17S1
|
||||
- OS: Ubuntu 22.04.5 LTS
|
||||
- Kernel: 6.8.0-59-generic
|
||||
|
||||

|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Cryptarchia v1 Protocol Specification](https://nomos-tech.notion.site/Cryptarchia-v1-Protocol-Specification-21c261aa09df810cb85eff1c76e5798c)
|
||||
\- Parent protocol specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [Proof of Leadership Specification](https://nomos-tech.notion.site/Proof-of-Leadership-Specification-21c261aa09df819ba5b6d95d0fe3066d)
|
||||
\- Original Proof of Leadership documentation
|
||||
- [Ouroboros Crypsinous: Privacy-Preserving Proof-of-Stake](https://eprint.iacr.org/2018/1132.pdf)
|
||||
\- Foundation for the PoL design
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,164 +0,0 @@
|
||||
# CRYPTARCHIA-TOTAL-STAKE-INFERENCE
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Cryptarchia Total Stake Inference |
|
||||
| Slug | 94 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | David Rusu <davidrusu@status.im> |
|
||||
| Contributors | Alexander Mozeika <alexander.mozeika@status.im>, Daniel Kashepava <danielkashepava@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/cryptarchia-total-stake-inference.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/cryptarchia-total-stake-inference.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document defines the total stake inference algorithm for Cryptarchia.
|
||||
In proof-of-stake consensus protocols,
|
||||
the probability that an eligible participant wins the right to propose a block
|
||||
depends on that participant's stake relative to the total active stake.
|
||||
Because leader selection in Cryptarchia is private,
|
||||
the total active stake is not directly observable.
|
||||
Instead, nodes must infer it from observable chain growth.
|
||||
|
||||
**Keywords:** Cryptarchia, stake inference, proof-of-stake, epoch,
|
||||
slot occupancy, leadership lottery
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
|
||||
document are to be interpreted as described in
|
||||
[RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
## Background
|
||||
|
||||
The total active stake can be inferred by observing the slot occupancy rate:
|
||||
a higher fraction of occupied slots implies more stake participating in consensus.
|
||||
By observing the rate of occupied slots from the previous epoch
|
||||
and knowing the total stake estimate used during that period,
|
||||
nodes can infer a correction to the total stake estimate
|
||||
to compensate for any changes in consensus participation.
|
||||
|
||||
This inference process is done by each node following the chain.
|
||||
Leaders will use this *total stake estimate* to calculate their *relative stake*
|
||||
as part of the leadership lottery *without revealing their stake* to others.
|
||||
|
||||
The stake inference algorithm adjusts the previous total stake estimate
|
||||
based on the difference between the empirical slot activation rate
|
||||
(measured as the growth rate of the honest chain)
|
||||
and the expected slot activation rate.
|
||||
A large difference serves as an indicator
|
||||
that the total stake estimate is not accurate and must be adjusted.
|
||||
|
||||
This algorithm has been analyzed and shown to have good accuracy,
|
||||
precision, and convergence speed.
|
||||
A caveat to note is that accuracy decreases with increased network delays.
|
||||
The analysis can be found in [Total Stake Inference Analysis][stake-analysis].
|
||||
|
||||
## Construction
|
||||
|
||||
### Parameters and Variables
|
||||
|
||||
#### beta (learning rate)
|
||||
|
||||
- **Value:** 1.0
|
||||
- **Description:** Controls how quickly the algorithm adjusts to new participation levels.
|
||||
Lower values for `beta` give a more stable/gradual adjustment,
|
||||
while higher values give faster convergence but at the cost of less stability.
|
||||
|
||||
#### PERIOD (observation period)
|
||||
|
||||
- **Value:** ⌊6k/f⌋
|
||||
- **Description:** The length of the observation period in slots.
|
||||
|
||||
#### f (slot activation coefficient)
|
||||
|
||||
- **Value:** inherited from [Cryptarchia v1 Protocol][cryptarchia-v1]
|
||||
- **Description:** The target rate of occupied slots.
|
||||
Not all slots contain blocks, many are empty.
|
||||
|
||||
#### k (security parameter)
|
||||
|
||||
- **Value:** inherited from [Cryptarchia v1 Protocol][cryptarchia-v1]
|
||||
- **Description:** Block depth finality.
|
||||
Blocks deeper than `k` on any given chain are considered immutable.
|
||||
|
||||
### Functions
|
||||
|
||||
#### density_over_slots
|
||||
|
||||
```python
|
||||
def density_over_slots(s, p):
|
||||
"""
|
||||
Returns the number of blocks produced in the p slots
|
||||
following slot s in the honest chain.
|
||||
"""
|
||||
```
|
||||
|
||||
### Algorithm
|
||||
|
||||
For a current epoch's estimate `total_stake_estimate`
|
||||
and the epoch's first slot `epoch_slot`,
|
||||
the next epoch's estimate is calculated as shown below:
|
||||
|
||||
```python
|
||||
def total_stake_inference(total_stake_estimate, epoch_slot):
|
||||
period_block_density = density_over_slots(epoch_slot, PERIOD)
|
||||
slot_activation_error = 1 - period_block_density / (PERIOD * f)
|
||||
coefficient = total_stake_estimate * beta
|
||||
return total_stake_estimate - coefficient * slot_activation_error
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Stake Estimation Accuracy
|
||||
|
||||
The accuracy of the total stake inference depends on the observed slot occupancy rate.
|
||||
Implementations SHOULD be aware of the following security considerations:
|
||||
|
||||
- **Network delays**: Accuracy decreases with increased network delays,
|
||||
as delayed blocks may not be included in density calculations.
|
||||
- **Adversarial manipulation**: An adversary with significant stake
|
||||
could potentially influence the slot occupancy rate by withholding blocks.
|
||||
- **Convergence period**: During periods of rapid stake changes,
|
||||
the estimate may lag behind the actual total stake.
|
||||
|
||||
### Privacy Considerations
|
||||
|
||||
The stake inference algorithm is designed to maintain leader privacy:
|
||||
|
||||
- Leaders calculate their relative stake locally
|
||||
using the shared total stake estimate.
|
||||
- Individual stake amounts are never revealed to the network.
|
||||
- The algorithm relies only on publicly observable chain growth,
|
||||
not on private stake information.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Cryptarchia v1 Protocol][cryptarchia-v1]
|
||||
\- Protocol specification defining `f` and `k` constants
|
||||
|
||||
[cryptarchia-v1]: https://nomos-tech.notion.site/Cryptarchia-v1-Protocol-Specification-22d261aa09df80c4a0a1f8af0ddf65ca
|
||||
|
||||
### Informative
|
||||
|
||||
- [Total Stake Inference](https://nomos-tech.notion.site/Total-Stake-Inference-22d261aa09df8051a454caa46ec54b34)
|
||||
\- Original Total Stake Inference documentation
|
||||
- [Total Stake Inference Analysis][stake-analysis]
|
||||
\- Analysis of algorithm accuracy, precision, and convergence speed
|
||||
|
||||
[stake-analysis]: https://nomos-tech.notion.site/Total-Stake-Inference-Analysis-237261aa09df800285cccbb00b3aeb0a
|
||||
@@ -1,492 +0,0 @@
|
||||
# CRYPTARCHIA-V1-BOOTSTRAPPING-SYNCHRONIZATION
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Cryptarchia v1 Bootstrapping & Synchronization |
|
||||
| Slug | 96 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Youngjoon Lee <youngjoon@status.im> |
|
||||
| Contributors | David Rusu <david@status.im>, Giacomo Pasini <giacomo@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Daniel Sanchez Quiros <daniel@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/cryptarchia-v1-bootstr-sync.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/cryptarchia-v1-bootstr-sync.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document specifies the bootstrapping and synchronization protocol
|
||||
for Cryptarchia v1 consensus.
|
||||
When a new node joins the network or a previously-bootstrapped node has been offline,
|
||||
it must catch up with the most recent honest chain
|
||||
by fetching missing blocks from peers before listening for new blocks.
|
||||
The protocol defines mechanisms for setting fork choice rules,
|
||||
downloading blocks, and handling orphan blocks
|
||||
while mitigating long range attacks.
|
||||
|
||||
**Keywords:** bootstrapping, synchronization, fork choice, initial block download,
|
||||
orphan blocks, long range attacks, checkpoint
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
|
||||
"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be
|
||||
interpreted as described in RFC 2119.
|
||||
|
||||
## Background
|
||||
|
||||
This protocol defines the bootstrapping mechanism
|
||||
that covers all of the following cases:
|
||||
|
||||
- From the **Genesis** block
|
||||
- From the **checkpoint** block obtained from a trusted checkpoint provider
|
||||
- From the **local block tree**
|
||||
(with $B_\text{imm}$ newer than the Genesis and the checkpoint)
|
||||
|
||||
Additionally, the protocol defines the synchronization mechanism
|
||||
that handles orphan blocks while listening for new blocks
|
||||
after the bootstrapping is completed.
|
||||
|
||||
The protocol consists of the following key components:
|
||||
|
||||
- Determining the fork choice rule (Bootstrap or Online) at startup
|
||||
- Switching the fork choice rule from Bootstrap to Online
|
||||
- Downloading blocks from peers
|
||||
|
||||
Upon startup, a node **determines the fork choice rule**,
|
||||
as defined in Setting the Fork Choice Rule.
|
||||
If the Bootstrap rule is selected, it is maintained for the Prolonged Bootstrap Period,
|
||||
after which the node switches to the Online rule.
|
||||
|
||||
Using the chosen fork choice rule, a node will **download blocks**
|
||||
to catch up with the head (also known as the tip) of each peer's local chain $c_{loc}$.
|
||||
|
||||
After downloading is done, the node starts **listening for new blocks**.
|
||||
Upon receiving a new block, the node validates and adds it to its local block tree.
|
||||
If the ancestors of the block are missing from the local block tree,
|
||||
the node downloads missing ancestors using the same mechanism as above.
|
||||
|
||||
## Protocol
|
||||
|
||||
### Constants
|
||||
|
||||
| Constant | Name | Description | Value |
|
||||
| -------- | ---- | ----------- | ----- |
|
||||
| $T_\text{offline}$ | Offline Grace Period | A period during which a node can be restarted without switching to the Bootstrap rule. | 20 minutes |
|
||||
| $T_\text{boot}$ | Prolonged Bootstrap Period | A period during which Bootstrap fork choice rule must be continuously used after Initial Block Download is completed. This gives nodes additional time to compare their synced chain with a broader set of peers. | 24 hours |
|
||||
| $s_\text{gen}$ | Density Check Slot Window | A number of slots used by density check of Bootstrap rule. This constant is defined in Cryptarchia Fork Choice Rule - Definitions. | $\lfloor\frac{k}{4f}\rfloor$ (=4h30m) |
|
||||
|
||||
### Setting the Fork Choice Rule
|
||||
|
||||
Upon startup, a node sets the fork choice rule to the **Bootstrap** rule
|
||||
in one of the following cases.
|
||||
Otherwise, the node uses the **Online** fork choice rule.
|
||||
|
||||
- **A node is starting with $B_\text{imm}$ set to the Genesis block
|
||||
or from a checkpoint block.**
|
||||
|
||||
The node is setting its latest immutable block $B_\text{imm}$
|
||||
to the Genesis or a checkpoint,
|
||||
which clearly indicates that the node intends to catch up with the subsequent blocks.
|
||||
Regardless of how many subsequent blocks remain,
|
||||
the node SHOULD use the Bootstrap rule to mitigate long range attacks.
|
||||
|
||||
- **A node is restarting after being offline longer than $T_\text{offline}$ (20 minutes).**
|
||||
|
||||
Unlike starting from Genesis or checkpoint, in the case where a node is restarted
|
||||
while preserving its existing block tree,
|
||||
the node MUST choose a fork choice rule depending on how long it has been offline.
|
||||
|
||||
If it is certain that a node has been offline longer than the offline grace period
|
||||
$T_\text{offline}$ since it last used the Online rule,
|
||||
the node uses the Bootstrap rule upon startup.
|
||||
Otherwise, it starts with the Online rule.
|
||||
|
||||
Details of $T_\text{offline}$ are described in Offline Grace Period.
|
||||
A recommended way to measure the offline duration
|
||||
is introduced in Offline Duration Measurement.
|
||||
|
||||
- **A node operator set the Bootstrap rule explicitly (e.g., by `--bootstrap` flag).**
|
||||
|
||||
In any case where the node operator is clearly aware that the node has fallen behind
|
||||
by more than $k$ blocks,
|
||||
they SHOULD be able to start the node with the Bootstrap rule.
|
||||
For example, the operator may obtain the latest block height
|
||||
from another trusted operator
|
||||
and realize that their node has fallen significantly behind due to some issue.
|
||||
|
||||
### Initial Block Download
|
||||
|
||||
If peers for Initial Block Download (IBD) are configured,
|
||||
a node performs IBD by downloading blocks to catch up with the tip
|
||||
of the local chain $c_{loc}$ of each peer
|
||||
using the fork choice rule chosen in Setting the Fork Choice Rule.
|
||||
|
||||
Blocks are downloaded in parent-to-child order,
|
||||
as defined in the Downloading Blocks mechanism.
|
||||
This mechanism applies not only when a node starts from the Genesis block,
|
||||
but also when it already has the local block tree (or a checkpoint block).
|
||||
|
||||
```python
|
||||
def initial_block_download(peers, local_tree):
|
||||
# In real implementation, these downloadings can be run in parallel.
|
||||
# Also, any optimization can be applied to minimize downloadings,
|
||||
# such as grouping peers by tip.
|
||||
for peer in peers:
|
||||
download_blocks(local_tree, peer, target_block=None)
|
||||
```
|
||||
|
||||
According to Cryptarchia v1 Protocol Specification - Block Header Validation,
the downloaded blocks are validated and added to the local block tree
using the fork choice rule determined above.
|
||||
|
||||
If all IBD peers become unavailable before the node catches up
|
||||
with at least one of the IBD peers,
|
||||
the node is terminated with an error,
|
||||
allowing the operator to restart the node with other IBD peers.
|
||||
|
||||
If downloading is done successfully,
|
||||
the node starts listening for new blocks as described in Listening for New Blocks.
|
||||
|
||||
### Prolonged Bootstrap Period
|
||||
|
||||
After Initial Block Download is completed,
|
||||
a node MUST maintain the Bootstrap fork choice rule during the Bootstrap Period $T_\text{boot}$,
|
||||
if the node chose the Bootstrap rule at Setting the Fork Choice Rule.
|
||||
|
||||
The purpose of the Prolonged Bootstrap Period is to give a syncing node
|
||||
additional time
|
||||
to compare its synced chain with a broader set of peers.
|
||||
In other words, it provides the node with an opportunity
|
||||
to connect to different peers
|
||||
and verify whether they are on the same chain.
|
||||
If the syncing node has downloaded blocks only from peers within an isolated network,
|
||||
the result of Initial Block Download may not reflect the honest chain
|
||||
followed by the majority of the entire network.
|
||||
To resolve such situations, the node SHOULD continue using the Bootstrap rule
|
||||
while discovering additional peers,
|
||||
allowing it to switch to a better chain if one is found.
|
||||
|
||||
Theoretically, the Bootstrap rule should be prolonged
|
||||
until the node has seen a sufficient number of blocks
|
||||
beyond the $s_\text{gen}$ slot window,
|
||||
which is required for the density check of the Bootstrap rule to be meaningful.
|
||||
However, if the node has seen a fork longer than $k$ blocks
|
||||
from its divergence block during Initial Block Download,
|
||||
it means that the node has already seen more slots than $s_\text{gen}$
|
||||
with very high probability, considering the small size of $s_\text{gen} = k/(4f)$.
|
||||
If the node has never seen any fork longer than $k$ blocks,
|
||||
it means that all forks could have been handled by the longest chain rule,
|
||||
which is part of the Bootstrap rule.
|
||||
Therefore, this protocol does not explicitly wait $s_\text{gen}$ slots
|
||||
after Initial Block Download.
|
||||
In other words, the protocol does not use $s_\text{gen}$
|
||||
to configure the Prolonged Bootstrap Period.
|
||||
|
||||
This protocol configures the Bootstrap Period to 24 hours.
|
||||
|
||||
A timer MUST be started when Listening for New Blocks is started
|
||||
after Initial Block Download is completed.
|
||||
Once the timer is completed, the fork choice rule is switched to the Online rule.
|
||||
|
||||
### Listening for New Blocks
|
||||
|
||||
Once Initial Block Download is complete and Prolonged Bootstrap Period is started,
|
||||
a node starts listening for new blocks relayed by its peers.
|
||||
|
||||
Upon receiving a new block,
|
||||
the node tries to validate and add it to its local block tree,
|
||||
as defined in Cryptarchia v1 Protocol Specification - Chain Maintenance.
|
||||
|
||||
If the parent of the block is missing from the local block tree,
|
||||
the block cannot be fully validated and added.
|
||||
These blocks are called *orphan blocks*.
|
||||
To handle an orphan block,
|
||||
the node downloads missing blocks from a randomly selected peer,
|
||||
as described in Downloading Blocks.
|
||||
If the request fails, the node MAY retry with different peers
|
||||
before abandoning the orphan block.
|
||||
The retry policy can be configured by implementers.
|
||||
|
||||
Note that downloading missing blocks does not need to be triggered
|
||||
if it is clear that the orphan block is in a fork
|
||||
diverged before the latest immutable (committed) block,
|
||||
as the node MUST never revert immutable blocks.
|
||||
|
||||
```python
|
||||
def listen_and_process_new_blocks(fork_choice: ForkChoice,
|
||||
local_tree: Tree,
|
||||
peers: List[Node]):
|
||||
for block in listen_for_new_blocks():
|
||||
try:
|
||||
# Run the chain maintenance defined in the Cryptarchia spec.
|
||||
local_tree.on_block(block, fork_choice)
|
||||
except InvalidBlock:
|
||||
continue
|
||||
except ParentNotFound:
|
||||
# Ignore the orphan block proactively,
|
||||
# if it's clear that the orphan block is in a fork
|
||||
# behind the latest immutable block
|
||||
# because immutable blocks should never be reverted.
|
||||
# This check doesn't cover all cases, but the uncovered cases
|
||||
# will be handled by the Cryptarchia block validation
|
||||
# during the `download_blocks` below.
|
||||
if block.height <= local_tree.latest_immutable_block().height:
|
||||
continue
|
||||
# In real implementation, downloading can be run in background
|
||||
# with the retry policy.
|
||||
download_blocks(local_tree, random.choice(peers),
|
||||
target_block=block.id)
|
||||
```
|
||||
|
||||
### Downloading Blocks
|
||||
|
||||
For performing Initial Block Download and handling orphan blocks
|
||||
while Listening for New Blocks,
|
||||
a node sends a `DownloadBlocksRequest` to a peer,
|
||||
which MUST respond with blocks in parent-to-child order.
|
||||
This communication should be implemented based on Libp2p streaming.
|
||||
|
||||
#### Libp2p Protocol ID
|
||||
|
||||
- Mainnet: `/nomos/cryptarchia/sync/1.0.0`
|
||||
- Testnet: `/nomos-testnet/cryptarchia/sync/1.0.0`
|
||||
|
||||
```python
|
||||
class DownloadBlocksRequest:
|
||||
# Ask blocks up to the target block.
|
||||
# The response may not contain the target block
|
||||
# if the responder limits the number of blocks returned.
|
||||
# In that case, the requester must repeat the request.
|
||||
target_block: BlockId
|
||||
# To allow the peer to determine the starting block to return.
|
||||
known_blocks: KnownBlocks
|
||||
|
||||
class KnownBlocks:
|
||||
local_tip: BlockId
|
||||
latest_immutable_block: BlockId
|
||||
# Additional known blocks.
|
||||
# A responder will reject a request if this list contains more than 5.
|
||||
additional_blocks: list[BlockId]
|
||||
|
||||
class DownloadBlocksResponse:
|
||||
# A stream of blocks in parent-to-child order.
|
||||
# The max number of blocks to be returned can be limited by implementers.
|
||||
# A requester can read the stream until the stream returns "NoMoreBlock".
|
||||
blocks: Stream[Block | "NoMoreBlock"]
|
||||
```
|
||||
|
||||
The responding peer uses `KnownBlocks` to determine the optimal starting block
|
||||
for the response stream, aiming to minimize the number of blocks to be returned.
|
||||
The requesting node can include any block it believes could assist in this process
|
||||
to the `KnownBlocks.additional_blocks`.
|
||||
To avoid spamming responders,
|
||||
the size of `KnownBlocks.additional_blocks` is limited to 5.
|
||||
|
||||
The responding peer finds the latest common ancestor (i.e. LCA)
|
||||
between the `target_block` and each of the known blocks.
|
||||
Then, it returns a stream of blocks, starting from the highest LCA.
|
||||
To mitigate malicious downloading requests,
|
||||
the peer limits the number of blocks to be returned.
|
||||
The detailed implementation is up to implementers,
|
||||
depending on their internal architecture (e.g. storage design).
|
||||
|
||||
The requesting node SHOULD repeat `DownloadBlocksRequest`s
|
||||
by updating the `KnownBlocks` in order to download the next batches of blocks.
|
||||
The following code shows how the requesting node can be implemented.
|
||||
|
||||
```python
|
||||
def download_blocks(local_tree: Tree, peer: Node,
|
||||
target_block: Optional[BlockId]):
|
||||
latest_downloaded: Optional[Block] = None
|
||||
while True:
|
||||
# Fetch the peer's tip if target is not specified.
|
||||
target_block = target_block if target_block is not None else peer.tip()
|
||||
# Don't start downloading if target is already in local.
|
||||
if local_tree.has(target_block):
|
||||
return
|
||||
|
||||
req = DownloadBlocksRequest(
|
||||
# If target_block is None, specify the current peer's tip
|
||||
# each time when building DownloadBlocksRequest,
|
||||
# so that the node can catch up with the most recent peer's tip.
|
||||
target_block=target_block,
|
||||
known_blocks=KnownBlocks(
|
||||
local_tip=local_tree.tip().id,
|
||||
latest_immutable_block=local_tree.latest_immutable_block().id,
|
||||
# Provide the latest downloaded block as well
|
||||
# to avoid downloading duplicate blocks
|
||||
additional_blocks=[latest_downloaded.id]
|
||||
if latest_downloaded is not None else [],
|
||||
)
|
||||
)
|
||||
resp = send_request(peer, req)
|
||||
|
||||
for block in resp.blocks():
|
||||
latest_downloaded = block
|
||||
try:
|
||||
# Run the chain maintenance defined in the Cryptarchia spec.
|
||||
local_tree.on_block(block)
|
||||
# Early stop if the target has been reached.
|
||||
if block == req.target_block:
|
||||
break
|
||||
except:
|
||||
return
|
||||
```
|
||||
|
||||
If the node is continuing from a previous `DownloadBlocksRequest`,
|
||||
it is important to include the latest downloaded block
|
||||
to the `KnownBlocks.additional_blocks` to avoid downloading duplicate blocks.
|
||||
|
||||
If the requesting node is downloading blocks up to the peer's tip $c_{loc}$
|
||||
(e.g. Initial Block Download) by repeating `DownloadBlocksRequest`s,
|
||||
the $c_{loc}$ may switch between requests.
|
||||
The algorithm described above also handles this case
|
||||
by specifying the most recent peer's tip each time
|
||||
when a `DownloadBlocksRequest` is constructed.
|
||||
|
||||
### Bootstrapping from Checkpoint
|
||||
|
||||
Instead of bootstrapping from the Genesis block or from the local block tree,
|
||||
a node can choose to bootstrap the honest chain
|
||||
starting from a checkpoint block obtained from a trusted checkpoint provider.
|
||||
A checkpoint provider is a trusted service (which MAY be a Nomos node
|
||||
or a dedicated server) that provides recent blockchain snapshots.
|
||||
In this case, the node fully trusts the checkpoint provider
|
||||
and considers blocks deeper than the checkpoint block as immutable
|
||||
(including the checkpoint block itself).
|
||||
|
||||
A trusted checkpoint provider exposes an HTTP endpoint,
|
||||
allowing nodes to download the checkpoint block and the corresponding ledger state.
|
||||
The details are defined in Checkpoint Provider HTTP API.
|
||||
|
||||
The bootstrapping node imports the downloaded checkpoint block and ledger state
|
||||
before starting bootstrapping.
|
||||
The imported checkpoint block is used as the latest immutable block $B_{imm}$
|
||||
and the local chain tip $c_{loc}$.
|
||||
Starting from the checkpoint block,
|
||||
the same Initial Block Download is used to download blocks
|
||||
up to the tip of the local chain of each peer.
|
||||
As defined in Setting the Fork Choice Rule,
|
||||
the Bootstrap fork choice rule MUST be used upon startup.
|
||||
|
||||
If it turns out that none of the peers' local chains are connected
|
||||
to the checkpoint block,
|
||||
the node is terminated with an error,
|
||||
allowing the node operator to select a new checkpoint.
|
||||
|
||||
## Details
|
||||
|
||||
### Offline Grace Period
|
||||
|
||||
The offline grace period $T_\text{offline}$ is a period during which
|
||||
a node can be restarted without switching to the Bootstrap rule.
|
||||
|
||||
This protocol configures $T_\text{offline}$ to 20 minutes.
|
||||
Here are the advantages and disadvantages of a short period:
|
||||
|
||||
**Advantages:**
|
||||
|
||||
- Limits chances for malicious peers to build long alternative chains
|
||||
beyond the scope of the Online rule.
|
||||
- Conservatively enables the Bootstrap rule to handle long forks.
|
||||
|
||||
**Disadvantages:**
|
||||
|
||||
- Even a short offline duration can too sensitively trigger the Bootstrap rule,
|
||||
which then lasts for the long Prolonged Bootstrap Period.
|
||||
|
||||
The following example explains why $T_\text{offline}$ SHOULD NOT be set too long:
|
||||
|
||||
- A local node stopped in the following situation.
|
||||
A malicious peer is building a fork which is now a little shorter ($k - d$)
|
||||
than the honest chain.
|
||||
- The local node has been offline shorter than $T_\text{offline}$ and just restarted.
|
||||
As defined in this protocol, the Online fork choice rule is used
|
||||
because the offline duration is short.
|
||||
- During the offline duration, the malicious peer made its fork longer
|
||||
by adding $k - d$ blocks.
|
||||
Now the fork is the same length as the honest chain.
|
||||
- If the malicious peer sends the fork to the restarted node
|
||||
faster than the honest peer,
|
||||
the restarted node will commit to the fork because it has $k$ new blocks.
|
||||
Even if the node later receives the honest chain from the honest peer,
|
||||
it cannot revert blocks that are already immutable.
|
||||
|
||||
### Offline Duration Measurement
|
||||
|
||||
As defined in [Setting the Fork Choice Rule](#setting-the-fork-choice-rule),
|
||||
when a node is restarted,
|
||||
it should be able to choose a proper fork choice rule
|
||||
depending on how long it has been offline since it last used the Online rule.
|
||||
|
||||
It is considered unsafe to rely on any external information
|
||||
(e.g. the slot or height of peer's tip) to check how long the node has been offline,
|
||||
since such information could be manipulated as an attack vector.
|
||||
Instead, it is recommended to employ a local method to measure the offline duration.
|
||||
|
||||
While the specific implementation is left to the discretion of implementers,
|
||||
one approach is for the node to periodically record the current time to a local file
|
||||
while it is running with the **Online** fork choice rule.
|
||||
Upon restart, it can use this timestamp to calculate how long it has been offline.
|
||||
|
||||
### Checkpoint Provider HTTP API
|
||||
|
||||
A trusted checkpoint provider serves the `GET /checkpoint` API,
|
||||
allowing users (which are not connected via p2p)
|
||||
to download the latest checkpoint block and its corresponding ledger state.
|
||||
|
||||
```yaml
|
||||
openapi: 3.0
|
||||
|
||||
paths:
|
||||
/checkpoint:
|
||||
get:
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
multipart/mixed:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
checkpoint_block:
|
||||
type: string
|
||||
format: binary
|
||||
checkpoint_ledger_state:
|
||||
type: string
|
||||
format: binary
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Cryptarchia v1 Protocol Specification][cryptarchia-v1]
|
||||
\- Parent protocol specification
|
||||
- [Cryptarchia Fork Choice Rule][fork-choice]
|
||||
\- Fork choice rule specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [Cryptarchia v1 Bootstrapping & Synchronization][bootstrap-origin]
|
||||
\- Original bootstrapping and synchronization documentation
|
||||
- [Libp2p Streaming][libp2p]
|
||||
\- Peer-to-peer networking library
|
||||
|
||||
[cryptarchia-v1]: https://nomos-tech.notion.site/Cryptarchia-v1-Protocol-Specification-21c261aa09df810cb85eff1c76e5798c
|
||||
[fork-choice]: https://nomos-tech.notion.site/Cryptarchia-Fork-Choice-Rule
|
||||
[bootstrap-origin]: https://nomos-tech.notion.site/Cryptarchia-v1-Bootstrapping-Synchronization-1fd261aa09df81ac94b5fb6a4eff32a6
|
||||
[libp2p]: https://docs.libp2p.io/
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,586 +0,0 @@
|
||||
# NOMOSDA-CRYPTOGRAPHIC-PROTOCOL
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | NomosDA Cryptographic Protocol |
|
||||
| Slug | 148 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Mehmet Gonen <mehmet@status.im> |
|
||||
| Contributors | Álvaro Castro-Castilla <alvaro@status.im>, Thomas Lavaur <thomaslavaur@status.im>, Daniel Kashepava <danielkashepava@status.im>, Marcin Pawlowski <marcin@status.im>, Daniel Sanchez Quiros <danielsq@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-30** — [`0ef87b1`](https://github.com/logos-co/logos-lips/blob/0ef87b1ba9491c854e48c8dfd7574d34ec69c704/docs/blockchain/raw/da-cryptographic-protocol.md) — New RFC: CODEX-MANIFEST (#191)
|
||||
- **2026-01-30** — [`25ebb3a`](https://github.com/logos-co/logos-lips/blob/25ebb3ac05e44da058c15db438fa371f10f57003/docs/blockchain/raw/da-cryptographic-protocol.md) — Replace nomosda-encoding with da-cryptographic-protocol (#264)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document describes the cryptographic protocol underlying NomosDA,
|
||||
the data availability (DA) layer for the Nomos blockchain.
|
||||
NomosDA ensures that all blob data submitted is made available and verifiable
|
||||
by all network participants, including sampling clients and validators.
|
||||
The protocol uses Reed–Solomon erasure coding for data redundancy
|
||||
and KZG polynomial commitments for cryptographic verification,
|
||||
enabling efficient and scalable data availability sampling.
|
||||
|
||||
**Keywords:** NomosDA, data availability, KZG, polynomial commitment,
|
||||
erasure coding, Reed-Solomon, sampling, BLS12-381
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119][rfc-2119].
|
||||
|
||||
### Definitions
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| Blob | A unit of data submitted to NomosDA for availability guarantees. |
|
||||
| Chunk | A 31-byte field element in the BLS12-381 scalar field. |
|
||||
| DA Node | A node responsible for storing and serving column data. |
|
||||
| Encoder | The entity that transforms blob data into encoded form with proofs. |
|
||||
| Sampling Client | A client (e.g., light node) that verifies availability by sampling columns. |
|
||||
| KZG Commitment | A polynomial commitment using the Kate-Zaverucha-Goldberg scheme. |
|
||||
| Reed-Solomon Coding | An erasure coding scheme used for data redundancy. |
|
||||
| Row Polynomial | A polynomial interpolated from chunks in a single row. |
|
||||
| Combined Polynomial | A random linear combination of all row polynomials. |
|
||||
|
||||
### Notations
|
||||
|
||||
| Symbol | Description |
|
||||
| ------ | ----------- |
|
||||
| $f_i(x)$ | Polynomial interpolated from the chunks in row $i$. |
|
||||
| $com_i$ | KZG commitment of the row polynomial $f_i(x)$. |
|
||||
| $f_C(x)$ | Combined polynomial formed as a random linear combination of all row polynomials. |
|
||||
| $com_C$ | KZG commitment of the combined polynomial $f_C(x)$. |
|
||||
| $w$ | Primitive $n$-th root of unity in the finite field. In this protocol, $n = 2k$. |
|
||||
| $h$ | Random scalar generated using the Fiat–Shamir heuristic from row commitments. |
|
||||
| $\pi_j$ | KZG evaluation proof for column $j$ of the combined polynomial. |
|
||||
| $v_j$ | Combined evaluation of column $j$ (i.e., $f_C(w^{j-1})$). |
|
||||
| $k$ | Number of columns in the original data matrix. |
|
||||
| $\ell$ | Number of rows in the data matrix. |
|
||||
|
||||
## Background
|
||||
|
||||
To achieve data availability,
|
||||
the blob data is first encoded using Reed–Solomon erasure coding
|
||||
and arranged in a matrix format.
|
||||
Each row of the matrix is interpreted as a polynomial
|
||||
and then committed using a KZG polynomial commitment.
|
||||
The columns of this matrix are then distributed across a set of decentralized DA nodes.
|
||||
|
||||
Rather than requiring individual proofs for each chunk,
|
||||
NomosDA uses a random linear combination of all row polynomials
|
||||
to construct a single combined polynomial.
|
||||
This allows for generating one proof per column,
|
||||
which enables efficient and scalable verification without sacrificing soundness.
|
||||
Sampling clients verify availability by selecting random columns
|
||||
and checking that the data and proof they receive
|
||||
are consistent with the committed structure.
|
||||
Because each column intersects all rows,
|
||||
even a small number of sampled columns provides strong confidence
|
||||
that the entire blob is available.
|
||||
|
||||
### Protocol Stages
|
||||
|
||||
The protocol is structured around three key stages:
|
||||
|
||||
1. **Encoding**: Transform blob data into a matrix with commitments and proofs.
|
||||
1. **Dispersal**: Distribute columns to DA nodes for storage.
|
||||
1. **Sampling**: Verify data availability by sampling random columns.
|
||||
|
||||
### Design Principles
|
||||
|
||||
The reason for expanding the original data row-wise
|
||||
is to ensure data availability by sending a column to each DA node
|
||||
and obtaining a sufficient number of responses from different DA nodes for sampling.
|
||||
Three core commitment types are used, and verification is done via column sampling:
|
||||
|
||||
- **Row commitment**: Ensures the integrity of the original and RS-encoded data
|
||||
and binds the order of chunks within each row.
|
||||
|
||||
- **Combined commitment**: Constructed by the verifier
|
||||
using a random linear combination of the row commitments.
|
||||
Used to verify the encoder's single proof per column
|
||||
and ensures that the column data is consistent with the committed row structure.
|
||||
Even if a single chunk is invalid,
|
||||
the combined evaluation will likely fail
|
||||
due to the unpredictability of the random coefficients.
|
||||
|
||||
- **Column sampling**: Allows sampling clients to verify data availability efficiently
|
||||
by checking a small number of columns.
|
||||
With the combined commitment and a single proof,
|
||||
the sampling client can validate that an entire column
|
||||
is consistent with the committed data.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### Encoding
|
||||
|
||||
In the NomosDA protocol, encoders perform the encoding process
|
||||
by dividing the blob data into chunks.
|
||||
Each chunk represents a 31-byte element
|
||||
in the scalar finite field used for the BLS12-381 elliptic curve.
|
||||
31 bytes are chosen instead of 32 bytes
|
||||
because some 32-byte elements will exceed the BLS12-381 modulus,
|
||||
making it impossible to recover the data later.
|
||||
|
||||
The matrix representation has $k$ columns which include $\ell$ chunks each.
|
||||
The row and column numbers used in the representation are decided
|
||||
based on the size of the block data and the number of DA nodes.
|
||||
|
||||

|
||||
|
||||
*Figure 1: Data matrix structure showing chunks and columns.
|
||||
Each chunk is a 31-byte element, and each column contains $\ell$ chunks.*
|
||||
|
||||
The encoding process consists of three steps:
|
||||
|
||||
1. Calculating row commitments.
|
||||
1. Expanding the original data using RS coding.
|
||||
1. Computing the combined row polynomial and the combined column proofs.
|
||||
|
||||
#### Row Commitments
|
||||
|
||||
The original data chunks are considered in the evaluation form,
|
||||
and unique polynomials are interpolated for each row.
|
||||
For every row $i$, the encoder interpolates a unique degree $k - 1$ polynomial $f_i$
|
||||
such that $data^{j}_{i} = f_i(w^{j-1})$ for $i = 1, ..., \ell$ row indices
|
||||
and $j = 1, ..., k$ column indices.
|
||||
Recall that $w$ is a primitive element of the field.
|
||||
|
||||
Subsequently, 48-byte row commitment values $com_i = com(f_i)$
|
||||
for these polynomials are computed by the encoder.
|
||||
These commitments ensure the correct ordering of chunks within each row.
|
||||
|
||||
> **Note**: In this protocol, elliptic curves are used as a group,
|
||||
> thus the entries of $com_i$'s are also elliptic curve points.
|
||||
> Let the $x$-coordinate of $com_i$ be represented as $com^{x}_{i}$
|
||||
> and the $y$-coordinate of $com_i$ as $com^{y}_{i}$.
|
||||
> If you have just $com^{x}_{i}$ and one bit of $com^{y}_{i}$,
|
||||
> then you can construct $com_i$.
|
||||
> Therefore, there is no need to use both coordinates of $com_i$.
|
||||
> However, for the sake of simplicity in this document, the value $com_i$ is used.
|
||||
|
||||
#### Reed-Solomon Expansion
|
||||
|
||||
Using RS coding, the encoder extends the original data row-wise
|
||||
to obtain the expanded data matrix.
|
||||
The expansion is calculated by evaluating the row polynomials $f_i$
|
||||
at the new points $w^{j-1}$ where $j = k + 1, k + 2, \ldots, 2k$.
|
||||
The current design of NomosDA uses an expansion factor of 2,
|
||||
but it can also work with different factors.
|
||||
This expanded data matrix has rows of length $2k$.
|
||||
|
||||

|
||||
|
||||
*Figure 2: Extended data matrix showing original data ($k$ columns)
|
||||
and extended data ($2k$ columns total) after Reed-Solomon expansion.*
|
||||
|
||||
Due to the homomorphic property of KZG,
|
||||
the row commitment values calculated in the previous step
|
||||
are also valid for the row polynomials of the extended data.
|
||||
|
||||
#### Combined Row Commitment and Column Proofs
|
||||
|
||||
To eliminate the need for generating one proof per chunk,
|
||||
a more efficient technique using random linear combinations of row polynomials is used,
|
||||
allowing only one proof to be generated per column
|
||||
while still ensuring the validity of all underlying row data.
|
||||
|
||||

|
||||
|
||||
*Figure 3: Complete encoding pipeline showing row commitments (step 1),
|
||||
RS-encoding (step 2), and combined row commitment with column data (step 3).*
|
||||
|
||||
This process consists of the following steps:
|
||||
|
||||
##### Compute the Random Linear Combination Polynomial
|
||||
|
||||
Let each row $i \in \{1, \ldots, \ell\}$ have an associated polynomial $f_i(x)$
|
||||
and commitment $com_i = com(f_i)$.
|
||||
|
||||
The encoder computes random scalar $h \in \mathbb{F}$ using the Fiat–Shamir heuristic,
|
||||
applying the BLAKE2b hash function with a 31-byte output,
|
||||
over the row commitments with a domain separation tag `DA_V1`
|
||||
to ensure uniqueness and prevent cross-protocol collisions:
|
||||
|
||||
$$h = \text{Hash}(\text{'DA\_V1'} \| com_1 \| \ldots \| com_{\ell})$$
|
||||
|
||||
The resulting digest is interpreted as a field element
|
||||
in the scalar field of BLS12-381.
|
||||
|
||||
Then, the encoder computes the combined polynomial $f_C(x)$, defined as:
|
||||
|
||||
$$f_C(x) = f_1(x) + h \cdot f_2(x) + h^{2} \cdot f_3(x) + \cdots + h^{\ell-1} \cdot f_{\ell}(x)$$
|
||||
|
||||
The corresponding commitment to this polynomial is $com(f_C)$.
|
||||
This value does not need to be computed by the encoder,
|
||||
since the verifier can derive it directly from the row commitments
|
||||
using the same random scalar $h$.
|
||||
|
||||
##### Compute Combined Evaluation Points per Column
|
||||
|
||||
For each column $j \in \{1, \ldots, 2k\}$, the encoder has the set of column values
|
||||
$\{data^{j}_{1}, data^{j}_{2}, \ldots, data^{j}_{\ell}\}$,
|
||||
where each value corresponds to $f_i(w^{j-1})$.
|
||||
|
||||
The encoder computes the combined evaluation value at column position $j$ directly:
|
||||
|
||||
$$v_j = f_C(w^{j-1})$$
|
||||
|
||||
##### Generate One Proof per Column
|
||||
|
||||
For each column index $j$, the encoder computes a single KZG evaluation proof
|
||||
$\pi_j$ for the combined polynomial $f_C(x)$ at the evaluation point $w^{j-1}$:
|
||||
|
||||
$$eval(f_C, w^{j-1}) \rightarrow (v_j, \pi_j)$$
|
||||
|
||||
The result is a set of $2k$ evaluation proofs,
|
||||
one for each column, derived from the combined row structure.
|
||||
|
||||
### Dispersal
|
||||
|
||||
The encoder sends the following information to a DA node
|
||||
in the subnet corresponding to the expanded column number $j$:
|
||||
|
||||
- The row commitments $\{com_1, com_2, \ldots, com_{\ell}\}$.
|
||||
- The column chunks $\{data^{j}_{1}, data^{j}_{2}, \ldots, data^{j}_{\ell}\}$.
|
||||
- The combined proof of the column chunks $\pi_j$.
|
||||
|
||||
This information is also replicated by the receiving node
|
||||
to every other node in the subnet.
|
||||
|
||||
### Verification
|
||||
|
||||
A DA node that receives the column information described above
|
||||
performs the following checks:
|
||||
|
||||

|
||||
|
||||
*Figure 4: Dispersal and verification flow from Encoder to DA Node.
|
||||
The DA Node receives row commitments, column data, and combined proof,
|
||||
then verifies by calculating $h$, $com_C$, and $v_j$.*
|
||||
|
||||
1. The DA node computes the scalar challenge $h \in \mathbb{F}$
|
||||
using a Fiat–Shamir hash over the row commitments with a domain separation tag:
|
||||
|
||||
$$h = \text{Hash}(\text{'DA\_V1'} \| com_1 \| com_2 \| \ldots \| com_{\ell})$$
|
||||
|
||||
1. The DA node computes the combined commitment $com_C$:
|
||||
|
||||
$$com_C = com_1 + h \cdot com_2 + h^{2} \cdot com_3 + \cdots + h^{\ell-1} \cdot com_{\ell}$$
|
||||
|
||||
This is the commitment of the following polynomial:
|
||||
|
||||
$$f_C(x) = f_1(x) + h \cdot f_2(x) + h^{2} \cdot f_3(x) + \cdots + h^{\ell-1} \cdot f_{\ell}(x)$$
|
||||
|
||||
1. The DA node computes:
|
||||
|
||||
$$v_j = data^{j}_{1} + h \cdot data^{j}_{2} + h^{2} \cdot data^{j}_{3} + \cdots + h^{\ell-1} \cdot data^{j}_{\ell}$$
|
||||
|
||||
This represents $f_C(w^{j-1})$, the evaluation of the combined polynomial
|
||||
at the corresponding column index.
|
||||
|
||||
1. The DA node verifies that $\pi_j$ is a valid proof:
|
||||
|
||||
$$\text{Verify}(com_C, w^{j-1}, v_j, \pi_j) \rightarrow \text{true/false}$$
|
||||
|
||||
### Sampling
|
||||
|
||||
A sampling client, such as a light node, selects a random column index $s \in \{1, \ldots, 2k\}$.
|
||||
It sends a request for column $s$ to a DA node hosting that column's data.
|
||||
The DA node sends the client the column data $data^{s}_{i}$ and the combined proof $\pi_s$.
|
||||
|
||||

|
||||
|
||||
*Figure 5: Sampling flow between DA Node and Sampling Client.
|
||||
The client requests a random column index $s$, receives the column data and proof,
|
||||
then verifies by calculating $h$, $com_C$, and $v_s$.*
|
||||
|
||||
> **Note**: The row commitments $\{com_1, \ldots, com_{\ell}\}$ for a given blob are public
|
||||
> and remain unchanged across multiple queries to that blob.
|
||||
> If a sampling client has already obtained them,
|
||||
> it does not need to request them again.
|
||||
|
||||
The verification process run by the sampling client proceeds as follows:
|
||||
|
||||
1. Compute the scalar $h \in \mathbb{F}$ using the domain-separated Fiat–Shamir hash:
|
||||
|
||||
$$h = \text{Hash}(\text{'DA\_V1'} \| com_1 \| com_2 \| \ldots \| com_{\ell})$$
|
||||
|
||||
1. Compute the combined commitment $com_C$:
|
||||
|
||||
$$com_C = com_1 + h \cdot com_2 + h^{2} \cdot com_3 + \cdots + h^{\ell-1} \cdot com_{\ell}$$
|
||||
|
||||
1. Compute the combined evaluation value $v_s$ using the received column data:
|
||||
|
||||
$$v_s = data^{s}_{1} + h \cdot data^{s}_{2} + h^{2} \cdot data^{s}_{3} + \cdots + h^{\ell-1} \cdot data^{s}_{\ell}$$
|
||||
|
||||
1. Verify the evaluation proof:
|
||||
|
||||
$$\text{Verify}(com_C, w^{s-1}, v_s, \pi_s) \rightarrow \text{true/false}$$
|
||||
|
||||
If these checks succeed, then this proves to the sampling client
|
||||
that the column $s$ is correctly encoded and matches the committed data.
|
||||
The sampling client can query several columns
|
||||
to reach a local opinion on the availability of the entire data.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Fiat–Shamir Security
|
||||
|
||||
The random scalar $h$ MUST be computed using the Fiat–Shamir heuristic
|
||||
with the domain separation tag `DA_V1` to prevent cross-protocol attacks.
|
||||
The hash function MUST be BLAKE2b with a 31-byte output.
|
||||
|
||||
### Chunk Size
|
||||
|
||||
Chunks MUST be 31 bytes to ensure they fit within the BLS12-381 scalar field modulus.
|
||||
Using 32-byte chunks would cause some values to exceed the modulus,
|
||||
making data recovery impossible.
|
||||
|
||||
### Column Sampling Confidence
|
||||
|
||||
The more columns a sampling client verifies,
|
||||
the higher confidence it has in the availability of the entire blob.
|
||||
Implementations SHOULD sample a sufficient number of columns
|
||||
to achieve the desired confidence level.
|
||||
|
||||
### Proof Validity
|
||||
|
||||
If a single chunk is invalid,
|
||||
the combined evaluation will likely fail verification
|
||||
due to the unpredictability of the random coefficients.
|
||||
This provides strong guarantees against malicious encoders
|
||||
attempting to hide invalid data.
|
||||
|
||||
---
|
||||
|
||||
## Part II: Implementation Considerations
|
||||
|
||||
> **IMPORTANT**: The sections above define the normative protocol requirements.
|
||||
> All implementations MUST comply with those requirements.
|
||||
>
|
||||
> **The sections below are non-normative**.
|
||||
> They provide mathematical background for implementers
|
||||
> unfamiliar with the underlying cryptographic concepts.
|
||||
|
||||
### Mathematical Background
|
||||
|
||||
#### Polynomial Interpolation
|
||||
|
||||
Polynomial interpolation is the process of creating a unique polynomial from a set of data.
|
||||
In NomosDA, univariate interpolation is used,
|
||||
where each polynomial is defined over a single variable.
|
||||
There are two main ways to represent polynomials:
|
||||
|
||||
**Coefficient form:**
|
||||
Given a set of coefficients $a_0, a_1, \ldots, a_k \in \mathbb{F}$,
|
||||
a unique polynomial $f(x)$ of degree at most $k$ in coefficient form is:
|
||||
|
||||
$$f(x) = a_0 + a_1 x + a_2 x^{2} + \cdots + a_k x^{k}$$
|
||||
|
||||
If $a_k \neq 0$, then the degree of $f$ is exactly $k$.
|
||||
|
||||
**Evaluation form:**
|
||||
Let $w \in \mathbb{F}$ be a primitive $k$-th root of unity in the field,
|
||||
i.e., $w^{k} = 1$ and $w^{i} \neq 1$ for all $1 \leq i < k$.
|
||||
Given a dataset $a_0, a_1, \ldots, a_{k-1}$,
|
||||
there exists a unique polynomial $f$ in $\mathbb{F}[X]$ of degree less than $k$ such that:
|
||||
|
||||
$$f(w^{i}) = a_i \quad \text{for all } i = 0, 1, \ldots, k - 1$$
|
||||
|
||||
This representation of a polynomial using its values at $k$ distinct points
|
||||
is called the evaluation form.
|
||||
|
||||
#### KZG Polynomial Commitment
|
||||
|
||||
The KZG polynomial commitment scheme provides a way
|
||||
to commit to a polynomial and provide a proof for an evaluation of this polynomial.
|
||||
This scheme has 4 steps: setup, polynomial commitment, proof evaluation, and proof verification.
|
||||
|
||||
The setup phase generates a structured reference string (SRS)
|
||||
and is required only once for all future uses of the scheme.
|
||||
The prover performs the polynomial commitment and proof generation steps,
|
||||
while the verifier checks the validity of the proof
|
||||
against the commitment and the evaluation point.
|
||||
|
||||
**Setup:**
|
||||
|
||||
1. Choose a generator $g$ of a pairing-friendly elliptic curve group $G$.
|
||||
1. Select the maximum degree $d$ of the polynomials to be committed to.
|
||||
1. Choose a secret parameter $\tau$ and compute global parameters
|
||||
$gp = (g, g^{\tau}, g^{\tau^{2}}, \ldots, g^{\tau^{d}})$.
|
||||
Delete $\tau$ and release the parameters publicly.
|
||||
|
||||
> **Note**: The expression $g^{a}$ refers to elliptic curve point addition, i.e.,
|
||||
> $g^{a} = a * g = g + g + \cdots + g$
|
||||
> where $g$ is the generator point of the group $G$.
|
||||
> This is known as multiplicative notation.
|
||||
|
||||
**Polynomial Commitment:**
|
||||
Given a polynomial $f(x) = \sum_{i=0}^{d} a_i x^{i}$, compute the commitment of $f$ as follows:
|
||||
|
||||
$$com(f) = g^{f(\tau)} = (g)^{a_0} (g^{\tau})^{a_1} (g^{\tau^{2}})^{a_2} \cdots (g^{\tau^{d}})^{a_d}$$
|
||||
|
||||
**Proof Evaluation:**
|
||||
Given an evaluation $f(u) = v$, compute the proof $\pi = g^{q(\tau)}$,
|
||||
where $q(x) = \frac{f(x) - v}{x - u}$ is called the quotient polynomial
|
||||
and it is a polynomial if and only if $f(u) = v$.
|
||||
|
||||
**Proof Verification:**
|
||||
Given commitment $C = com(f)$, the evaluation point $u$, the evaluation $f(u) = v$,
|
||||
and proof $\pi = g^{q(\tau)}$, verify that:
|
||||
|
||||
$$e\left(\frac{C}{g^{v}}, g\right) = e\left(\pi, \frac{g^{\tau}}{g^{u}}\right)$$
|
||||
|
||||
where $e$ is a non-trivial bilinear pairing.
|
||||
|
||||
> **Note**: The evaluation of the polynomial commitment to the function $f$
|
||||
> at the point $u$, yielding the result $v$ and evaluation proof $\pi$,
|
||||
> is represented as: $eval(f, u) \rightarrow v, \pi$.
|
||||
> The verification function is defined as: $verify(com(f), u, v, \pi) \rightarrow \text{true/false}$.
|
||||
|
||||
#### Random Linear Combination of Commitments and Evaluations
|
||||
|
||||
When multiple committed polynomials are evaluated at the same point,
|
||||
it's possible to verify all evaluations using a single combined proof,
|
||||
thanks to the homomorphic properties of KZG commitments.
|
||||
This technique improves efficiency by reducing multiple evaluation proofs to just one.
|
||||
|
||||
Suppose there are $\ell$ polynomials $f_1(x), f_2(x), \ldots, f_{\ell}(x)$
|
||||
with corresponding commitments $C_i = com(f_i)$,
|
||||
and the goal is to verify that each $f_i(u) = v_i$.
|
||||
|
||||
Instead of generating $\ell$ separate proofs and performing $\ell$ pairing checks:
|
||||
|
||||
1. Use the Fiat–Shamir heuristic to derive deterministic random scalars
|
||||
$h_1, h_2, \ldots, h_{\ell}$ from the commitments $C_1, \ldots, C_{\ell}$:
|
||||
|
||||
$$(h_1, \ldots, h_{\ell}) = \text{Hash}(C_1 \| \ldots \| C_{\ell})$$
|
||||
|
||||
1. Form the combined polynomial:
|
||||
|
||||
$$f_C(x) = \sum_{i=1}^{\ell} h_i \cdot f_i(x)$$
|
||||
|
||||
1. Compute the combined evaluation:
|
||||
|
||||
$$v = f_C(u) = \sum_{i=1}^{\ell} h_i \cdot v_i$$
|
||||
|
||||
1. Compute the proof $\pi$ for $f_C(u) = v$ using the standard KZG method:
|
||||
|
||||
$$\pi = g^{q(\tau)} \quad \text{where} \quad q(x) = \frac{f_C(x) - v}{x - u}$$
|
||||
|
||||
**Verification:**
|
||||
Given commitments $C_1, \ldots, C_{\ell}$, evaluation point $u$ and value $v = f_C(u)$,
|
||||
and proof $\pi = g^{q(\tau)}$:
|
||||
|
||||
The verifier calculates the combined commitment $C = com(f_C)$
|
||||
using random scalars $h_1, h_2, \ldots, h_{\ell}$:
|
||||
|
||||
$$(h_1, \ldots, h_{\ell}) = \text{Hash}(C_1 \| \ldots \| C_{\ell})$$
|
||||
|
||||
$$C = h_1 \cdot com_1 + h_2 \cdot com_2 + \cdots + h_{\ell} \cdot com_{\ell}$$
|
||||
|
||||
and checks:
|
||||
|
||||
$$e\left(\frac{C}{g^{v}}, g\right) \stackrel{?}{=} e\left(\pi, \frac{g^{\tau}}{g^{u}}\right)$$
|
||||
|
||||
This ensures that all original evaluations $f_i(u) = v_i$ are correct
|
||||
with a single proof and a single pairing check.
|
||||
Since the random scalars $h_i$ are generated via Fiat–Shamir,
|
||||
any incorrect $v_i$ will almost certainly cause the combined evaluation to fail verification.
|
||||
|
||||
#### Reed-Solomon Erasure Coding
|
||||
|
||||
Reed-Solomon coding, also known as RS coding, is an error-correcting code
|
||||
based on the fact that any $n$-degree polynomial
|
||||
can be uniquely determined by $n + 1$ points satisfying the polynomial equation.
|
||||
It uses the interpolated polynomial over the data set
|
||||
to produce more points in a process called expansion or encoding.
|
||||
Once the data is expanded, any $n + 1$ elements of the total set of points
|
||||
can be used to reconstruct the original data.
|
||||
|
||||
#### Pairing Details
|
||||
|
||||
Let $(G_1, .)$, $(G_2, .)$, and $(G_T, .)$ be three cyclic groups of large prime order.
|
||||
A map $e : G_1 \times G_2 \rightarrow G_T$ is a pairing map such that:
|
||||
|
||||
$$e(g^{x}, g^{y}) = e(g, g)^{xy} = e(g, g^{xy})$$
|
||||
|
||||
Given $g^{x}$ and $g^{y}$, a pairing can check that some element $h = g^{xy}$
|
||||
without knowing $x$ and $y$.
|
||||
|
||||
For the KZG commitment scheme to work, a so-called trusted setup is needed,
|
||||
consisting of a structured reference string (SRS).
|
||||
This is a set of curve points in $G_1$ and $G_2$.
|
||||
For a field element $u \in \mathbb{F}_q$, define $u * g_i = g^{u}_{i}$.
|
||||
The SRS consists of two sequences of group elements:
|
||||
|
||||
$$g^{0}_{1}, g^{\tau}_{1}, g^{\tau^{2}}_{1}, g^{\tau^{3}}_{1}, \ldots, g^{\tau^{D}}_{1} \in G_1$$
|
||||
|
||||
$$g^{0}_{2}, g^{\tau}_{2}, g^{\tau^{2}}_{2}, g^{\tau^{3}}_{2}, \ldots, g^{\tau^{K}}_{2} \in G_2$$
|
||||
|
||||
where $\tau \in \mathbb{F}_q$ is a secret field element, not known by either participant.
|
||||
$g_1$ is the generator point of $G_1$ and $g_2$ is the generator point of $G_2$.
|
||||
$D$ is the upper bound for the degree of the polynomials that can be committed to,
|
||||
and $K$ is the maximum number of evaluations to be proven using a batched proof.
|
||||
|
||||
**Verify Operation:**
|
||||
To verify an evaluation proof, the verifier checks the following equation:
|
||||
|
||||
$$q(x)(x - u) = f(x) - f(u) = f(x) - v$$
|
||||
|
||||
As the verifier does not have access to the actual polynomials $f$ and $q$,
|
||||
the next best thing would be to check that:
|
||||
|
||||
$$com(q) \cdot (x - u) = com(f - v)$$
|
||||
|
||||
Expanding the definition of $com$:
|
||||
|
||||
$$g^{q(\tau)(\tau - u)}_{1} = g^{f(\tau) - v}_{1}$$
|
||||
|
||||
For elliptic curve additive notation this is equivalent to:
|
||||
|
||||
$$q(\tau)(\tau - u) * g_1 = f(\tau) * g_1 - v * g_1$$
|
||||
|
||||
Now there is a problem, namely, the multiplication on the left-hand side.
|
||||
Pairings allow us to get away with one multiplication.
|
||||
So the verifier actually checks:
|
||||
|
||||
$$e(com(q), (\tau * g_2 - u * g_2)) = e(com(f) - v * g_1, g_2)$$
|
||||
|
||||
i.e.,
|
||||
|
||||
$$e(q(\tau) * g_1, (\tau * g_2 - u * g_2)) = e(f(\tau) * g_1 - v * g_1, g_2)$$
|
||||
|
||||
This works because of the bilinearity property of elliptic curve pairings:
|
||||
|
||||
$$e(a * g_1, b * g_2) = e(g_1, g_2)^{ab}$$
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [BLS12-381][bls12-381] - BLS12-381 elliptic curve specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [NomosDA Cryptographic Protocol][origin-ref] - Original specification document
|
||||
- [Elliptic Curve Pairings][ec-pairings] - Background on elliptic curve pairings
|
||||
|
||||
[rfc-2119]: https://www.ietf.org/rfc/rfc2119.txt
|
||||
[origin-ref]: https://nomos-tech.notion.site/NomosDA-Cryptographic-Protocol-1fd261aa09df816fa97ac81304732e77
|
||||
[bls12-381]: https://hackmd.io/@benjaminion/bls12-381
|
||||
[ec-pairings]: https://vitalik.ca/general/2017/01/14/exploring_ecp.html
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,421 +0,0 @@
|
||||
# NOMOSDA-REWARDING
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | NomosDA Rewarding |
|
||||
| Slug | 149 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Marcin Pawlowski <marcin@status.im> |
|
||||
| Contributors | Alexander Mozeika <alexander.mozeika@status.im>, Mehmet Gonen <mehmet@status.im>, Daniel Sanchez Quiros <danielsq@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-30** — [`0ef87b1`](https://github.com/logos-co/logos-lips/blob/0ef87b1ba9491c854e48c8dfd7574d34ec69c704/docs/blockchain/raw/da-rewarding.md) — New RFC: CODEX-MANIFEST (#191)
|
||||
- **2026-01-30** — [`3f76dd8`](https://github.com/logos-co/logos-lips/blob/3f76dd87a0b6580c16caa401c7fd5ec7dd9a7d6b/docs/blockchain/raw/da-rewarding.md) — Add NomosDA Rewarding specification (#269)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document specifies the opinion-based rewarding mechanism
|
||||
for the NomosDA (Nomos Data Availability) service.
|
||||
The mechanism incentivizes DA nodes to maintain consistent and high-quality service
|
||||
through peer evaluation using a binary opinion system.
|
||||
Nodes assess the service quality of their counterparts across different subnetworks,
|
||||
and rewards are distributed based on accumulated positive opinions
|
||||
exceeding a defined activity threshold.
|
||||
|
||||
**Keywords:** NomosDA, data availability, rewarding, incentives, peer evaluation,
|
||||
activity proof, quality of service, sampling
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119][rfc-2119].
|
||||
|
||||
### Definitions
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| Block Finality | A period expressed in number of blocks (2160) after which a block is considered finalized, as defined by parameter $k$ in Cryptarchia. |
|
||||
| Session | A time period during which the same set of nodes executes the protocol. Session length is two block finalization periods (4320 blocks). |
|
||||
| Activity Proof | A data structure containing binary opinion vectors about other nodes' service quality. |
|
||||
| Active Message | A message registered on the ledger that contains a node's activity proof for a session. |
|
||||
| Opinion Threshold | The ratio of positive to negative opinions required for a node to be positively opinionated (default: 10). |
|
||||
| Activity Threshold | The number of positive opinions ($\theta = N_s/2$) a node must collect to be considered active. |
|
||||
| DA Node | A node providing data availability service, identified by a unique `ProviderId`. |
|
||||
| SDP | Service Declaration Protocol, used to retrieve the list of active DA nodes. |
|
||||
|
||||
### Notations
|
||||
|
||||
| Symbol | Description |
|
||||
| ------ | ----------- |
|
||||
| $s$ | Current session number. |
|
||||
| $N_s$ | Set of DA nodes (unique `ProviderId`s) active during session $s$. |
|
||||
| $S$ | Session length in blocks (4320). |
|
||||
| $b$ | Block number. |
|
||||
| $\theta$ | Activity threshold ($N_s/2$). |
|
||||
| $R_s$ | Base reward for session $s$. |
|
||||
| $I_s$ | Total income for DA service during session $s$. |
|
||||
| $R(n)$ | Reward for node $n$. |
|
||||
|
||||
## Background
|
||||
|
||||
The NomosDA service is a crucial component of the Nomos architecture,
|
||||
responsible for ensuring accessibility and retrievability of blockchain data.
|
||||
This specification defines an opinion-based rewarding mechanism
|
||||
that incentivizes DA nodes to maintain consistent and high-quality service.
|
||||
|
||||
The approach uses peer evaluation through a binary opinion system,
|
||||
where nodes assess the service quality of their counterparts
|
||||
across different subnetworks of DA.
|
||||
This mechanism balances simplicity and effectiveness
|
||||
by integrating with the existing Nomos architecture
|
||||
while promoting decentralized quality control.
|
||||
|
||||
The strength of this approach comes from its economic design,
|
||||
which reduces possibilities for dishonest behaviour and collusion.
|
||||
The reward calculation method divides rewards based on the total number of nodes
|
||||
rather than just active ones,
|
||||
further discouraging manipulation of the opinion system.
|
||||
|
||||
### Three-Session Operation
|
||||
|
||||
The mechanism operates across three consecutive sessions:
|
||||
|
||||
1. **Session $s$**: NomosDA nodes perform sampling of data blobs referenced in blocks.
|
||||
While sampling, nodes interact with and evaluate the service quality
|
||||
of other randomly selected nodes from different subnetworks.
|
||||
Nodes sample both new blocks and old blocks.
|
||||
|
||||
1. **Session $s + 1$**: Nodes formalize their evaluations by submitting Activity Proofs—
|
||||
binary vectors where each bit represents their opinion (positive or negative)
|
||||
about other nodes' service quality.
|
||||
These opinions are tracked separately for new and old blocks.
|
||||
The proofs are recorded on the ledger through Active Messages.
|
||||
|
||||
1. **Session $s + 2$**: Rewards are distributed.
|
||||
Nodes that accumulate positive opinions above the activity threshold
|
||||
receive a fixed reward calculated as a portion of the session's DA service income.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### Session $s$: Sampling Phase
|
||||
|
||||
1. If the number of DA nodes (unique `ProviderId`s from declarations)
|
||||
retrieved from the SDP is below the minimum,
|
||||
then do not perform sampling for **new blocks**.
|
||||
|
||||
1. If the number of DA nodes retrieved from the SDP for session $s - 1$
|
||||
was below the minimum,
|
||||
then do not perform sampling for **old blocks**.
|
||||
|
||||
1. If the number of DA nodes retrieved from the SDP is below the minimum
|
||||
for both session $s$ and $s - 1$,
|
||||
then stop and do not execute this protocol.
|
||||
|
||||
1. The DA node performs sampling for every new block $b$ it receives,
|
||||
and for an old block $b - S$ for every new block received
|
||||
(where $S = 4320$ is the session length).
|
||||
|
||||
1. The node selects at random (without replacement) 20 out of 2048 subnetworks.
|
||||
|
||||
> **Note**: The set of nodes selected does not have to be the same
|
||||
> for old and new blocks.
|
||||
|
||||
1. The node connects to a random node in each of the selected subnetworks.
|
||||
If a node does not respond to a sampling request,
|
||||
another node is selected from the same subnetwork
|
||||
and the sampling request is repeated until success is achieved
|
||||
or a specified limit is reached.
|
||||
|
||||
1. During sampling, the node measures the quality of service
|
||||
provided by selected nodes as defined in
|
||||
[Quality of Service Measurement](#quality-of-service-measurement).
|
||||
|
||||
### Session $s + 1$: Opinion Submission Phase
|
||||
|
||||
1. The DA node generates an Activity Proof that contains opinion vectors,
|
||||
where all DA nodes are rated for positive or negative quality of service
|
||||
for new and old blocks.
|
||||
|
||||
1. The DA node sends an Active Message that is registered on the ledger
|
||||
and contains the node's Activity Proof.
|
||||
|
||||
### Session $s + 2$: Reward Distribution Phase
|
||||
|
||||
1. Every node that collected above $\theta$ positive opinions
|
||||
receives a fixed reward as defined in [Reward Calculation](#reward-calculation).
|
||||
|
||||
1. The rewards are distributed by the Service Reward Distribution Protocol.
|
||||
|
||||
## Constructions
|
||||
|
||||
### Quality of Service Measurement
|
||||
|
||||
A node MUST measure the quality of service for each sampling it performs
|
||||
to gather opinions about the quality of service of the entire DA network.
|
||||
These opinions are used to construct the Activity Proof.
|
||||
|
||||
The global parameter `opinion_threshold` is set to 10,
|
||||
meaning a node must receive 10 positive opinions for each negative opinion
|
||||
to be positively opinionated (at least 90% positive opinions).
|
||||
|
||||
To build an opinions vector describing the quality of data availability sampling,
|
||||
a node MUST:
|
||||
|
||||
1. Retrieve $\mathcal{N}_s$, a list of active DA nodes (unique `ProviderId`s)
|
||||
for session $s$, from the SDP.
|
||||
|
||||
1. Retrieve $\mathcal{N}_{s-1}$, a list of active DA nodes for session $s - 1$,
|
||||
from the SDP (can be retained from the previous session).
|
||||
|
||||
1. Order $\mathcal{N}_s$ and $\mathcal{N}_{s-1}$ in ascending lexicographical order
|
||||
by `ProviderId` of each node from both lists.
|
||||
|
||||
1. Create for each session and independently for old ($\mathcal{N} = \mathcal{N}_{s-1}$)
|
||||
and new ($\mathcal{N} = \mathcal{N}_s$) blocks:
|
||||
|
||||
1. `positive_opinions` vector of size $N = \text{length}(\mathcal{N})$
|
||||
where the $i$-th element (integer) represents positive opinions
|
||||
about the $i$-th node from list $\mathcal{N}$.
|
||||
|
||||
1. `negative_opinions` vector of size $N = \text{length}(\mathcal{N})$
|
||||
where the $i$-th element (integer) represents negative opinions
|
||||
about the $i$-th node from list $\mathcal{N}$.
|
||||
|
||||
1. `blacklist` vector of size $N = \text{length}(\mathcal{N})$
|
||||
where the $i$-th element (bool) marks whether the $i$-th node
|
||||
is blacklisted due to providing an invalid response.
|
||||
|
||||
1. Send a sampling request to a node $n \in \mathcal{N}$ such that `blacklist[n]==0`:
|
||||
|
||||
1. If the node $n$ responds:
|
||||
1. If the response is valid, then `positive_opinions[n]++`
|
||||
1. If the response is not valid, then:
|
||||
1. Clear positive opinions about the node: `positive_opinions[n]=0`
|
||||
1. Mark the node as blacklisted: `blacklist[n]=1`
|
||||
|
||||
1. If the node does not respond, then `negative_opinions[n]++`
|
||||
|
||||
1. When the next session starts, create an opinions binary for every node $i \in \mathcal{N}$:
|
||||
|
||||
```python
|
||||
# Fold the per-session counters into a single binary opinion per node.
# NOTE: the threshold attribute is `opinion_threshold`, matching the global
# parameter defined above and the function parameter below (the previous
# `opinions_threshold` spelling was inconsistent).
previous_session_opinions[i] = opinion(i, old.positive_opinions,
                                       old.negative_opinions,
                                       old.opinion_threshold)

current_session_opinions[i] = opinion(i, new.positive_opinions,
                                      new.negative_opinions,
                                      new.opinion_threshold)


def opinion(i, positive_opinions, negative_opinions, opinion_threshold):
    # Node i is positively opinionated only when its positive opinions
    # strictly exceed `opinion_threshold` times its negative opinions
    # (default threshold 10 => at least ~90% positive responses).
    return (positive_opinions[i] > (negative_opinions[i] * opinion_threshold))
|
||||
```
|
||||
|
||||
1. A node sets a positive opinion about itself in the `current_session_opinions` vector.
|
||||
|
||||
1. A node sets a positive opinion about itself in the `previous_session_opinions`
|
||||
if the node was taking part in the protocol during the previous session.
|
||||
|
||||
### Activity Proof
|
||||
|
||||
The Activity Proof structure is:
|
||||
|
||||
```python
|
||||
class ActivityProof:
|
||||
current_session: SessionNumber
|
||||
previous_session_opinions_length: int
|
||||
previous_session_opinions: Opinions
|
||||
current_session_opinions_length: int
|
||||
current_session_opinions: Opinions
|
||||
```
|
||||
|
||||
`Opinions` is a binary vector of length $N_s$
|
||||
(total number of nodes identified by unique `ProviderId`s from declarations)
|
||||
where each bit represents a node providing DA service for the session.
|
||||
A bit is set to 1 only when the node considers the sampling service
|
||||
provided by the DA node to meet quality standards.
|
||||
|
||||
#### Field Descriptions
|
||||
|
||||
- `current_session`: The session number of the assignations used for forming opinions.
|
||||
|
||||
- `previous_session_opinions_length`: The number of bytes used by `previous_session_opinions`.
|
||||
|
||||
- `previous_session_opinions`: Opinions gathered from sampling old blocks.
|
||||
When there are no old blocks (first session after genesis
|
||||
or after a non-operational DA period),
|
||||
these opinions SHOULD NOT be collected nor validated.
|
||||
|
||||
- `current_session_opinions_length`: The number of bytes used by `current_session_opinions`.
|
||||
|
||||
- `current_session_opinions`: Opinions gathered from sampling new blocks.
|
||||
|
||||
#### Validity Rules
|
||||
|
||||
The Activity Proof is **valid** when:
|
||||
|
||||
- The `current_session_opinions` vector is not provided
|
||||
(and `current_session_opinions_length==0`)
|
||||
when the DA service was not operational during that session.
|
||||
|
||||
- The byte-length of the `previous_session_opinions` vector is:
|
||||
|
||||
$$|\text{previous\_session\_opinions}| = \left\lceil \frac{N_{s-1}}{8} \right\rceil$$
|
||||
|
||||
- The `previous_session_opinions` vector is not provided
|
||||
(and `previous_session_opinions_length==0`)
|
||||
when the DA service was not operational during that session.
|
||||
|
||||
- The byte-length of the `current_session_opinions` vector is:
|
||||
|
||||
$$|\text{current\_session\_opinions}| = \left\lceil \frac{N_s}{8} \right\rceil$$
|
||||
|
||||
- The $n$-th node (note that $n \in \mathcal{N}_s \not\Rightarrow n \in \mathcal{N}_{s-1}$)
|
||||
is represented by the $n$-th bit of the vector (counting nodes from 0),
|
||||
with the vector encoded as little-endian.
|
||||
The rightmost byte of the vector MAY contain bits not mapped to any node;
|
||||
these bits are disregarded.
|
||||
|
||||
### Activity Threshold
|
||||
|
||||
The activity threshold $\theta$ defines the number of positive opinions
|
||||
a node must collect from peers to be considered active for session $s$.
|
||||
|
||||
$$\theta = N_s / 2$$
|
||||
|
||||
Where $N_s$ is the total number of nodes providing DA service
|
||||
during session $s$.
|
||||
|
||||
### Active Message
|
||||
|
||||
Each node for every session constructs an `active_message`
|
||||
that MUST follow the specified format.
|
||||
|
||||
A node MAY stop sending `active_message`
|
||||
when the DA service is non-operational for more than a single session.
|
||||
|
||||
The `active_message` metadata field MUST be populated with:
|
||||
|
||||
- A `header` containing a one-byte `version` field fixed to the value `0x01`.
|
||||
- The `activity_proof` as defined above.
|
||||
|
||||
#### Active Message Rules
|
||||
|
||||
- An Active Message is stored on the ledger.
|
||||
- An Active Message is used for calculating the node reward.
|
||||
- An Active Message for session $s$ MUST only be sent during session $s + 1$;
|
||||
otherwise, it MUST be rejected.
|
||||
- The ledger MUST only accept a single Active Message per node per session;
|
||||
any duplicate MUST be rejected.
|
||||
|
||||
### Reward Calculation
|
||||
|
||||
The reward calculation follows these steps:
|
||||
|
||||
#### Step 1: Calculate Base Reward
|
||||
|
||||
Calculate the base reward for session $s$:
|
||||
|
||||
$$R_s = \frac{I_s}{N_s}$$
|
||||
|
||||
Where $I_s$ is the income for DA service during session $s$,
|
||||
and $N_s$ is the number of nodes providing DA service during session $s$.
|
||||
|
||||
> **Note**: The base reward is fixed to the total number of nodes providing the service
|
||||
> instead of the number of active nodes.
|
||||
> This disincentivizes nodes from providing dishonest opinions about other nodes
|
||||
> to increase their own reward.
|
||||
|
||||
The income leftovers MUST be burned or consumed
|
||||
in such a way that will not benefit the nodes.
|
||||
|
||||
#### Step 2: Count Positive Opinions
|
||||
|
||||
Count the number of positive opinions for node $n$ in session $s$:
|
||||
|
||||
$$\text{opinions}(n, s) = \sum_{i=1}^{N} \text{valid}(\text{activity\_proof}(i, n, s))$$
|
||||
|
||||
Where $\text{valid}()$ returns true only when the `activity_proof` for node $n$ is valid
|
||||
and the opinion about node $n$ is **positive** for session $s$.
|
||||
|
||||
#### Step 3: Calculate Node Reward
|
||||
|
||||
Calculate the reward for node $n$ based on node activity:
|
||||
|
||||
$$R(n) = \frac{R_s}{2} \cdot \text{active}(n, s) + \frac{R_{s-1}}{2} \cdot \text{active}(n, s - 1)$$
|
||||
|
||||
Where $\text{active}(n, s)$ returns true only when $n \in \mathcal{N}_s$
|
||||
and the number of positive opinions on node $n$ for session $s$
|
||||
is greater than or equal to $\theta$:
|
||||
|
||||
$$\text{opinions}(n, s) \geq \theta$$
|
||||
|
||||
The reward is a function of the node's capacity (quality)
|
||||
to respond to sampling requests for both new and old blocks.
|
||||
Therefore, the reward draws from half of the income from session $s$ (for new blocks)
|
||||
and half of the income from session $s - 1$ (for old blocks).
|
||||
|
||||
The base reward $R_s$ is distributed to nodes that both:
|
||||
|
||||
- Submitted a valid Activity Proof
|
||||
- Received positive opinions exceeding the activity threshold
|
||||
for at least one of the sessions
|
||||
|
||||
> **Note**: Inactive nodes are not rewarded.
|
||||
> Nodes that have not participated in the previous session
|
||||
> are not rewarded for the past session.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Subjective Opinions
|
||||
|
||||
The mechanism intentionally uses subjective node opinions
|
||||
rather than strict performance metrics.
|
||||
While this introduces some arbitrariness,
|
||||
it provides a simple and flexible approach
|
||||
that aligns with Nomos' architectural goals.
|
||||
|
||||
### Dishonest Evaluation
|
||||
|
||||
The system has potential for dishonest evaluation.
|
||||
However, the economic design reduces possibilities
|
||||
for dishonest behaviour and collusion:
|
||||
|
||||
- The reward calculation divides rewards based on total number of nodes
|
||||
rather than just active ones,
|
||||
discouraging manipulation of the opinion system.
|
||||
- Income leftovers are burned to prevent benefit from underreporting.
|
||||
|
||||
### Collusion Resistance
|
||||
|
||||
The activity threshold of $N_s/2$ requires a node
|
||||
to receive positive opinions from at least half of all nodes.
|
||||
This makes collusion attacks expensive,
|
||||
as an attacker would need to control a majority of nodes
|
||||
to guarantee rewards for malicious nodes.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Service Declaration Protocol][sdp] - Protocol for declaring DA node participation
|
||||
|
||||
### Informative
|
||||
|
||||
- [NomosDA Rewarding][origin-ref] - Original specification document
|
||||
- [Analysis of Sampling Strategy][sampling-analysis] - Motivation for sampling 20 subnetworks
|
||||
|
||||
[rfc-2119]: https://www.ietf.org/rfc/rfc2119.txt
|
||||
[origin-ref]: https://nomos-tech.notion.site/NomosDA-Rewarding-203261aa09df80af8c77dfb3dc593673
|
||||
[sdp]: https://nomos-tech.notion.site/Service-Declaration-Protocol
|
||||
[sampling-analysis]: https://nomos-tech.notion.site/Analysis-of-Sampling-Strategy
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,188 +0,0 @@
|
||||
# NOMOS-DIGITAL-SIGNATURE
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Digital Signature |
|
||||
| Slug | 150 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Jimmy Debe <jimmy@status.im> |
|
||||
| Contributors | Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/blockchain/raw/digital-signature.md) — chore: add math support (#287)
|
||||
- **2026-01-30** — [`99ca13a`](https://github.com/logos-co/logos-lips/blob/99ca13af02087ff94fabbf3e7fca7cb8e5320f46/docs/blockchain/raw/digital-signature.md) — New RFC: Nomos Digital Signature (#167)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification describes the digital signature schemes
|
||||
used across different components in the Nomos system design.
|
||||
Throughout the system, each Nomos layer shares the same signature scheme,
|
||||
ensuring consistent security and interoperability.
|
||||
The specification covers EdDSA for general-purpose signing
|
||||
and ZKSignature for zero-knowledge proof of key ownership.
|
||||
|
||||
**Keywords:** digital signature, EdDSA, Ed25519, zero-knowledge proof,
|
||||
ZKSignature, cryptography, elliptic curve, Curve25519
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119][rfc-2119].
|
||||
|
||||
### Definitions
|
||||
|
||||
| Term | Description |
|
||||
| ---- | ----------- |
|
||||
| EdDSA | Edwards-curve Digital Signature Algorithm, a signature scheme based on twisted Edwards curves. |
|
||||
| Ed25519 | An instance of EdDSA using Curve25519, providing 128-bit security. |
|
||||
| ZKSignature | A zero-knowledge signature scheme that proves knowledge of a secret key without revealing it. |
|
||||
| Prover | An entity that generates a cryptographic proof or signature. |
|
||||
| Verifier | An entity that validates a cryptographic proof or signature. |
|
||||
| Public Key | The publicly shareable component of a key pair, used for verification. |
|
||||
| Secret Key | The private component of a key pair, used for signing and proof generation. |
|
||||
|
||||
## Background
|
||||
|
||||
The Nomos Bedrock consists of a few key components that Nomos Network is built on.
|
||||
See the [Nomos whitepaper][nomos-whitepaper] for more information.
|
||||
The Bedrock Mantle component serves as the operating system of Nomos.
|
||||
This includes facilitating operations like writing data to the blockchain or
|
||||
a restricted ledger of notes to support payments and staking.
|
||||
This component also defines how Nomos zones update their state and
|
||||
the coordination between the Nomos zone executor nodes.
|
||||
It is like a system call interface designed to provide a minimal set of operations
|
||||
to interact with lower-level Bedrock services.
|
||||
It is an execution layer that connects Nomos services
|
||||
to provide the necessary functionality for sovereign rollups and zones.
|
||||
See [Common Ledger specification][common-ledger] for more on Nomos zones.
|
||||
|
||||
In order for the Bedrock layer to remain lightweight, it focuses on data availability
|
||||
and verification rather than execution.
|
||||
Native zones on the other hand will be able to define their state transition function
|
||||
and prove to the Bedrock layer their correct execution.
|
||||
The Bedrock layer components share the same digital signature mechanism to ensure security and privacy.
|
||||
This document describes the validation tools that are used with Bedrock services in the Nomos network.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
The signature schemes used by the provers and verifiers include:
|
||||
|
||||
- EdDSA Digital Signature Algorithm
|
||||
- ZKSignature (Zero-Knowledge Signature)
|
||||
|
||||
### EdDSA
|
||||
|
||||
EdDSA is a signature scheme based on elliptic-curve cryptography,
|
||||
defined over twisted [Edwards curves][edwards-curves].
|
||||
Nomos uses the Ed25519 instance with Curve25519,
|
||||
providing 128-bit security for general-purpose signing.
|
||||
EdDSA SHOULD NOT be used for ZK circuit construction.
|
||||
|
||||
The prover computes the following EdDSA signature using twisted Edwards curve Curve25519:
|
||||
|
||||
$$-x^2 + y^2 = 1 - (121665/121666)x^2y^2 \mod{(2^{255} - 19)}$$
|
||||
|
||||
- The public key size MUST be 32 bytes.
|
||||
- The signature size MUST be 64 bytes.
|
||||
- The public key MUST NOT already exist in the system.
|
||||
|
||||
### ZKSignature
|
||||
|
||||
The ZKSignature scheme enables a prover to demonstrate cryptographic knowledge of a secret key,
|
||||
corresponding to a publicly available key,
|
||||
without revealing the secret key itself.
|
||||
The following is the structure for a proof attesting public key ownership:
|
||||
|
||||
```python
|
||||
class ZkSignaturePublic:
|
||||
public_keys: list[ZkPublicKey] # The public keys signing the message
|
||||
msg: hash # The hash of the message
|
||||
```
|
||||
|
||||
The prover knows a witness:
|
||||
|
||||
```python
|
||||
class ZkSignatureWitness:
|
||||
# The list of secret keys used to sign the message
|
||||
secret_keys: list[ZkSecretKey]
|
||||
```
|
||||
|
||||
Such that the following constraints hold:
|
||||
|
||||
1. The number of secret keys is equal to the number of public keys:
|
||||
|
||||
```python
|
||||
assert len(secret_keys) == len(public_keys)
|
||||
```
|
||||
|
||||
1. Each public key is derived from the corresponding secret key:
|
||||
|
||||
```python
|
||||
assert all(
|
||||
public_keys[i] == hash("NOMOS_KDF", secret_keys[i])
|
||||
for i in range(len(public_keys))
|
||||
)
|
||||
```
|
||||
|
||||
- The proof MUST be embedded in the hashed `msg`.
|
||||
|
||||
The ZKSignature circuit MUST take a maximum of 32 public keys as inputs.
|
||||
To prove ownership when using fewer than 32 keys,
|
||||
the remaining inputs MUST be padded with the public key corresponding
|
||||
to the secret key `0`.
|
||||
These padding entries are ignored during execution.
|
||||
The outputs of the circuit have no size limit,
|
||||
as they MUST be included in the hashed `msg`.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Key Management
|
||||
|
||||
Secret keys MUST be stored securely and never transmitted in plaintext.
|
||||
Implementations MUST use secure random number generators for key generation.
|
||||
|
||||
### EdDSA Security
|
||||
|
||||
EdDSA provides 128-bit security when used with Ed25519.
|
||||
Implementations MUST validate public keys before use to prevent small subgroup attacks.
|
||||
Signature verification MUST reject malformed signatures.
|
||||
|
||||
### ZKSignature Security
|
||||
|
||||
The ZKSignature scheme relies on the security of the underlying hash function
|
||||
and the zero-knowledge proof system.
|
||||
The hash function used for key derivation (`NOMOS_KDF`) MUST be collision-resistant.
|
||||
Implementations MUST verify that proofs are well-formed before accepting them.
|
||||
|
||||
### Replay Protection
|
||||
|
||||
Signatures SHOULD include context-specific data (such as timestamps or nonces)
|
||||
to prevent replay attacks across different contexts or time periods.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [RFC 2119][rfc-2119] - Key words for use in RFCs to Indicate Requirement Levels
|
||||
|
||||
### Informative
|
||||
|
||||
- [Nomos whitepaper][nomos-whitepaper] - The Nomos Whitepaper
|
||||
- [Common Ledger specification][common-ledger] - Common Ledger Specification
|
||||
- [Edwards curves][edwards-curves] - Twisted Edwards Curves
|
||||
|
||||
[rfc-2119]: https://www.ietf.org/rfc/rfc2119.txt
|
||||
[nomos-whitepaper]: https://nomos-tech.notion.site/The-Nomos-Whitepaper-1fd261aa09df81318690c6f398064efb
|
||||
[common-ledger]: https://nomos-tech.notion.site/Common-Ledger-Specification-1fd261aa09df81088b76f39cbbe7c648
|
||||
[edwards-curves]: https://eprint.iacr.org/2008/013.pdf
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,239 +0,0 @@
|
||||
# CRYPTARCHIA-FORK-CHOICE
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Cryptarchia Fork Choice Rule |
|
||||
| Slug | 147 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | David Rusu <davidrusu@status.im> |
|
||||
| Contributors | Jimmy Debe <jimmy@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-30** — [`0ef87b1`](https://github.com/logos-co/logos-lips/blob/0ef87b1ba9491c854e48c8dfd7574d34ec69c704/docs/blockchain/raw/fork-choice.md) — New RFC: CODEX-MANIFEST (#191)
|
||||
- **2026-01-29** — [`a428c03`](https://github.com/logos-co/logos-lips/blob/a428c0370733bdeadc019952a49264443d27edd0/docs/blockchain/raw/fork-choice.md) — New RFC: NOMOS-FORK-CHOICE (#247)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document describes the consensus mechanism of the fork choice rule,
|
||||
followed by nodes in the Cryptarchia protocol.
|
||||
Cryptarchia implements two fork choice rules,
|
||||
one during node bootstrapping
|
||||
and the second fork choice once a node is connected to the network.
|
||||
|
||||
**Keywords:** fork choice, Cryptarchia, Ouroboros Genesis, Ouroboros Praos,
|
||||
bootstrapping, long-range attack, consensus
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119][rfc-2119].
|
||||
|
||||
### Definitions
|
||||
|
||||
| Term | Description |
|
||||
| ---- | ----------- |
|
||||
| $k$ | Safety parameter, the depth at which a block is considered immutable. |
|
||||
| $s_{gen}$ | Sufficient time measured in slots to measure the density of block production with enough statistical significance. In practice, $s_{gen} = \frac{k}{4f}$, where $f$ is the active slot coefficient from the leader lottery. See [Theorem 2 of Badertscher et al., 2018][ouroboros-genesis] for more information. |
|
||||
| CommonPrefixDepth | A function $\textbf{CommonPrefixDepth}(b_1, b_2) \rightarrow (\mathbb{N}, \mathbb{N})$ that returns the minimum block depth at which the two branches converge to a common chain. |
|
||||
| density | A function $\textbf{density}(b_i, d, s_{gen})$ that returns the number of blocks produced in the $s_{gen}$ slots following block $b_{i-d}$. |
|
||||
|
||||
## Background
|
||||
|
||||
In blockchain networks,
|
||||
the consensus process may encounter multiple competing branches (forks) of the blockchain state.
|
||||
A Nomos node maintains a local copy of the blockchain state and
|
||||
connects to a set of peers to download new blocks.
|
||||
|
||||
During bootstrapping, Cryptarchia v1 implements [Ouroboros Genesis][ouroboros-genesis] and
|
||||
[Ouroboros Praos][ouroboros-praos] for the fork choice mechanism.
|
||||
These translate to two fork choice rules,
|
||||
the bootstrap rule and the online rule.
|
||||
This approach is meant to help nodes defend against malicious peers feeding false chains to download.
|
||||
This calls for a more expensive fork choice rule that can differentiate between malicious long-range attacks and
|
||||
the honest chain.
|
||||
|
||||
### The Long Range Attack
|
||||
|
||||
The protocol has a time window during which a node that wins the leader **lottery**
|
||||
may complete a new block.
|
||||
Nodes with more stake have a higher probability of being selected through the **lottery**.
|
||||
The **lottery difficulty** is determined by protocol parameters and the node's stake.
|
||||
The leadership lottery difficulty will adjust dynamically
|
||||
based on the total stake that is participating in the consensus at the time.
|
||||
The scenario that this fork choice rule solves
|
||||
is when an adversary forks the chain and
|
||||
generates a very sparse branch where he is the only winner for an epoch.
|
||||
This fork would be very sparse
|
||||
since the attacker does not control a large amount of stake initially.
|
||||
|
||||
Each epoch,
|
||||
the lottery difficulty is adjusted
|
||||
based on participation in the previous epoch to maintain a target block rate.
|
||||
When this happens on the adversary's chain,
|
||||
the lottery difficulty will plummet and
|
||||
he will be able to produce a chain that has a similar growth rate to the main chain.
|
||||
The advantage is that his chain is more efficient.
|
||||
Unlike the honest chain,
|
||||
which needs to deal with unintentional forks caused by network delays,
|
||||
the adversary's branch has no wasted blocks.
|
||||
|
||||
With this advantage,
|
||||
the adversary can eventually make up for that sparse initial period and
|
||||
extend his fork until it's longer than the honest chain.
|
||||
He can then convince bootstrapping nodes to join his fork,
|
||||
where he has had a monopoly on block rewards.
|
||||
|
||||
#### Genesis Fork Choice Rule Mitigation
|
||||
|
||||
When the honest branch and the adversary branch are in the period immediately following the fork,
|
||||
the honest chain is dense and
|
||||
the adversary's fork will be quite sparse.
|
||||
If an honest node had seen the adversary's fork in that period,
|
||||
it would not have followed this fork since the honest chain would be longer,
|
||||
so selecting the fork using the longest chain rule is fine for a short-range fork.
|
||||
|
||||
If an honest node sees the adversary's fork after he's completed the attack,
|
||||
the longest chain rule is no longer enough to protect them.
|
||||
Instead, the node can look at the density of both chains in that short period after they diverge and
|
||||
select the chain with the higher density of blocks.
|
||||
|
||||
#### Praos Fork Choice Rule Mitigation
|
||||
|
||||
Under two assumptions:
|
||||
|
||||
1. A node has successfully bootstrapped and found the honest chain.
|
||||
1. Nodes see honest blocks reasonably quickly.
|
||||
|
||||
Nodes will remain on the honest chain if they reject forks that diverge further back than $k$ blocks,
|
||||
without further inspection.
|
||||
In order for an adversary to succeed,
|
||||
they would need to build a $k$-deep chain faster than the time it takes the honest nodes to grow the honest chain by $k$ blocks.
|
||||
The adversary must build this chain live,
|
||||
alongside the honest chain.
|
||||
They cannot build this chain after-the-fact,
|
||||
since online nodes will be rejecting any fork that diverges before their $k$-deep block.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### CommonPrefixDepth Examples
|
||||
|
||||
1. $\textbf{CommonPrefixDepth}(b_1, b_2) = (0, 4)$
|
||||
implies that $b_2$ is ahead of $b_1$ by 4 blocks.
|
||||
|
||||

|
||||
|
||||
1. $\textbf{CommonPrefixDepth}(b_2, b_5) = (2, 3)$
|
||||
would represent a forking tree like the one illustrated below:
|
||||
|
||||

|
||||
|
||||
1. $\textbf{density}(b_i, d, s_{gen})$
|
||||
returns the number of blocks produced in the $s_{gen}$ slots following block $b_{i-d}$.
|
||||
For example, in the following diagram,
|
||||
count the number of blocks produced in the $s_{gen}$ slots of the highlighted area.
|
||||
|
||||

|
||||
|
||||
### Bootstrap Fork Choice Rule
|
||||
|
||||
During bootstrapping, the Ouroboros Genesis fork choice rule (`maxvalid-bg`) is used.
|
||||
|
||||
```python
|
||||
def bootstrap_fork_choice(c_local, forks, k, s_gen):
|
||||
c_max = c_local
|
||||
for c_fork in forks:
|
||||
depth_max, depth_fork = common_prefix_depth(c_max, c_fork)
|
||||
if depth_max <= k:
|
||||
# the fork depth is within our safety parameter `k`. It's safe
|
||||
# to use longest chain to decide the fork choice.
|
||||
if depth_max < depth_fork:
|
||||
# strict inequality to ensure to choose first-seen chain as the tie break
|
||||
c_max = c_fork
|
||||
else:
|
||||
# here the fork depth is larger than our safety parameter `k`.
|
||||
# It's unsafe to use the longest chain here, instead check the density
|
||||
# of blocks immediately after the divergence.
|
||||
if density(c_max, depth_max, s_gen) < density(c_fork, depth_fork, s_gen):
|
||||
# The denser chain immediately after the divergence wins.
|
||||
c_max = c_fork
|
||||
return c_max
|
||||
```
|
||||
|
||||
### Online Fork Choice Rule
|
||||
|
||||
When `bootstrap-rule` is complete,
|
||||
a node SHOULD switch to the `online-rule`.
|
||||
See [CRYPTARCHIA-V1-BOOTSTRAPPING-SYNCHRONIZATION][bootstrap] for more information on bootstrapping.
|
||||
With the `online-rule` flag,
|
||||
the node SHOULD now reject any forks that diverge further back than $k$ blocks.
|
||||
|
||||
```python
|
||||
def online_fork_choice(c_local, forks, k):
|
||||
c_max = c_local
|
||||
for c_fork in forks:
|
||||
depth_max, depth_fork = common_prefix_depth(c_max, c_fork)
|
||||
if depth_max <= k:
|
||||
# the fork depth is within our safety parameter `k`. It's safe
|
||||
# to use the longest chain to decide the fork choice.
|
||||
if depth_max < depth_fork:
|
||||
# strict inequality to ensure to choose the first-seen chain as our tie break
|
||||
c_max = c_fork
|
||||
else:
|
||||
# The fork depth is larger than our safety parameter `k`.
|
||||
# Ignore this fork.
|
||||
continue
|
||||
return c_max
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Long-Range Attack Resistance
|
||||
|
||||
The bootstrap fork choice rule provides resistance against long-range attacks
|
||||
by comparing chain density in the period immediately following divergence.
|
||||
Implementations MUST use the Genesis fork choice rule during bootstrapping
|
||||
to protect against adversaries who have built alternative chains over extended periods.
|
||||
|
||||
### Safety Parameter Selection
|
||||
|
||||
The safety parameter $k$ determines the depth at which blocks are considered immutable.
|
||||
Implementations SHOULD choose $k$ based on the expected network conditions and
|
||||
the desired security guarantees.
|
||||
A larger $k$ provides stronger security but requires longer confirmation times.
|
||||
|
||||
### Online Rule Assumptions
|
||||
|
||||
The online fork choice rule assumes that nodes have successfully bootstrapped
|
||||
and are receiving honest blocks in a timely manner.
|
||||
If these assumptions are violated,
|
||||
nodes MAY be vulnerable to attacks and
|
||||
SHOULD fall back to the bootstrap rule.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [CRYPTARCHIA-V1-BOOTSTRAPPING-SYNCHRONIZATION][bootstrap] - Bootstrapping and synchronization protocol
|
||||
|
||||
### Informative
|
||||
|
||||
- [Ouroboros Genesis][ouroboros-genesis] - Composable Proof-of-Stake Blockchains with Dynamic Availability
|
||||
- [Ouroboros Praos][ouroboros-praos] - An adaptively-secure, semi-synchronous proof-of-stake blockchain
|
||||
- [Cryptarchia Fork Choice Rule][origin-ref] - Original specification
|
||||
|
||||
[rfc-2119]: https://www.ietf.org/rfc/rfc2119.txt
|
||||
[bootstrap]: ./bootstrap.md
|
||||
[ouroboros-genesis]: https://eprint.iacr.org/2018/378.pdf
|
||||
[ouroboros-praos]: https://eprint.iacr.org/2017/573.pdf
|
||||
[origin-ref]: https://nomos-tech.notion.site/Cryptarchia-Fork-Choice-Rule-21b261aa09df811584dfd362abb26627
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
Before Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 22 KiB |
|
Before Width: | Height: | Size: 18 KiB |
@@ -1,957 +0,0 @@
|
||||
# NOMOS-BLEND-PROTOCOL
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Blend Protocol |
|
||||
| Slug | 95 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Marcin Pawlowski |
|
||||
| Contributors | Alexander Mozeika <alexander.mozeika@status.im>, Youngjoon Lee <youngjoon@status.im>, Frederico Teixeira <frederico@status.im>, Mehmet Gonen <mehmet@status.im>, Daniel Sanchez Quiros <danielsq@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Daniel Kashepava <danielkashepava@status.im>, Thomas Lavaur <thomaslavaur@status.im>, Antonio Antonino <antonio@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/nomos-blend-protocol.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/nomos-blend-protocol.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
The Blend Protocol is an anonymous broadcasting protocol for the Nomos network that provides network-level privacy for block proposers.
|
||||
It addresses network-based de-anonymization by making it difficult and costly to link block proposals to their proposers through network analysis.
|
||||
The protocol increases the time to link a sender to a proposal by at least 300 times,
|
||||
making stake inference highly impractical.
|
||||
|
||||
The protocol achieves probabilistic unlinkability in a highly decentralized environment with low bandwidth cost but high latency.
|
||||
It hides the sender of a block proposal through cryptographic obfuscation and timing delays,
|
||||
routing encrypted messages through multiple blend nodes before revelation.
|
||||
|
||||
**Keywords:** Blend, anonymous broadcasting, privacy, mix network,
|
||||
unlinkability, stake privacy, encryption
|
||||
|
||||
## Motivation
|
||||
|
||||
All Proof of Stake (PoS) systems have an inherent privacy problem where stake determines node behavior.
|
||||
By observing node behavior,
|
||||
one can infer the node's stake.
|
||||
The Blend Protocol addresses network-based de-anonymization where an adversary observes network activity to link nodes to their proposals and estimate stake.
|
||||
|
||||
The protocol achieves:
|
||||
|
||||
1. **Unlinkability**:
|
||||
Block proposers cannot be linked to their proposals through network analysis
|
||||
2. **Stake privacy**:
|
||||
Inferring relative stake takes more than 10 years for adversaries controlling 10% stake (targeting 0.1% stake node)
|
||||
|
||||
The Blend Protocol is one of the Nomos Bedrock Services,
|
||||
providing censorship resistance and network-level privacy for block producers.
|
||||
It must be used alongside mempool protections (like NomosDA) to achieve a truly privacy-preserving system.
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
|
||||
"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be
|
||||
interpreted as described in RFC 2119.
|
||||
|
||||
### Definitions
|
||||
|
||||
| Term | Description |
|
||||
| ------ | ----------- |
|
||||
| **Data message** | A message generated by a consensus leader containing a block proposal. Indistinguishable from other messages until fully processed. |
|
||||
| **Cover message** | A message with meaningless content that creates noise for data messages to hide in. Indistinguishable from data messages. |
|
||||
| **Core node** | A Nomos node that declared willingness to participate in Blend Network through [SDP](https://www.notion.so/Service-Declaration-Protocol). Responsible for message generation, relaying, processing, and broadcasting. |
|
||||
| **Edge node** | A Nomos node that is not a core node. Connects to core nodes to send messages. |
|
||||
| **Block proposer node** | A core or edge node generating a new data message. |
|
||||
| **Blend node** | A core node that processes a data or cover message. |
|
||||
| **Blending** | Cryptographically transforming and randomly delaying messages to shuffle temporal order. |
|
||||
| **Broadcasting** | Sending a data message payload (block proposal) to all Nomos nodes. |
|
||||
| **Disseminating** | Relaying messages by core nodes through the network. |
|
||||
| **Epoch** | 648,000 slots (each 1 second), with average 21,600 blocks per epoch. |
|
||||
| **Session** | Time period with same set of core nodes executing the protocol. Length follows epoch length (21,600 blocks average). |
|
||||
| **Round** | Primitive time measure (1 second) during which a node can emit a new message. |
|
||||
| **Interval** | 30 rounds, approximating time between two consecutive block production events. |
|
||||
| **Blending token** | Information extracted from processed messages, used as proof of processing for rewards. |
|
||||
|
||||
### Node Types
|
||||
|
||||
| Type | Description |
|
||||
| ------ | ----------- |
|
||||
| **Honest node** | Follows the protocol fully. |
|
||||
| **Lazy node** | Does not follow protocol due to lack of incentives; only participates when directly beneficial. |
|
||||
| **Spammy node** | Emits more messages than protocol expects. |
|
||||
| **Unhealthy node** | Emits fewer messages than expected (may be under attack). |
|
||||
| **Malicious node** | Does not follow protocol regardless of incentives. |
|
||||
| **Unresponsive node** | Does not follow protocol due to technical reasons. |
|
||||
|
||||
### Adversary Types
|
||||
|
||||
| Type | Description |
|
||||
| ------ | ----------- |
|
||||
| **Passive adversary** | Can only observe, cannot modify node behavior. |
|
||||
| **Active adversary** | Can modify node behavior and observe network. |
|
||||
| **Local observer** | Passive adversary with limited network view and ability to observe internals of limited nodes. |
|
||||
|
||||
## Document Structure
|
||||
|
||||
This specification is organized into two distinct parts to serve different audiences and use cases:
|
||||
|
||||
**Part I: Protocol Specification** contains the normative requirements necessary for implementing an interoperable Blend Protocol node.
|
||||
This section defines the cryptographic primitives,
|
||||
message formats,
|
||||
network protocols,
|
||||
and behavioral requirements that all implementations MUST follow to ensure compatibility and maintain the protocol's privacy guarantees.
|
||||
Protocol designers,
|
||||
auditors,
|
||||
and those seeking to understand the core mechanisms should focus on this part.
|
||||
|
||||
**Part II: Implementation Details** provides non-normative guidance for implementers.
|
||||
This section offers practical recommendations,
|
||||
optimization strategies,
|
||||
and detailed examples that help developers build efficient and robust implementations.
|
||||
While these details are not required for interoperability,
|
||||
they represent best practices learned from reference implementations and can significantly improve performance and reliability.
|
||||
|
||||
This separation provides several benefits:
|
||||
|
||||
1. **Clarity of Requirements**:
|
||||
Implementers can clearly distinguish between mandatory requirements for interoperability (Part I) and optional optimizations (Part II)
|
||||
2. **Protocol Evolution**:
|
||||
The core protocol specification (Part I) can remain stable while implementation guidance (Part II) evolves with new techniques and optimizations
|
||||
3. **Multiple Implementations**:
|
||||
Different implementations can make different trade-offs in Part II while maintaining full compatibility through adherence to Part I
|
||||
4. **Audit Focus**:
|
||||
Security auditors can concentrate on the normative requirements in Part I that are critical for the protocol's privacy guarantees
|
||||
5. **Accessibility**:
|
||||
Protocol researchers can understand the essential mechanisms without being overwhelmed by implementation details,
|
||||
while developers get the practical guidance they need
|
||||
|
||||
## Part I: Protocol Specification
|
||||
|
||||
---
|
||||
|
||||
### Protocol Overview
|
||||
|
||||
The Blend Protocol works as follows:
|
||||
|
||||
1. Core nodes form a network by establishing encrypted connections with other
|
||||
core nodes at random
|
||||
2. A block proposer node selects several core nodes and creates a data message
|
||||
containing a block proposal that can only be processed by selected nodes in
|
||||
specified order
|
||||
3. The block proposer sends the data message to its neighbors (or connects to
|
||||
random core nodes if edge node)
|
||||
4. Core nodes disseminate (relay) the message to the rest of the network
|
||||
5. Core nodes generate new cover messages every round, blended with other
|
||||
messages
|
||||
6. When a data message reaches a designated blend node:
|
||||
- Message is cryptographically transformed (incoming/outgoing messages
|
||||
unlinkable by content)
|
||||
- Message is randomly delayed (unlinkable by timing observation)
|
||||
7. The blend node disseminates the processed message so next blend node can
|
||||
process it
|
||||
8. When message reaches the last blend node:
|
||||
- Node processes (decrypts and delays) the message
|
||||
- Extracts the block proposal payload
|
||||
- Broadcasts block proposal to Nomos Network
|
||||
|
||||
**Note**:
|
||||
Current protocol version is optimized for privacy of core nodes.
|
||||
Edge nodes gain lower privacy level,
|
||||
which is acceptable as they are assumed to be mobile, without static long-term network identifiers, and to have lower stake.
|
||||
|
||||
### Network Protocol
|
||||
|
||||
#### Network Formation
|
||||
|
||||
Core nodes form a peer-to-peer network at the beginning of each session:
|
||||
|
||||
1. All core nodes retrieve the set of participating core nodes from [SDP](https://www.notion.so/Service-Declaration-Protocol)
|
||||
protocol
|
||||
2. Each core node establishes encrypted connections with randomly selected
|
||||
core nodes
|
||||
3. Network is considered formed when nodes reach minimum connectivity
|
||||
requirements
|
||||
|
||||
Edge nodes connect to core nodes on-demand when they need to send messages.
|
||||
|
||||
#### Minimal Network Size
|
||||
|
||||
The protocol requires a minimum number of core nodes to operate safely.
|
||||
If this minimum is not met,
|
||||
nodes MUST NOT use the Blend protocol and MUST broadcast data messages directly.
|
||||
|
||||
#### Network Maintenance
|
||||
|
||||
Nodes monitor connection quality and adjust their connections based on:
|
||||
|
||||
- Message frequency and correctness
|
||||
- Network health indicators
|
||||
- Protocol compliance of peers
|
||||
|
||||
Nodes may close connections with misbehaving peers and establish new connections to maintain network quality.
|
||||
|
||||
#### Session Transitions
|
||||
|
||||
When a new session or epoch begins,
|
||||
the network implements a transition period to allow messages generated with old credentials to safely complete their journey through the network.
|
||||
|
||||
### Quota Protocol
|
||||
|
||||
The protocol limits the number of messages that can be generated during a session through a quota system.
|
||||
Two types of quota exist:
|
||||
|
||||
1. **Core Quota**:
|
||||
Limits cover message generation and blending operations for core nodes during a session
|
||||
2. **Leadership Quota**:
|
||||
Limits blending operations a block proposer can perform per proof of leadership
|
||||
|
||||
Nodes generate session-specific key pools,
|
||||
where each key is associated with a proof of quota.
|
||||
This ensures messages are properly rate-limited and nodes cannot exceed their allowed message generation capacity.
|
||||
|
||||
### Message Protocol
|
||||
|
||||
#### Message Structure
|
||||
|
||||
Messages consist of three components:
|
||||
|
||||
1. **Public Header (H)**:
|
||||
Contains public key,
|
||||
proof of quota,
|
||||
and signature
|
||||
2. **Encrypted Private Header (h)**:
|
||||
Contains blending headers for each hop,
|
||||
with proofs of selection
|
||||
3. **Payload (P)**:
|
||||
The actual content (block proposal or cover message data)
|
||||
|
||||
#### Message Lifecycle
|
||||
|
||||
Messages follow a defined lifecycle through the network:
|
||||
|
||||
1. **Generation**:
|
||||
Triggered by consensus lottery (data) or schedule (cover)
|
||||
2. **Relaying**:
|
||||
Nodes validate and forward messages to neighbors
|
||||
3. **Processing**:
|
||||
Designated nodes decrypt and extract next-hop information
|
||||
4. **Delaying**:
|
||||
Random delays hide timing correlations
|
||||
5. **Releasing**:
|
||||
Messages released according to delay schedule
|
||||
6. **Broadcasting**:
|
||||
Final nodes extract and broadcast block proposals
|
||||
|
||||
### Proof Mechanisms
|
||||
|
||||
#### Proof of Quota (PoQ)
|
||||
|
||||
Guarantees that honestly generated messages use valid quota allocation.
|
||||
Two types exist:
|
||||
|
||||
- **Core Quota Proof**:
|
||||
Validated message is within core node's session quota
|
||||
- **Leadership Quota Proof**:
|
||||
Validated message is within leader's quota per won slot
|
||||
|
||||
Combined proof uses logical OR of both proof types.
|
||||
|
||||
#### Proof of Selection (PoSel)
|
||||
|
||||
Makes node selection for message processing random and verifiable.
|
||||
Prevents:
|
||||
|
||||
- Targeting specific nodes
|
||||
- Selfish behavior (sending all messages to self)
|
||||
- Predictable routing patterns
|
||||
|
||||
### Rewarding Protocol
|
||||
|
||||
Nodes are rewarded for participating in the protocol:
|
||||
|
||||
1. **Message Processing**: Nodes collect blending tokens as proof of processing
|
||||
2. **Activity Proof**: Probabilistic attestation using [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance)
|
||||
3. **Two-Tier Rewards**: Base reward for all active nodes, premium reward for
|
||||
nodes with minimal [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance)
|
||||
|
||||
### Security Considerations
|
||||
|
||||
#### DoS Protection
|
||||
|
||||
Multiple mechanisms prevent denial-of-service attacks:
|
||||
|
||||
- Quota system limits message generation
|
||||
- Connection monitoring detects spammy/malicious nodes
|
||||
- Minimal network size requirement
|
||||
- Message uniqueness verification
|
||||
|
||||
#### Privacy Properties
|
||||
|
||||
The protocol provides probabilistic unlinkability with quantifiable privacy guarantees.
|
||||
Time to link sender to proposal and time to infer stake increase significantly with each additional hop in the blending path.
|
||||
|
||||
#### Attack Resistance
|
||||
|
||||
Protection against various attack vectors:
|
||||
|
||||
- **Grinding attacks**:
|
||||
Prevented by unpredictable session randomness
|
||||
- **Tagging attacks**:
|
||||
Addressed by mempool protections (NomosDA)
|
||||
- **Timing attacks**:
|
||||
Mitigated by random delays
|
||||
- **Content inspection**:
|
||||
Prevented by layered encryption
|
||||
- **Replay attacks**:
|
||||
Prevented by TLS and key uniqueness verification
|
||||
|
||||
### Rationale
|
||||
|
||||
#### Design Decisions
|
||||
|
||||
**Blending vs Mixing**:
|
||||
Protocol uses blending (spatial anonymity through multiple nodes) rather than mixing (temporal anonymity through single node) for higher decentralization and censorship resistance.
|
||||
|
||||
**Two-tier reward system**:
|
||||
Base reward ensures fairness;
|
||||
premium reward continues motivating nodes through lottery mechanism.
|
||||
|
||||
**Edge node privacy trade-off**:
|
||||
Lower privacy acceptable as edge nodes are assumed mobile,
|
||||
without static identifiers,
|
||||
with lower stake.
|
||||
|
||||
**Cover traffic motivation**:
|
||||
Nodes must generate cover messages for own privacy;
|
||||
protocol enforces statistical indistinguishability.
|
||||
|
||||
**Statistical bias**:
|
||||
Modulo operation for node selection introduces negligible bias (< 2^{-128}) for expected network sizes.
|
||||
|
||||
---
|
||||
|
||||
## Part II: Implementation Details
|
||||
|
||||
---
|
||||
|
||||
### Network Implementation
|
||||
|
||||
#### Core Network Bootstrapping
|
||||
|
||||
At the beginning of a session:
|
||||
|
||||
1. All core nodes retrieve fresh set of core nodes' connectivity information
|
||||
from [SDP](https://www.notion.so/Service-Declaration-Protocol) protocol
|
||||
2. Each core node selects at random a set of other core nodes and connects
|
||||
through fully encrypted connections
|
||||
3. After all core nodes connect, a new network is formed
|
||||
|
||||
#### Detailed Bootstrapping Procedure
|
||||
|
||||
1. Core node retrieves set of core nodes' information from SDP protocol at
|
||||
session start
|
||||
2. If number of core nodes is below minimum (32), stop and use regular
|
||||
broadcasting
|
||||
3. Start opening new connections:
|
||||
- Select at random (without replacement) a node from set of core nodes
|
||||
- Establish secure TLS 1.3 connection using ephemeral Ed25519 keys
|
||||
- Identify neighbor using Neighbor Distinction Process (NDP)
|
||||
- Stop connecting after reaching maximum retries (3 by default)
|
||||
4. Repeat until connected to minimal core peering degree (4 by default, both
|
||||
incoming and outgoing count)
|
||||
5. Start accepting incoming connections and maintaining all connections:
|
||||
- Can maintain up to maximum connections with core nodes (8 by default)
|
||||
- Can receive up to maximum connections with edge nodes (300 by default)
|
||||
6. If two nodes open connections to each other:
|
||||
- Node with lower public key value (provider_id from SDP, compared via
|
||||
Base58 encoding) closes outgoing connection
|
||||
- Node with higher public key value closes incoming connection
|
||||
|
||||
#### Connection Details
|
||||
|
||||
- **Protocol**: libp2p with TLS 1.3 (not older)
|
||||
- **Cryptographic scheme**: Ed25519 with ephemeral keys
|
||||
- **libp2p protocol name**:
|
||||
- Mainnet: `/nomos/blend/1.0.0`
|
||||
- Testnet: `/nomos-testnet/blend/1.0.0`
|
||||
|
||||
#### Connectivity Maintenance Implementation
|
||||
|
||||
Core nodes monitor connection quality by verifying message correctness and
|
||||
frequency:
|
||||
|
||||
1. Count messages after successful connection-level decryption during
|
||||
observation window (30 rounds)
|
||||
2. If frequency exceeds maximum: mark neighbor as spammy, close connection,
|
||||
establish new one
|
||||
3. If frequency below minimum: mark connection as unhealthy, establish
|
||||
additional connection
|
||||
4. Unhealthy connections are monitored continuously and may recover
|
||||
5. If maximum connections exceeded: log situation, pause new connections until
|
||||
below maximum
|
||||
6. Edge nodes MUST send message immediately after connection then close;
|
||||
otherwise core node closes connection
|
||||
7. Messages with invalid proof of quota or signature from core node: mark as
|
||||
malicious, close connection
|
||||
8. Messages with duplicate identifier: close connection with neighbor (with
|
||||
grace period for network delay)
|
||||
|
||||
#### Edge Network Bootstrapping Implementation
|
||||
|
||||
Edge nodes connect to core nodes when needing to send messages:
|
||||
|
||||
1. Retrieve set of core nodes from SDP at session start
|
||||
2. If below minimum size (32), stop and use regular broadcasting
|
||||
3. When needing to send message, select random core node
|
||||
4. Establish secure TLS connection
|
||||
5. Identify and authenticate using NDP
|
||||
6. Send message and close connection
|
||||
7. Repeat for communication redundancy number (4 by default)
|
||||
|
||||
#### Transition Period Implementation
|
||||
|
||||
When new session or epoch begins, protocol implements Transition Period (30
|
||||
rounds) to allow messages generated with old keys to safely exit the network:
|
||||
|
||||
**New session**:
|
||||
|
||||
- Validate message proofs against both new and past session-related public
|
||||
input for TP duration
|
||||
- Open new connections for new session
|
||||
- Maintain old connections and process messages for TP duration
|
||||
|
||||
**New epoch**:
|
||||
|
||||
- Validate message proofs against both new and past epoch-related public info
|
||||
for TP duration
|
||||
|
||||
### Quota Implementation
|
||||
|
||||
Quota limits the number of messages that can be generated during a session for
|
||||
network health and fair reward calculation.
|
||||
|
||||
### Core Quota
|
||||
|
||||
Core quota (Q_C) defines messaging allowance for a core node during single
|
||||
session:
|
||||
|
||||
```text
|
||||
Q_C = ⌈(C · (β_C + R_C · β_C)) / N⌉
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- C = S · F_C = expected number of cover messages per session by all core
|
||||
nodes
|
||||
- β_C = 3 = expected blending operations per cover message
|
||||
- R_C = redundancy parameter for cover messages
|
||||
- N = number of core nodes from SDP
|
||||
|
||||
Total core quota (all nodes): Q^Total_C = N · Q_C = C · (β_C + R_C · β_C)
|
||||
|
||||
### Leadership Quota
|
||||
|
||||
Leadership quota (Q_L) defines blending operations a block proposer can
|
||||
perform. Single quota used per proof of leadership:
|
||||
|
||||
```text
|
||||
Q_L = β_D + β_D · R_D
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- β_D = 3 = expected blending operations per data message
|
||||
- R_D = redundancy parameter for data messages
|
||||
|
||||
Average data messages per session: D_Avg = L_Avg · Q_L, where L_Avg = 21,600
|
||||
(average leaders per session)
|
||||
|
||||
#### Quota Application Details
|
||||
|
||||
Nodes create session-specific key pools:
|
||||
|
||||
```text
|
||||
K^{n,s}_q = {(K^n_0, k^n_0, π_Q^{K^n_0}), ..., (K^n_{q-1}, k^n_{q-1}, π_Q^{K^n_{q-1}})}
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- q = Q_C + Q^n_L = sum of core quota and leadership quota for node n
|
||||
- K^n_i = i-th public key
|
||||
- k^n_i = corresponding private key
|
||||
- π_Q^{K^n_i} = proof of quota (confirms i < h without disclosing node
|
||||
identity)
|
||||
|
||||
### Message Structure Implementation
|
||||
|
||||
A node n constructs message M = (H, h, P):
|
||||
|
||||
#### Public Header (H)
|
||||
|
||||
- K^n_i: public key from set K^n_h
|
||||
- π^{K^n_i}_Q: proof of quota for key (contains key nullifier)
|
||||
- σ_{K^n_i}(P_i): signature of i-th encapsulation, verifiable by K^n_i
|
||||
|
||||
#### Encrypted Private Header (h)
|
||||
|
||||
Contains β_max blending headers (b_1, ..., b_{β_max}), each with:
|
||||
|
||||
- K^n_l: public key from set K^n_h
|
||||
- π^{K^n_l}_Q: proof of quota for key
|
||||
- σ_{K^n_l}(P_l): signature of l-th encapsulation
|
||||
- π^{K^n_{l+1}, m_{l+1}}_S: proof of selection of node index m_{l+1}
|
||||
- Ω: flag indicating last blending header
|
||||
|
||||
#### Payload (P)
|
||||
|
||||
Message content (block proposal or random data for cover messages)
|
||||
|
||||
**Encapsulation Overhead**: Using Groth16 SNARKs, total overhead is ~1123
|
||||
bytes for 3 hops (~3% increase for typical block proposal of 33,129 bytes).
|
||||
|
||||
### Message Lifecycle Implementation
|
||||
|
||||
#### Generation Details
|
||||
|
||||
Message generation is triggered by:
|
||||
|
||||
1. **Data message**: Core/edge node won consensus lottery and has proof of
|
||||
leadership
|
||||
2. **Cover message**: Released at random by core node per Cover Message
|
||||
Schedule
|
||||
|
||||
Generation process:
|
||||
|
||||
1. Generate keys according to [Key Types and Generation Specification](https://nomos-tech.notion.site/Key-Types-and-Generation-Specification-215261aa09df81088b8fd7c3089162e8)
|
||||
- Each key uses message-type-specific allowance (quota)
|
||||
- Correct usage proven by Proof of Quota
|
||||
2. Format payload according to Payload Formatting Specification
|
||||
3. Encapsulate payload using Message Encapsulation Mechanism
|
||||
- Each key for single encapsulation, processable by single node
|
||||
- Node selection is random and deterministic, provable by Proof of
|
||||
Selection
|
||||
4. Format message according to Message Formatting Specification
|
||||
5. Release message according to Releasing logic
|
||||
|
||||
#### Relaying Details
|
||||
|
||||
When node receives message from neighbor:
|
||||
|
||||
1. Check public header:
|
||||
- Version MUST equal 0x01
|
||||
- Proof of quota MUST be valid
|
||||
- Signature MUST be valid
|
||||
- Public key MUST be unique
|
||||
2. Release message to network (Releasing section)
|
||||
3. Concurrently, add message to processing queue (Processing section)
|
||||
|
||||
**Duplicate Detection**: A node MUST cache the public key of every relayed message
|
||||
for duration of session plus safety buffer and transition period (~65 MB).
|
||||
|
||||
#### Processing Details
|
||||
|
||||
When message M is received with correct public header:
|
||||
|
||||
1. Decapsulate message per Message Encapsulation Mechanism
|
||||
2. If decapsulation succeeds:
|
||||
- Validate proof of selection (points to node index in SDP list)
|
||||
- Store blending token: τ = (π^{K^n_l}_Q, π^{K^n_l,l}_S)
|
||||
- If last flag set (Ω == 1):
|
||||
- If payload is block proposal: verify structure and broadcast
|
||||
- If payload is cover message: discard
|
||||
- Else:
|
||||
- Validate decapsulated public header (key uniqueness, signature, proof
|
||||
of quota)
|
||||
- Format message per Message Formatting Specification
|
||||
- Attempt subsequent decapsulation recursively
|
||||
- If decapsulation fails: randomly delay and release to neighbors
|
||||
3. If decapsulation fails: return failure message
|
||||
|
||||
#### Delaying Details
|
||||
|
||||
Purpose: Hide timing correlations between incoming/outgoing messages.
|
||||
|
||||
Maximum delay between release attempts: Δ_max = 3 rounds
|
||||
|
||||
Delaying logic:
|
||||
|
||||
1. Select random delay: δ ∈ (1, Δ_max)
|
||||
2. Start counting rounds from r_s
|
||||
3. Every round check if r_c == r_s + δ:
|
||||
- Release messages from queue (Releasing logic)
|
||||
- Select new random delay
|
||||
- Restart round counting
|
||||
|
||||
Release round selection works independently of queue state.
|
||||
|
||||
#### Releasing Details
|
||||
|
||||
Release process:
|
||||
|
||||
- **Received messages**: Immediately released to all neighbors (except sender)
|
||||
after header validation
|
||||
- **Processed messages**: Queued and released at next release round (per
|
||||
Delaying logic)
|
||||
- **Generated messages**: Released at beginning of next round after generation
|
||||
- **Statistical indistinguishability**: When data message generated, one random
|
||||
unreleased cover message MUST be removed from schedule
|
||||
- **Multiple messages**: If multiple messages scheduled for same round,
|
||||
randomly shuffle before release
|
||||
|
||||
Expected messages per release round for single node:
|
||||
|
||||
```text
|
||||
μ = ⌈(Δ_max · β_C · α) / N⌉
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- Δ_max = 3 (maximal delay)
|
||||
- β_C = 3 (blending operations per cover message)
|
||||
- α ≈ 1.03 (normalization constant for data messages)
|
||||
- N = number of core nodes
|
||||
|
||||
Results:
|
||||
|
||||
- N=16: μ=1 message per round
|
||||
- N=8: μ=2 messages per round
|
||||
- N=4: μ=3 messages per round
|
||||
|
||||
### Broadcasting Implementation
|
||||
|
||||
When payload added to broadcasting queue:
|
||||
|
||||
1. Verify payload contains valid block proposal structure (proposal not
|
||||
validated yet)
|
||||
2. Extract block proposal
|
||||
3. Broadcast to Nomos broadcasting channel after random delay
|
||||
|
||||
## Cover Message Schedule Implementation
|
||||
|
||||
Core nodes generate cover messages in fully random manner to maintain privacy.
|
||||
Messages evenly distributed across session duration.
|
||||
|
||||
### Safety Buffer Implementation
|
||||
|
||||
Problem: Session length in rounds is non-deterministic due to random block
|
||||
production. Safety buffer (100 intervals) reserves cover messages for when
|
||||
session lasts longer than expected.
|
||||
|
||||
### Cover Message Generation Algorithm
|
||||
|
||||
Given:
|
||||
|
||||
- Core quota Q_C
|
||||
- Expected blending operations β_C = 3
|
||||
- Last interval I_end = 21,600
|
||||
- Last interval of safety buffer I_max > I_end
|
||||
|
||||
For every session:
|
||||
|
||||
1. Calculate maximum cover messages: c = ⌈Q_C / β_C⌉
|
||||
2. For i ∈ {1, ..., c}:
|
||||
- Select random interval I ∈ {1, ..., I_max}
|
||||
- Select random round r ∈ {1, ..., |I|}
|
||||
- If (I, r) already selected, repeat; else add to winning slots W
|
||||
3. During each interval I and round r: if (I, r) ∈ W, generate cover message
|
||||
|
||||
**Important**: Number of generated cover messages MUST be reduced by number of
|
||||
data messages node generates during session (for statistical
|
||||
indistinguishability).
|
||||
|
||||
## Proof Mechanisms Implementation
|
||||
|
||||
### Proof of Quota Implementation (PoQ)
|
||||
|
||||
Guarantees honestly generated messages are relayed and disseminated. Two parts:
|
||||
|
||||
**Core Quota Proof** (π^{K^n_a}_{Q_C}) is true when:
|
||||
|
||||
- n ∈ N = SDP(s): node is in registered set (identity hidden)
|
||||
- K^n_a ∈ K^{n,s}_h: key generated by node for session
|
||||
- a < Q_C: index limits proof nullifiers per session
|
||||
|
||||
Public input: s, K^n_a, Q_C
|
||||
Private input: n, a
|
||||
Public output: ν_s (key nullifier uniquely identifying PoQ)
|
||||
|
||||
**Leadership Quota Proof** (π^{K^n_b}_{Q_L}) is true when:
|
||||
|
||||
- ∃ π^{n,e}_L: valid proof of leadership for node n in epoch e
|
||||
- K^n_b ∈ K^{n,s}_h: key generated by node for session
|
||||
- b < Q^n_L: index limits proof nullifiers per won slot
|
||||
|
||||
Public input: e, s, K^n_b, Q^n_L
|
||||
Private input: π^{n,e}_L, n, b
|
||||
Public output: ν_s (key nullifier)
|
||||
|
||||
**Combined Proof**: π^{K^n_i}_Q = π^{K^n_i}_{Q_C} ∨ π^{K^n_i}_{Q_L} (logical
|
||||
OR)
|
||||
|
||||
### Proof of Selection Implementation (PoSel)
|
||||
|
||||
Makes node selection for message processing random and verifiable. Prevents
|
||||
targeting specific nodes and selfish behavior.
|
||||
|
||||
PoSel (π^{K^n_i, m_i}_S) is true when:
|
||||
|
||||
- m_i = CSPRBG(H_N(ρ))_8 mod N, where:
|
||||
- ρ = secret selection randomness (little-endian)
|
||||
- m_i = recipient node index (little-endian)
|
||||
- CSPRBG()_8 = cryptographically secure pseudo-random bytes generator (8
|
||||
bytes, little-endian)
|
||||
- H_N() = domain separated blake2b hash
|
||||
- N = number of core nodes
|
||||
- v == v', where:
|
||||
- v = key nullifier of π^{K^n_i}_Q
|
||||
- v' = H_Ψ(b"KEY_NULLIFIER\V1", ρ)
|
||||
- H_Ψ() = Poseidon2 hash function
|
||||
|
||||
PoSel MUST be used alongside PoQ as they are tightly coupled.
|
||||
|
||||
## Rewarding Implementation
|
||||
|
||||
### Rewarding Motivation Details
|
||||
|
||||
Nodes must be rewarded for protocol actions:
|
||||
|
||||
1. **Message generation**: Especially for cover messages (data messages
|
||||
rewarded through consensus)
|
||||
2. **Message relaying**: Motivated by connection quality monitoring (fear of
|
||||
losing reward)
|
||||
3. **Message processing**: Motivated by collecting blending tokens
|
||||
(activity-based reward)
|
||||
4. **Message broadcasting**: Motivated by increasing service income pool
|
||||
|
||||
### Blending Tokens Implementation
|
||||
|
||||
When node processes message, it stores blending token:
|
||||
|
||||
```text
|
||||
τ = (π^{K^n_l}_Q, π^{K^n_l,l}_S)
|
||||
```
|
||||
|
||||
Tokens stored with context (session number) in set Τ^{l,s}.
|
||||
|
||||
### Session Randomness Implementation
|
||||
|
||||
Rewarding requires common unbiased randomness provided by consensus:
|
||||
|
||||
```text
|
||||
R_s = H('BLEND_SESSION_RANDOMNESS\V1' || R_e(s) || s)_512
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- H()_512 = blake2b hash (512 bits output)
|
||||
- R_e(s) = epoch nonce from consensus for epoch corresponding to session s
|
||||
- s = session number
|
||||
|
||||
### Activity Proof Implementation
|
||||
|
||||
Node activity proof (π^{l,τ,s}_A) attests in a probabilistic manner that node l
|
||||
was active during session s by presenting blending token τ.
|
||||
|
||||
Activity proof is true when:
|
||||
|
||||
- Node l has blending token τ ∈ Τ^{l,s} collected during session s where:
|
||||
- Proof of Quota π^{K^n_l}_Q ∈ τ is true for session s
|
||||
- Proof of Selection π^{K^n_l,l}_S ∈ τ is true for session s
|
||||
- Hamming distance between token and next session randomness is below activity
|
||||
threshold:
|
||||
|
||||
```text
|
||||
Δ_H(H(τ)_ε, H(R_{s+1})_ε) < A_ε
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- H() = blake2b hash
|
||||
- ε = ⌈log_2(Q^Total_C + 1) / 8⌉ · 8 (bits, rounded to full bytes)
|
||||
|
||||
**Activity Threshold**:
|
||||
|
||||
```text
|
||||
A_ε = χ - ν - θ
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- ν = ⌈log_2(N + 1)⌉ (bits needed for number of nodes)
|
||||
- χ = ⌈log_2(Q^Total_C + 1)⌉ (bits needed for all blending tokens)
|
||||
- θ = 1 (sensitivity parameter)
|
||||
|
||||
### Active Message Implementation
|
||||
|
||||
Node l constructs active message M_A = {l, τ, s, π^{l,τ,s}_A} for every
|
||||
session following Active Message format.
|
||||
|
||||
Active message metadata field MUST start with one byte version field (fixed to
|
||||
0x01), followed by Activity Proof.
|
||||
|
||||
Node l selects activity proof minimizing Hamming distance to new randomness:
|
||||
|
||||
```text
|
||||
π^{l,τ,s}_A = min_{Δ_H}(true(π^{i,τ,s}_A))
|
||||
```
|
||||
|
||||
Active message for session s MUST only be sent during session s+1; otherwise
|
||||
rejected.
|
||||
|
||||
Ledger MUST only accept single active message per-node per-session. Duplicates
|
||||
rejected.
|
||||
|
||||
### Reward Calculation Details
|
||||
|
||||
Rewards for session s calculated as:
|
||||
|
||||
1. **No calculation** if number of nodes from SDP below Minimal Network Size
|
||||
(32)
|
||||
2. **Count base proofs**: B = number of true activity proofs
|
||||
3. **Count premium proofs**: P = number of true activity proofs with minimal
|
||||
Hamming distance
|
||||
4. **Calculate base reward**: R = I / (B + P), where I = service income for
|
||||
session s
|
||||
5. **Calculate node reward**:
|
||||
|
||||
```text
|
||||
R(n) = R · [true(π^{i,τ,s}_A) + min_{Δ_H}(true(π^{i,τ,s}_A))]
|
||||
```
|
||||
|
||||
Base reward (R) paid to all nodes with true activity proof; reward doubled for
|
||||
nodes with minimal Hamming distance proof.
|
||||
|
||||
### Rewarding Distribution Logic Details
|
||||
|
||||
1. Node sends Active Message with activity proof in metadata field
|
||||
- Must point to single declaration (declaration_id) and single provider
|
||||
identity (provider_id)
|
||||
- Any reuse of provider_id makes Active Message invalid
|
||||
2. Active Message sent after end of session s (during s+1), after transition
|
||||
period
|
||||
- Delay allows including tokens from transition period
|
||||
3. When session s+2 begins, Mantle distributes rewards per Service Reward
|
||||
Distribution Protocol
|
||||
- Delay required to calculate reward partition
|
||||
4. No Active Message on time = no reward
|
||||
|
||||
## Security Considerations Implementation
|
||||
|
||||
### DoS Protection Details
|
||||
|
||||
The protocol includes multiple DoS mitigation mechanisms:
|
||||
|
||||
- Quota system limits message generation
|
||||
- Connectivity maintenance monitors and drops spammy/malicious nodes
|
||||
- Minimal network size requirement (32 nodes)
|
||||
- Connection limits prevent resource exhaustion
|
||||
- Message uniqueness verification prevents replay attacks
|
||||
|
||||
### Privacy Properties Details
|
||||
|
||||
**Unlinkability**: For adversary controlling 10% stake targeting 0.1% stake
|
||||
node with 3-hop blending:
|
||||
|
||||
- Time to Link (TTL): > 9 epochs
|
||||
- Time to Infer (TTI): > 10 years (487 epochs)
|
||||
|
||||
**Trade-offs**:
|
||||
|
||||
- Each additional hop increases TTL/TTI by ~10x
|
||||
- Latency penalty: ~1.5s per hop
|
||||
- Optimal configuration: 3-hop blending (4.5s average latency increase)
|
||||
|
||||
### Attack Resistance Details
|
||||
|
||||
- **Grinding attacks**: Prevented by unpredictable session randomness
|
||||
- **Tagging attacks**: Addressed by NomosDA (separate mempool protection)
|
||||
- **Timing attacks**: Mitigated by random delays (Δ_max = 3 rounds)
|
||||
- **Content inspection**: Prevented by layered encryption
|
||||
- **Replay attacks**: Prevented by TLS and public key uniqueness verification
|
||||
|
||||
## Rationale Implementation
|
||||
|
||||
### Design Decisions Details
|
||||
|
||||
**Blending vs Mixing**: Anonymity in blending comes from processing same
|
||||
message by multiple nodes (spatial anonymity), while mixing processes multiple
|
||||
messages by same node (temporal anonymity). Blending chosen for higher
|
||||
decentralization and censorship resistance.
|
||||
|
||||
**Two-tier reward system**: Base reward ensures fairness (all active nodes
|
||||
receive it); premium reward continues motivating lazy nodes through lottery
|
||||
mechanism.
|
||||
|
||||
**Edge node privacy trade-off**: Lower privacy acceptable as edge nodes
|
||||
assumed mobile, without static identifiers, with lower stake, and sporadic
|
||||
connections.
|
||||
|
||||
**Cover traffic motivation**: Nodes must generate cover messages for own
|
||||
privacy protection; protocol enforces indistinguishability by requiring cover
|
||||
message removal when data message generated.
|
||||
|
||||
**Statistical bias**: Modulo operation for node selection introduces negligible
|
||||
bias (< 2^{-128} for N < 2^{128}), acceptable for expected network sizes (<
|
||||
10 million nodes).
|
||||
|
||||
## Parameters Summary
|
||||
|
||||
### Global Parameters
|
||||
|
||||
- Session length (S): 648,000 rounds (average 21,600 blocks)
|
||||
- Interval length: 30 rounds
|
||||
- Maximum delay (Δ_max): 3 rounds
|
||||
- Maximum blending operations (β_max): 3
|
||||
- Expected blending operations (β_C, β_D): 3
|
||||
- Observation window (W): 30 rounds
|
||||
- Safety buffer: 100 intervals
|
||||
- Transition period: 30 rounds
|
||||
- Minimal network size: 32 nodes
|
||||
|
||||
### Core Node Parameters
|
||||
|
||||
- Minimal core peering degree (Φ_{CC}^{Min}): 4
|
||||
- Maximum core peering degree (Φ_{CC}^{Max}): 8
|
||||
- Maximum edge connections (Φ_{CE}^{Max}): 300
|
||||
- Maximum connection retries (Ω_C): 3
|
||||
|
||||
### Edge Node Parameters
|
||||
|
||||
- Connection redundancy (Φ_{EC}): 4
|
||||
- Maximum connection retries (Ω_E): 3
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt) - Key words for use in LIPs
|
||||
to Indicate Requirement Levels
|
||||
- [Service Declaration Protocol](https://www.notion.so/Service-Declaration-Protocol)
|
||||
\- Nomos SDP specification
|
||||
- [Nomos Bedrock](https://www.notion.so/Nomos-Bedrock) - Nomos foundational
|
||||
layer specification
|
||||
- [Data Availability Network Specification](https://www.notion.so/NomosDA-Network-Specification-1fd261aa09df81188e76cb083791252d)
|
||||
\- NomosDA specification
|
||||
- [Service Reward Distribution Protocol](https://www.notion.so/Service-Reward-Distribution-Protocol)
|
||||
\- Reward distribution specification
|
||||
|
||||
### Informative
|
||||
|
||||
- [Blend Protocol](https://nomos-tech.notion.site/Blend-Protocol-215261aa09df81ae8857d71066a80084)
|
||||
\- Original Blend Protocol documentation
|
||||
- [Nomos Services](https://www.notion.so/Nomos-Services) - Overview of Nomos
|
||||
services architecture
|
||||
- [Anonymous Leaders Reward Protocol](https://www.notion.so/Anonymous-Leaders-Reward-Protocol)
|
||||
\- Consensus reward mechanism
|
||||
- [Cryptarchia v1 Protocol Specification](https://www.notion.so/Cryptarchia-v1-Protocol-Specification)
|
||||
\- Consensus protocol details
|
||||
- [Block Construction, Validation and Execution Specification](https://www.notion.so/Block-Construction-Validation-and-Execution-Specification)
|
||||
\- Block structure details
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via
|
||||
[CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,630 +0,0 @@
|
||||
# NOMOS-CRYPTARCHIA-V1-PROTOCOL
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Cryptarchia v1 Protocol Specification |
|
||||
| Slug | 92 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | David Rusu <david@status.im> |
|
||||
| Contributors | Álvaro Castro-Castilla <alvaro@status.im>, Giacomo Pasini <giacomo@status.im>, Thomas Lavaur <thomas@status.im>, Mehmet <mehmet@status.im>, Marcin Pawlowski <marcin@status.im>, Daniel Sanchez Quiros <daniel@status.im>, Youngjoon Lee <youngjoon@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/nomos-cryptarchia-v1-protocol.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/nomos-cryptarchia-v1-protocol.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
Cryptarchia is the consensus protocol of Nomos Bedrock.
|
||||
This document specifies how Bedrock comes to agreement on a single history of blocks.
|
||||
The values that Cryptarchia optimizes for are resilience and privacy,
|
||||
which come at the cost of block times and finality.
|
||||
Cryptarchia is a probabilistic consensus protocol with properties similar to
|
||||
Bitcoin's Nakamoto Consensus,
|
||||
dividing time into slots with a leadership lottery run at each slot.
|
||||
|
||||
**Keywords:** consensus, proof-of-stake, leadership lottery, fork choice,
|
||||
block validation, epoch, slot, immutability
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
|
||||
"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be
|
||||
interpreted as described in [RFC 2119][rfc-2119].
|
||||
|
||||
## Background
|
||||
|
||||
### Resilience
|
||||
|
||||
In consensus, there is a trade-off between prioritizing either safety or liveness
|
||||
in the presence of catastrophic failure (this is a re-formalization of the CAP theorem).
|
||||
Choosing safety means the chain never forks,
|
||||
instead the chain halts until the network heals.
|
||||
On the other hand, choosing liveness (a la Bitcoin/Ethereum) means that
|
||||
block production continues but finality will stall,
|
||||
leading to confusion around which blocks are on the honest chain.
|
||||
|
||||
On the surface both options seem to provide similar guarantees.
|
||||
If finality is delayed indefinitely, is this not equivalent to a halted chain?
|
||||
The differences come down to how safety or liveness is implemented.
|
||||
|
||||
#### Prioritizing Safety
|
||||
|
||||
Chains that provide a safety guarantee do so using quorum-based consensus.
|
||||
This requires a known set of participants (i.e. a permissioned network)
|
||||
and extensive communication between them to reach agreement.
|
||||
This restricts the number of participants in the network.
|
||||
Furthermore, quorum based consensus can only tolerate up to 1/3rd of
|
||||
the participants becoming faulty.
|
||||
|
||||
A small participant set and low threshold for faults generally pushes these networks
|
||||
to put large barriers to entry,
|
||||
either through large staking requirements or politics.
|
||||
|
||||
#### Prioritizing Liveness
|
||||
|
||||
Chains that prioritize liveness generally do so by relying on fork choice rules
|
||||
such as the longest chain rule from Nakamoto consensus.
|
||||
These protocols allow each participant to make a local choice
|
||||
about which fork to follow,
|
||||
and therefore do not require quorums and thus can be permissionless.
|
||||
|
||||
Additionally, due to a lack of quorums, these protocols can be quite message efficient.
|
||||
Thus, participation does not need to be artificially reduced
|
||||
to remain within bandwidth restrictions.
|
||||
|
||||
These protocols tolerate up to 1/2 of participants becoming faulty.
|
||||
The large fault tolerance threshold and the large number of participants
|
||||
provides for much higher resilience to corruption.
|
||||
|
||||
### Privacy
|
||||
|
||||
The motivation behind the design of Cryptarchia can be boiled down to this statement:
|
||||
|
||||
*A block proposer should not feel the need to self-censor when proposing a block.*
|
||||
|
||||
Working to give leaders confidence in this statement has had ripple effects
|
||||
throughout the protocol, including that:
|
||||
|
||||
- **The block proposals should not be linkable to a leader**.
|
||||
An adversary should not be able to connect together
|
||||
the block proposals of a leader in order to build a profile.
|
||||
In particular, one should not be able to infer a proposer's stake
|
||||
from their past on-chain activity.
|
||||
- **Cryptarchia must not reveal the stake of the leader** -
|
||||
that is, it must be a Private Proof of Stake (PPoS) protocol.
|
||||
If the activity of the leader reveals their stake values
|
||||
(e.g. through weighted voting),
|
||||
then this value can be used to reduce the anonymity set for the leader
|
||||
by bucketing the leader as high/low stake and can open them up to targeting.
|
||||
- **Leaders should be protected against network triangulation attacks**.
|
||||
This is outside of the scope of this document,
|
||||
but it suffices to say that in-protocol cryptographic privacy
|
||||
is not sufficient to guarantee a leader's privacy.
|
||||
This topic is dealt with directly in Blend Network Specification.
|
||||
|
||||
### Limitations of Cryptarchia V1
|
||||
|
||||
Despite best efforts, it is not possible to provide perfect privacy and censorship resistance
|
||||
to all parties. In particular:
|
||||
|
||||
- It is not possible to protect leaders from leaking information about themselves
|
||||
based on the contents of blocks they propose.
|
||||
The tagging attack is an example of this,
|
||||
where an adversary may distribute a transaction to only a small subset of the network.
|
||||
If the block proposal includes this transaction,
|
||||
the adversary learns that the leader was one of those nodes in that subset.
|
||||
- The leader is a single point of failure (SPOF).
|
||||
Despite all the efforts to protect the leader,
|
||||
the network can be easily censored by the leader.
|
||||
The leader may choose to exclude certain types of transactions from blocks,
|
||||
leading to a worse UX for targeted parties.
|
||||
|
||||
These limitations are not considered insurmountable
|
||||
and there are sketches towards solutions that will be developed
|
||||
in following iterations of the protocol.
|
||||
|
||||
## Design Overview
|
||||
|
||||
Cryptarchia is a probabilistic consensus protocol with properties similar to
|
||||
Bitcoin's Nakamoto Consensus.
|
||||
|
||||
At a high level, Cryptarchia divides time into slots and at each slot,
|
||||
a leadership lottery is run.
|
||||
To participate in the lottery, a node must have held stake in the chain
|
||||
in the form of a note for a minimum time period.
|
||||
Given a sufficiently aged note, you can check if it has won a slot lottery
|
||||
by cryptographically flipping a weighted coin.
|
||||
The weight of the coin is proportional to the value of your note,
|
||||
thus higher valued notes lead to increased chances of winning.
|
||||
To ensure privacy and avoid revealing the note value,
|
||||
this lottery result is proven within a ZK proof system.
|
||||
|
||||
The design starts from the solid foundation provided by
|
||||
Ouroboros Crypsinous: Privacy-Preserving Proof-of-Stake
|
||||
and builds upon it, incorporating the latest research at the intersection of
|
||||
cryptography, consensus and network engineering.
|
||||
|
||||
## Protocol
|
||||
|
||||
### Constants
|
||||
|
||||
| Symbol | Name | Description | Value |
|
||||
| ------ | ---- | ----------- | ----- |
|
||||
| $f$ | slot activation coefficient | The target rate of occupied slots. Not all slots contain blocks, many are empty. (See Block Times & Blend Network Analysis for analysis leading to the choice of value.) | 1/30 |
|
||||
| $k$ | security parameter | Block depth finality. Blocks deeper than $k$ on any given chain are considered immutable. | 2160 blocks |
|
||||
| none | slot length | The duration of a single slot. | 1 second |
|
||||
| MAX_BLOCK_SIZE | max block size | The maximum size of the block body (not including the header) | 1 MB |
|
||||
| MAX_BLOCK_TXS | max block transactions | The maximum number of transactions in a block | 1024 |
|
||||
|
||||
### Notation
|
||||
|
||||
| Symbol | Name | Description | Value |
|
||||
| ------ | ---- | ----------- | ----- |
|
||||
| $s$ | slot security parameter | Sufficient slots such that $k$ blocks have been produced with high probability. | $3\lfloor \frac{k}{f}\rfloor$ |
|
||||
| $T$ | the block tree | This is the block tree observed by a node. | |
|
||||
| $F_T$ | tips of block tree $T$ | The set of concurrent forks of some block tree $T$. | $F_T=\{b\in T:\forall c \in T\space \textbf{parent}(c) \neq b \}$ |
|
||||
| $c_{loc}$ | tip of local chain | The chain that a node considers to be the honest chain. | $c_{loc} \in F_{T}$ |
|
||||
| $B_\text{imm}$ | the latest immutable block | The latest block which was committed (finalized) by the chain maintenance. | $B_\text{imm} \in \textbf{ancestors}(c_{loc})$ |
|
||||
| $sl$ | slot number | Index of slot. $sl=0$ denotes the genesis slot. | $sl=0,1,2,3,\dots$ |
|
||||
| $ep$ | epoch number | Index of epoch. $ep=0$ denotes the genesis epoch. | $ep=0,1,2,3,\dots$ |
|
||||
|
||||
### Latest Immutable Block
|
||||
|
||||
The latest immutable block $B_\text{imm}$ is the most recent block
|
||||
considered permanently finalized.
|
||||
The blocks deeper than $B_\text{imm}$ in the local chain $c_{loc}$
|
||||
are never to be reorganized.
|
||||
|
||||
This is maintained locally by the Chain Maintenance procedure.
|
||||
When the Online fork choice rule is in use,
|
||||
$B_\text{imm}$ corresponds to the $k$-deep block.
|
||||
However, it may be deeper than the $k$-deep block if the fork choice rule
|
||||
has been switched from Online to Bootstrap.
|
||||
Unlike the $k$-deep block, $B_\text{imm}$ does not advance as new blocks are added
|
||||
unless the Online fork choice rule is used.
|
||||
|
||||
The details of fork choice rule transitions are defined in the bootstrap spec:
|
||||
[Cryptarchia v1 Bootstrapping & Synchronization][bootstrap-sync].
|
||||
|
||||
### Slot
|
||||
|
||||
Time is divided up into slots of equal length,
|
||||
where one instance of the leadership lottery is held in each slot.
|
||||
A slot is said to be occupied if some validator has won the leadership lottery
|
||||
and proposed a block for that slot,
|
||||
otherwise the slot is said to be unoccupied.
|
||||
|
||||
### Epoch
|
||||
|
||||
Cryptarchia has a few global variables that are adjusted periodically
|
||||
in order for consensus to function. Namely, the protocol requires:
|
||||
|
||||
- Dynamic participation, thus the eligible notes must be refreshed regularly.
|
||||
- An unpredictable source of randomness for the leadership lottery.
|
||||
This source of randomness is derived from in-protocol activity
|
||||
and thus must be selected carefully to avoid giving adversaries an advantage.
|
||||
- Approximately constant block production rate achieved by dynamically adjusting
|
||||
the lottery difficulty based on observed participation levels.
|
||||
|
||||
The order in which these variables are calculated is important
|
||||
and is done w.r.t. the epoch schedule.
|
||||
|
||||
#### Epoch Schedule
|
||||
|
||||
An epoch is divided into 3 phases, as outlined below.
|
||||
|
||||
| Epoch Phase | Phase Length | Description |
|
||||
| ----------- | ------------ | ----------- |
|
||||
| Stake Distribution Snapshot | $s$ slots | A snapshot of note commitments are taken at the beginning of the epoch. The protocol waits for this value to finalize before entering the next phase. |
|
||||
| Buffer phase | $s$ slots | After the stake distribution is finalized, the protocol waits another slot finality period before entering the next phase. This is to further ensure that there is at least one honest leader contributing to the epoch nonce randomness. If an adversary can predict the nonce, they can grind their coin secret keys to gain an advantage. |
|
||||
| Lottery Constants Finalization | $s+\lfloor\frac{k}{f}\rfloor=4\lfloor\frac{k}{f}\rfloor$ slots | On the $2s^{th}$ slot into the epoch, the epoch nonce $\eta$ and the inferred total stake $D$ can be computed. The protocol waits another $4\lfloor\frac{k}{f}\rfloor$ slots for these values to finalize. |
|
||||
|
||||
The **epoch length** is the sum of the individual phases:
|
||||
$3\lfloor \frac{k}{f} \rfloor + 3\lfloor \frac{k}{f} \rfloor + 4\lfloor \frac{k}{f} \rfloor = 10 \lfloor \frac{k}{f} \rfloor$ slots.
|
||||
|
||||
#### Epoch State
|
||||
|
||||
The epoch state holds the variables derived over the course of the epoch schedule.
|
||||
It is the 3-tuple $(\mathbb{C}_\text{LEAD}, \eta, D)$ described below.
|
||||
|
||||
| Symbol | Name | Description | Value |
|
||||
| ------ | ---- | ----------- | ----- |
|
||||
| $\mathbb{C}_{\text{LEAD}}$ | Eligible Leader Notes Commitment | A commitment to the set of notes eligible for leadership. | See Eligible Leader Notes |
|
||||
| $\eta$ | Epoch Nonce | Randomness used in the leadership lottery (selected once per epoch) | See Epoch Nonce |
|
||||
| $D$ | Inferred Total Stake (Lottery Difficulty) | Total stake inferred from watching the results of the lottery during the course of the epoch. $D$ is used as the stake relativization constant for the following epoch. | See Total Stake Inference |
|
||||
|
||||
### Eligible Leader Notes
|
||||
|
||||
A note is eligible to participate in the leadership lottery if it has not been spent
|
||||
and was a member of the note set at the beginning of the previous epoch,
|
||||
i.e. they are members of $\mathbb{C}_\text{LEAD}$.
|
||||
|
||||
#### Note Ageing
|
||||
|
||||
If an adversary knows the epoch nonce $\eta$,
|
||||
they may grind a note that wins the lottery more frequently
|
||||
than should be statistically expected.
|
||||
Thus, it's critical that notes participating in the lottery are sufficiently old
|
||||
to ensure that they have no predictive power over $\eta$.
|
||||
|
||||
### Epoch Nonce
|
||||
|
||||
The epoch nonce $\eta$ is evolved after each block.
|
||||
|
||||
Given block $B = (parent, sl, \rho_\text{LEAD}, \dots)$ where:
|
||||
|
||||
- $parent$ is the parent of block $B$
|
||||
- $sl$ is the slot that $B$ is occupying.
|
||||
- $\rho_\text{LEAD}$ is the epoch nonce entropy contribution
|
||||
from the block's leadership proof
|
||||
|
||||
Then, $\eta_B$ is derived as:
|
||||
|
||||
$$\eta_{B} = \text{zkHASH}(\text{EPOCH\_NONCE\_V1}||\eta_{\text{parent}}||\rho_\text{LEAD}||\text{Fr}(sl))$$
|
||||
|
||||
where $\text{Fr}(sl)$ maps the slot number to the corresponding scalar
|
||||
in Poseidon's scalar field and $\text{zkHASH}(..)$ is Poseidon2
|
||||
as specified in Common Cryptographic Components.
|
||||
|
||||
The epoch nonce used in the next epoch is $\eta_{B'}$
|
||||
where $B'$ is the last block before the start of the
|
||||
"Lottery Constants Finalization" phase in the epoch schedule.
|
||||
|
||||
### Total Stake Inference
|
||||
|
||||
Given that stake is private in Cryptarchia,
|
||||
and that the goal is to maintain an approximately constant block rate,
|
||||
the difficulty of the slot lottery must be adjusted
|
||||
based on the level of participation.
|
||||
The details can be found in the Total Stake Inference specification.
|
||||
|
||||
### Epoch State Pseudocode
|
||||
|
||||
At the start of each epoch, each validator must derive the new epoch state variables.
|
||||
This is done through the following protocol:
|
||||
|
||||
```text
|
||||
define compute_epoch_state(ep, tip ∈ T) → (C_LEAD^ep, η^ep, D^ep):
|
||||
|
||||
case ep = 0:
|
||||
The genesis epoch state is hardcoded upon chain initialization.
|
||||
return (C_GENESIS, η_GENESIS, D_GENESIS)
|
||||
|
||||
otherwise:
|
||||
The epoch state is derived w.r.t. observations in the previous epoch.
|
||||
First, compute the slot at the start of the previous epoch.
|
||||
Observations will be queried relative to this slot.
|
||||
|
||||
sl_{ep-1} := (ep-1) · EPOCH_LENGTH
|
||||
|
||||
Notes eligible for leadership lottery are those present in the
|
||||
commitment root at the start of the previous epoch.
|
||||
|
||||
C_LEAD^ep := commitment_root_at_slot(sl_{ep-1}, tip)
|
||||
|
||||
The epoch nonce for epoch ep is the value of η at the beginning
|
||||
of the lottery constants finalization phase in the epoch schedule
|
||||
|
||||
η^ep := epoch_nonce_at_slot(sl_{ep-1} + ⌊6k/f⌋, tip)
|
||||
|
||||
Total active stake is inferred from the number of blocks produced
|
||||
in the previous epoch during the stake freezing phase.
|
||||
It is also derived from the previous estimate of total stake,
|
||||
thus recursion is used here to retrieve the previous epochs estimate D^{ep-1}
|
||||
|
||||
(_, _, D^{ep-1}) := compute_epoch_state(ep-1, tip)
|
||||
|
||||
The number of blocks produced during the first 6k/f slots
|
||||
of the previous epoch
|
||||
|
||||
N_BLOCKS^{ep-1} := |{B ∈ T | sl_{ep-1} ≤ sl_B < sl_{ep-1} + ⌊6k/f⌋}|
|
||||
|
||||
D^ep := infer_total_active_stake(D^{ep-1}, N_BLOCKS^{ep-1})
|
||||
|
||||
return (C_LEAD^ep, η^ep, D^ep)
|
||||
```
|
||||
|
||||
## Leadership Lottery
|
||||
|
||||
A lottery is run for every slot to decide who is eligible to propose a block.
|
||||
For each slot, there can be 0 or more winners.
|
||||
In fact, it's desirable to have short slots and many empty slots
|
||||
to allow for the network to propagate blocks
|
||||
and to reduce the chances of two leaders winning the same slot
|
||||
which are guaranteed forks.
|
||||
|
||||
### Proof of Leadership
|
||||
|
||||
The specifications of how a leader can prove that they have won the lottery
|
||||
are specified in the Proof of Leadership Specification.
|
||||
|
||||
### Leader Rewards
|
||||
|
||||
As an incentive for producing blocks,
|
||||
leaders are rewarded with every block proposal.
|
||||
The rewarding protocol is specified in Anonymous Leaders Reward Protocol.
|
||||
|
||||
## Block Chain
|
||||
|
||||
### Fork Choice Rule
|
||||
|
||||
Two fork choice rules are used,
|
||||
one during bootstrapping and a second once a node completes bootstrapping.
|
||||
|
||||
During bootstrapping, the protocol must be resilient to malicious peers feeding false chains,
|
||||
this calls for a more expensive fork choice rule that can differentiate
|
||||
between malicious long-range attacks and honest chains.
|
||||
|
||||
After bootstrapping, the node commits to the most honest looking chain found
|
||||
and switches to a fork choice rule that rejects chains that diverge
|
||||
by more than $k$ blocks.
|
||||
|
||||
The details are specified in Cryptarchia Fork Choice Rule.
|
||||
|
||||
### Block ID
|
||||
|
||||
Block ID is defined by the hash of the block header,
|
||||
where hash is Blake2b as specified in Common Cryptographic Components.
|
||||
|
||||
```python
|
||||
def block_id(header: Header) -> hash:
|
||||
return hash(
|
||||
b"BLOCK_ID_V1",
|
||||
header.bedrock_version,
|
||||
header.parent_block,
|
||||
header.slot.to_bytes(8, byteorder='little'),
|
||||
header.block_root,
|
||||
# PoL fields
|
||||
header.proof_of_leadership.leader_voucher,
|
||||
header.proof_of_leadership.entropy_contribution,
|
||||
header.proof_of_leadership.proof.serialize(),
|
||||
header.proof_of_leadership.leader_key.compressed(),
|
||||
)
|
||||
```
|
||||
|
||||
### Block Header
|
||||
|
||||
```python
|
||||
class Header: # 297 bytes
|
||||
bedrock_version: byte # 1 byte
|
||||
parent_block: hash # 32 bytes
|
||||
slot: int # 8 bytes
|
||||
block_root: hash # 32 bytes
|
||||
proof_of_leadership: ProofOfLeadership # 224 bytes
|
||||
|
||||
class ProofOfLeadership: # 224 bytes
|
||||
leader_voucher: zkhash # 32 bytes
|
||||
entropy_contribution: zkhash # 32 bytes
|
||||
proof: Groth16Proof # 128 bytes
|
||||
leader_key: Ed25519PublicKey # 32 bytes
|
||||
```
|
||||
|
||||
### Block
|
||||
|
||||
Block construction, validation and execution are specified in
|
||||
Block Construction, Validation and Execution Specification.
|
||||
|
||||
### Block Header Validation
|
||||
|
||||
Given block $B=(header, transactions)$ and the block tree $T$ where:
|
||||
|
||||
- $header$ is the header defined in Header
|
||||
- $transactions$ is the sequence of transactions in the block
|
||||
|
||||
The function $\textbf{valid\_header}(B)$ returns True
|
||||
if all of the following constraints hold,
|
||||
otherwise it returns False.
|
||||
|
||||
1. `header.bedrock_version = 1`
|
||||
Ensure bedrock version number.
|
||||
|
||||
2. `bytes(transactions) < MAX_BLOCK_SIZE`
|
||||
Ensure block size is smaller than the maximum allowed block size.
|
||||
|
||||
3. `length(transactions) < MAX_BLOCK_TXS`
|
||||
Ensure the number of transactions in the block is below the limit.
|
||||
|
||||
4. `merkle_root(transactions) = header.block_root`
|
||||
Ensure block root is over the transaction list.
|
||||
|
||||
5. `header.slot > fetch_header(header.parent_block).slot`
|
||||
Ensure the block's slot comes after the parent block's slot.
|
||||
|
||||
6. `wallclock_time() > slot_time(header.slot)`
|
||||
Ensure this block's slot time has elapsed.
|
||||
Local time is used in this validation.
|
||||
See Clocks for discussion around clock synchronization.
|
||||
|
||||
7. `header.parent_block ∈ T`
|
||||
Ensure the block's parent has already been accepted into the block tree.
|
||||
|
||||
8. `height(B) > height(B_imm)`
|
||||
Ensure the block comes after the latest immutable block.
|
||||
Assuming that $T$ prunes all forks diverged deeper than $B_\text{imm}$,
|
||||
this step, along with step 5, ensures that $B$ is descendant from $B_\text{imm}$.
|
||||
If all forks cannot be pruned completely in the implementation,
|
||||
this step must be replaced with `is_ancestor(B_imm, B)`,
|
||||
which checks whether $B_\text{imm}$ is an ancestor of $B$.
|
||||
|
||||
9. Verify the leader's right to propose
|
||||
and ensure it is the one proposing this block:
|
||||
Given leadership proof $\pi_\text{LEAD} = (\pi_\text{PoL}, P_\text{LEAD}, \sigma)$,
|
||||
where:
|
||||
- $\pi_\text{PoL}$ is the slot lottery win proof
|
||||
as defined in Proof of Leadership Specification
|
||||
- $P_\text{LEAD}$ is the public key committed to in $\pi_\text{PoL}$
|
||||
- $\sigma$ is a signature
|
||||
|
||||
10. A leader's proposal is valid if:
|
||||
- `verify_PoL(T, parent, sl, P_LEAD, π_PoL) = True`
|
||||
- `verify_signature(block_id(H), σ, P_LEAD) = True`
|
||||
Ensure that the leader who won the lottery is actually proposing this block
|
||||
since PoL's are not bound to blocks directly.
|
||||
|
||||
### Chain Maintenance
|
||||
|
||||
The chain maintenance procedure `on_block(state, B)`
|
||||
governs how the block tree $T$ is updated.
|
||||
|
||||
**Note:** It's assumed that block contents have already been validated
|
||||
by the execution layer w.r.t. the parent block's execution state.
|
||||
|
||||
```text
|
||||
define on_block(state, B) → state':
|
||||
|
||||
(c_loc, B_imm, T) := state
|
||||
|
||||
if B ∈ T ∨ ¬valid_header(B):
|
||||
Either B has already been seen or it's invalid, in both cases the block is ignored
|
||||
return state
|
||||
|
||||
T' := T ∪ {B}
|
||||
|
||||
c_loc' := B if parent(B) = c_loc
|
||||
fork_choice(c_loc, F_T', k, s) if parent(B) ≠ c_loc
|
||||
|
||||
if fork_choice_rule = ONLINE:
|
||||
Explicitly commit to the k-deep block
|
||||
if the Online Fork Choice Rule is being used.
|
||||
(T', B_imm) := commit(T', c_loc', k)
|
||||
|
||||
return (c_loc', B_imm, T')
|
||||
```
|
||||
|
||||
### Commit
|
||||
|
||||
The following procedure commits to the block
|
||||
which is $depth$ deep from $c_{loc}$.
|
||||
This procedure computes the new latest immutable block $B_\text{imm}$.
|
||||
|
||||
```text
|
||||
define commit(T, c_loc, depth) → (T', B_imm):
|
||||
|
||||
assert fork_choice_rule = ONLINE
|
||||
|
||||
Compute the latest immutable block, which is depth deep from c_loc.
|
||||
B_imm := block_at_depth(c_loc, depth)
|
||||
|
||||
Prune all forks diverged deeper than B_imm,
|
||||
so that future blocks on those forks can be rejected by Block Header Validation.
|
||||
    T' := prune_forks(T, B_imm)
|
||||
|
||||
return (T', B_imm)
|
||||
```
|
||||
|
||||
### Fork Pruning
|
||||
|
||||
The fork pruning procedure removes all blocks
|
||||
which are part of forks diverged deeper than a certain block.
|
||||
|
||||
```text
|
||||
define prune_forks(T, B) → T':
|
||||
|
||||
T' := T
|
||||
|
||||
for each B_tip ∈ F_T:
|
||||
If B_tip is a fork diverged deeper than B, prune the fork.
|
||||
B_div := common_ancestor(B_tip, B)
|
||||
if B_div ≠ B:
|
||||
            T' := prune_blocks(B_tip, B_div, T')
|
||||
|
||||
return T'
|
||||
|
||||
define prune_blocks(B_new, B_old, T) → T':
|
||||
|
||||
Remove all blocks in the chain within range (B_old, B_new] from T.
|
||||
(B, T') := (B_new, T)
|
||||
|
||||
while B ≠ B_old:
|
||||
T' := T' \ {B}
|
||||
B := parent(B)
|
||||
|
||||
return T'
|
||||
```
|
||||
|
||||
### Versioning and Protocol Upgrades
|
||||
|
||||
Protocol versions are signalled through the `bedrock_version` field
|
||||
of the block header.
|
||||
Protocol upgrades need to be co-ordinated well in advance
|
||||
to ensure that node operators have enough time to update their node.
|
||||
Block height is used to schedule the activation of protocol updates.
|
||||
E.g. bedrock version 35 will be active after block height 32000.
|
||||
|
||||
## Implementation Considerations
|
||||
|
||||
### Proof of Stake vs. Proof of Work
|
||||
|
||||
From a privacy and resiliency point of view, Proof of Work is highly attractive.
|
||||
The amount of hashing power of a node is private,
|
||||
they can provide a new public key for each block they mine
|
||||
ensuring that their blocks cannot be connected by this identity,
|
||||
and PoW is not susceptible to long range attacks as is PoS.
|
||||
Unfortunately, it is wasteful and demands that leaders have powerful machines.
|
||||
The goal is to ensure strong decentralization by having a low barrier to entry
|
||||
and a good enough level of security can be achieved
|
||||
by having participants have an economic stake in the protocol.
|
||||
|
||||
### Clocks
|
||||
|
||||
Cryptarchia depends on honest nodes having relatively in-sync clocks.
|
||||
The protocol currently relies on NTP to synchronize clocks,
|
||||
this may be improved upon in the future,
|
||||
borrowing ideas from Ouroboros Chronos: Permissionless Clock Synchronization
|
||||
via Proof-of-Stake.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Proof of Leadership Specification][proof-of-leadership]
|
||||
\- ZK proof specification for leadership lottery
|
||||
- [Anonymous Leaders Reward Protocol][leaders-reward]
|
||||
\- Leader reward mechanism
|
||||
- [Cryptarchia Fork Choice Rule][fork-choice]
|
||||
\- Fork choice rule specification
|
||||
- [Block Construction, Validation and Execution Specification][block-construction]
|
||||
\- Block structure details
|
||||
- [Common Cryptographic Components][crypto-components]
|
||||
\- Cryptographic primitives (Blake2b, Poseidon2)
|
||||
- [Cryptarchia v1 Bootstrapping & Synchronization][bootstrap-sync]
|
||||
\- Bootstrap and synchronization procedures
|
||||
- [Total Stake Inference][stake-inference]
|
||||
\- Stake inference mechanism
|
||||
- [Block Times & Blend Network Analysis][block-times]
|
||||
\- Analysis for slot activation coefficient
|
||||
|
||||
### Informative
|
||||
|
||||
- [Cryptarchia v1 Protocol Specification][cryptarchia-origin]
|
||||
\- Original Cryptarchia v1 Protocol documentation
|
||||
- [Ouroboros Crypsinous: Privacy-Preserving Proof-of-Stake][ouroboros-crypsinous]
|
||||
\- Foundation for Cryptarchia design
|
||||
- [Ouroboros Chronos: Permissionless Clock Synchronization via Proof-of-Stake][ouroboros-chronos]
|
||||
\- Clock synchronization research
|
||||
- [Blend Network Specification][blend-network]
|
||||
\- Network privacy layer
|
||||
|
||||
[proof-of-leadership]: https://nomos-tech.notion.site/Proof-of-Leadership-215261aa09df8145a0f2c0d059aed59c
|
||||
[leaders-reward]: https://nomos-tech.notion.site/Anonymous-Leaders-Reward-Protocol
|
||||
[fork-choice]: https://nomos-tech.notion.site/Cryptarchia-Fork-Choice-Rule
|
||||
[block-construction]: https://nomos-tech.notion.site/Block-Construction-Validation-and-Execution-Specification
|
||||
[crypto-components]: https://nomos-tech.notion.site/Common-Cryptographic-Components
|
||||
[bootstrap-sync]: https://nomos-tech.notion.site/Cryptarchia-v1-Bootstrapping-Synchronization
|
||||
[stake-inference]: https://nomos-tech.notion.site/Total-Stake-Inference
|
||||
[block-times]: https://nomos-tech.notion.site/Block-Times-Blend-Network-Analysis
|
||||
[cryptarchia-origin]: https://nomos-tech.notion.site/Cryptarchia-v1-Protocol-Specification-21c261aa09df810cb85eff1c76e5798c
|
||||
[ouroboros-crypsinous]: https://eprint.iacr.org/2018/1132.pdf
|
||||
[ouroboros-chronos]: https://eprint.iacr.org/2019/838.pdf
|
||||
[blend-network]: https://nomos-tech.notion.site/Blend-Protocol-215261aa09df81ae8857d71066a80084
|
||||
[rfc-2119]: https://www.ietf.org/rfc/rfc2119.txt
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,349 +0,0 @@
|
||||
# NOMOS-KEY-TYPES-GENERATION
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Key Types and Generation |
|
||||
| Slug | 84 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Mehmet Gonen <mehmet@status.im> |
|
||||
| Contributors | Marcin Pawlowski <marcin@status.im>, Youngjoon Lee <youngjoon@status.im>, Alexander Mozeika <alexander@status.im>, Thomas Lavaur <thomaslavaur@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/nomos-key-types-and-generation.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/nomos-key-types-and-generation.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document defines the key types used in the Blend protocol
|
||||
and describes the process of generating them.
|
||||
|
||||
**Keywords:** cryptography, keys, Blend, encryption, signing,
|
||||
NQK, NSK, ESK, NEK, EEK
|
||||
|
||||
## Background
|
||||
|
||||
The Blend protocol is a mix network protocol
|
||||
that provides anonymous communication in the Nomos network.
|
||||
It uses layered encryption and message mixing
|
||||
to prevent traffic analysis and ensure sender anonymity.
|
||||
For more details, see [Blend Protocol](#references).
|
||||
|
||||
This document ensures that the keys are used and generated in a common manner,
|
||||
which is necessary for making the Blend protocol work.
|
||||
|
||||
**Core nodes** are nodes that participate in the Blend network
|
||||
by mixing and forwarding messages.
|
||||
They are registered through the Service Declaration Protocol (SDP)
|
||||
and store their credentials on the Nomos blockchain ledger.
|
||||
|
||||
**Blend messages** are encrypted messages
|
||||
that are routed through the mix network.
|
||||
Each message is encapsulated with multiple layers of encryption,
|
||||
one for each hop in the network.
|
||||
|
||||
The keys defined in this specification include:
|
||||
|
||||
- **Non-ephemeral Quota Key (NQK)** —
|
||||
used for proving that a node is a core node.
|
||||
- **Non-ephemeral Signing Key (NSK)** —
|
||||
used to authenticate the node on the network level
|
||||
and derive the Non-ephemeral Encryption Key.
|
||||
- **Ephemeral Signing Key (ESK)** —
|
||||
used for signing Blend messages, one per encapsulation.
|
||||
- **Non-ephemeral Encryption Key (NEK)** —
|
||||
used for deriving shared secrets for message encryption.
|
||||
- **Ephemeral Encryption Key (EEK)** —
|
||||
used for encrypting Blend messages, one per encapsulation.
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
|
||||
document are to be interpreted as described in
|
||||
[RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
## Document Structure
|
||||
|
||||
This specification is organized into two distinct parts
|
||||
to serve different audiences and use cases:
|
||||
|
||||
**Protocol Specification** contains the normative requirements necessary
|
||||
for implementing an interoperable Blend Protocol node.
|
||||
This section defines the cryptographic primitives, message formats,
|
||||
network protocols, and behavioral requirements that all implementations
|
||||
MUST follow to ensure compatibility and maintain the protocol's
|
||||
privacy guarantees.
|
||||
Protocol designers, auditors, and those seeking to understand the core
|
||||
mechanisms should focus on this part.
|
||||
|
||||
**Implementation Considerations** provides non-normative guidance
|
||||
for implementers.
|
||||
This section offers practical recommendations, optimization strategies,
|
||||
and detailed examples that help developers build efficient and robust
|
||||
implementations.
|
||||
While these details are not required for interoperability,
|
||||
they represent best practices learned from reference implementations
|
||||
and can significantly improve performance and reliability.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
This section defines the normative cryptographic protocol requirements
|
||||
for interoperability.
|
||||
|
||||
### Construction
|
||||
|
||||
#### Non-ephemeral Quota Key
|
||||
|
||||
A node generates a Non-ephemeral Quota Key (NQK)
|
||||
that is a ZkSignature (Zero Knowledge Signature Scheme).
|
||||
The NQK is stored on the Nomos blockchain ledger
|
||||
as the `zk_id` field in the `DeclarationInfo`
|
||||
(see [Service Declaration Protocol](#references))
|
||||
resulting from the node's participation in the
|
||||
Service Declaration Protocol (SDP).
|
||||
|
||||
The NQK is used to prove that the node is part of the set of core nodes
|
||||
as indicated through the SDP.
|
||||
|
||||
**Properties:**
|
||||
|
||||
- **Type**: ZkSignature (Zero Knowledge Signature Scheme)
|
||||
- **Storage**: Nomos blockchain ledger (`zk_id` field in `DeclarationInfo`)
|
||||
- **Purpose**: Prove core node membership
|
||||
- **Lifecycle**: Non-ephemeral (persistent across sessions)
|
||||
|
||||
#### Non-ephemeral Signing Key
|
||||
|
||||
A node generates a Non-ephemeral Signing Key (NSK)
|
||||
using the Ed25519 algorithm (see [RFC 8032](#references)).
|
||||
The NSK is stored on the Nomos blockchain ledger
|
||||
as the `provider_id` field in the `DeclarationInfo`
|
||||
(see [Service Declaration Protocol](#references))
|
||||
of the node's outcome of the participation in the
|
||||
Service Declaration Protocol (SDP).
|
||||
|
||||
The NSK is used to authenticate the node on the network level
|
||||
and to derive the Non-ephemeral Encryption Key.
|
||||
|
||||
**Properties:**
|
||||
|
||||
- **Type**: Ed25519 (see [RFC 8032](#references))
|
||||
- **Storage**: Nomos blockchain ledger (`provider_id` field in `DeclarationInfo`)
|
||||
- **Purpose**:
|
||||
- Network-level node authentication
|
||||
- Derivation of Non-ephemeral Encryption Key (NEK)
|
||||
- **Lifecycle**: Non-ephemeral (persistent across sessions)
|
||||
|
||||
#### Ephemeral Signing Key
|
||||
|
||||
A node generates Ephemeral Signing Keys (ESK) that are proved to be limited
|
||||
in number by the Proof of Quota (PoQ).
|
||||
The PoQ for core nodes requires a valid NQK for the session for which the
|
||||
PoQ is generated.
|
||||
|
||||
A unique signing key MUST be generated for every encapsulation as required
|
||||
by the Message Encapsulation Mechanism.
|
||||
|
||||
**Properties:**
|
||||
|
||||
- **Type**: Ed25519
|
||||
- **Quantity**: Limited by Proof of Quota (PoQ)
|
||||
- **Requirements**: Valid NQK for the session
|
||||
- **Purpose**: Signing Blend messages
|
||||
- **Lifecycle**: Ephemeral (one per encapsulation)
|
||||
|
||||
**Security Requirements:**
|
||||
|
||||
- The key MUST NOT be reused.
|
||||
Otherwise, the messages that reuse the same key can be linked together.
|
||||
- The node is responsible for not reusing the key.
|
||||
- A unique signing key MUST be generated for every encapsulation.
|
||||
|
||||
#### Non-ephemeral Encryption Key
|
||||
|
||||
A node generates a Non-ephemeral Encryption Key (NEK).
|
||||
It is an X25519 curve key (see [RFC 7748](#references))
|
||||
derived from the NSK (Ed25519) public key retrieved from the `provider_id`,
|
||||
which is stored on the Nomos blockchain ledger
|
||||
when the node executes the SDP protocol.
|
||||
|
||||
The NEK key is used for deriving a shared secret
|
||||
(alongside EEK defined below) for the Blend message encapsulation purposes.
|
||||
|
||||
**Properties:**
|
||||
|
||||
- **Type**: X25519 (see [RFC 7748](#references))
|
||||
- **Derivation**: Derived from NSK (Ed25519) public key
|
||||
- **Source**: `provider_id` field from Nomos blockchain ledger
|
||||
- **Purpose**: Deriving shared secrets for message encryption
|
||||
- **Lifecycle**: Non-ephemeral (persistent across sessions)
|
||||
|
||||
**Derivation Process:**
|
||||
|
||||
1. Retrieve NSK (Ed25519) public key from `provider_id` on Nomos blockchain ledger
|
||||
2. Derive X25519 curve key from Ed25519 public key
|
||||
3. Use resulting NEK for shared secret derivation
|
||||
|
||||
#### Ephemeral Encryption Key
|
||||
|
||||
A node derives an Ephemeral Encryption Key (EEK) pair
|
||||
using the X25519 curve (see [RFC 7748](#references)) from the ESK.
|
||||
|
||||
A unique encryption key MUST be generated for every encapsulation
|
||||
as required by the Message Encapsulation Mechanism.
|
||||
|
||||
**Properties:**
|
||||
|
||||
- **Type**: X25519 (see [RFC 7748](#references))
|
||||
- **Derivation**: Derived from ESK (Ed25519)
|
||||
- **Purpose**: Encrypting Blend messages
|
||||
- **Lifecycle**: Ephemeral (one per encapsulation)
|
||||
|
||||
**Shared Secret Derivation:**
|
||||
|
||||
The derivation of a shared secret for the encryption of an encapsulated
|
||||
message requires:
|
||||
|
||||
- **Sender**: EEK (Ephemeral Encryption Key of sender)
|
||||
- **Recipient**: X25519 key derived from NEK
|
||||
(Non-ephemeral Encryption Key of recipient)
|
||||
|
||||
The shared secret is computed using the X25519 Diffie-Hellman key exchange
|
||||
between the sender's EEK and the recipient's derived NEK.
|
||||
|
||||
### Security Considerations
|
||||
|
||||
#### Key Reuse
|
||||
|
||||
- **CRITICAL**: Ephemeral keys (ESK, EEK) MUST NOT be reused across
|
||||
different encapsulations
|
||||
- Key reuse enables message linking, breaking anonymity guarantees
|
||||
- Implementations MUST enforce unique key generation per encapsulation
|
||||
|
||||
#### Key Derivation
|
||||
|
||||
- NEK derivation from NSK MUST use standard Ed25519 to X25519 conversion
|
||||
- EEK derivation from ESK MUST use standard Ed25519 to X25519 conversion
|
||||
- Derivations MUST be deterministic for the same input
|
||||
|
||||
#### Proof of Quota
|
||||
|
||||
- ESK generation MUST be limited by valid Proof of Quota (PoQ)
|
||||
- PoQ MUST include valid NQK for the current session
|
||||
- Implementations MUST verify PoQ before accepting ephemeral signatures
|
||||
|
||||
#### Ledger Storage
|
||||
|
||||
- NQK and NSK MUST be retrievable from Nomos blockchain ledger via SDP protocol
|
||||
- Ledger data MUST be integrity-protected
|
||||
- Implementations SHOULD verify ledger data authenticity before use
|
||||
|
||||
## Implementation Considerations
|
||||
|
||||
This section provides guidance for implementing the protocol specification.
|
||||
|
||||
### Key Hierarchy Summary
|
||||
|
||||
```text
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Nomos Blockchain Ledger (SDP Protocol) │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ DeclarationInfo: │
|
||||
│ - zk_id: NQK (ZkSignature) │
|
||||
│ - provider_id: NSK (Ed25519) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
│ Derivation
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Non-ephemeral Keys (Persistent) │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ NQK (ZkSignature) ──► Proves core node membership │
|
||||
│ NSK (Ed25519) ──► Network authentication │
|
||||
│ NEK (X25519) ──► Derived from NSK for encryption │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
│ Per-encapsulation
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Ephemeral Keys (Per Encapsulation) │
|
||||
├─────────────────────────────────────────────────────────────┤
|
||||
│ ESK (Ed25519) ──► Signs Blend messages (via PoQ + NQK) │
|
||||
│ EEK (X25519) ──► Derived from ESK for encryption │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Key Usage Matrix
|
||||
|
||||
| Key Type | Algorithm | Storage | Lifecycle | Primary Use | Derived From |
|
||||
| -------- | --------- | ------- | --------- | ----------- | ------------ |
|
||||
| **NQK** | ZkSignature | Nomos blockchain (`zk_id`) | Non-ephemeral | Core node proof | Generated |
|
||||
| **NSK** | Ed25519 | Nomos blockchain (`provider_id`) | Non-ephemeral | Authentication | Generated |
|
||||
| **NEK** | X25519 | Derived | Non-ephemeral | Shared secret derivation | NSK public key |
|
||||
| **ESK** | Ed25519 | Memory | Ephemeral | Message signing | Generated (PoQ-limited) |
|
||||
| **EEK** | X25519 | Memory | Ephemeral | Message encryption | ESK |
|
||||
|
||||
### Implementation Requirements
|
||||
|
||||
Implementations of this specification MUST:
|
||||
|
||||
1. Generate NQK as ZkSignature and store in `DeclarationInfo.zk_id`
|
||||
2. Generate NSK as Ed25519 and store in `DeclarationInfo.provider_id`
|
||||
3. Derive NEK from NSK using Ed25519 to X25519 conversion
|
||||
4. Generate unique ESK per encapsulation, limited by PoQ
|
||||
5. Derive EEK from ESK using Ed25519 to X25519 conversion
|
||||
6. Never reuse ephemeral keys across encapsulations
|
||||
7. Verify PoQ includes valid NQK before generating ESK
|
||||
|
||||
Implementations SHOULD:
|
||||
|
||||
1. Securely erase ephemeral keys after use
|
||||
2. Implement key generation auditing
|
||||
3. Validate all derived keys before use
|
||||
4. Monitor for key reuse attempts
|
||||
|
||||
### Best Practices
|
||||
|
||||
#### Secure Key Management
|
||||
|
||||
- Store non-ephemeral keys in secure storage
|
||||
(HSM, secure enclave, or encrypted memory)
|
||||
- Implement secure key erasure for ephemeral keys immediately after use
|
||||
- Use constant-time operations for key comparisons to prevent timing attacks
|
||||
|
||||
#### Operational Security
|
||||
|
||||
- Log key generation events (without logging key material)
|
||||
- Monitor for anomalous key usage patterns
|
||||
- Implement rate limiting on key generation to prevent resource
|
||||
exhaustion
|
||||
- Regularly audit key lifecycle management
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- Blend Protocol - Mix network protocol for anonymous communication in Nomos
|
||||
- Service Declaration Protocol (SDP) - Protocol for registering core nodes
|
||||
and storing `DeclarationInfo` on the Nomos blockchain ledger
|
||||
- Proof of Quota Specification (PoQ)
|
||||
- Message Encapsulation Mechanism
|
||||
- Zero Knowledge Signature Scheme (ZkSignature)
|
||||
|
||||
### Informative
|
||||
|
||||
- [Key Types and Generation Specification](https://nomos-tech.notion.site/Key-Types-and-Generation-Specification-215261aa09df81088b8fd7c3089162e8)
|
||||
\- Original Key Types and Generation documentation
|
||||
- [RFC 8032](https://www.rfc-editor.org/rfc/rfc8032) - Edwards-Curve Digital Signature Algorithm (EdDSA)
|
||||
- [RFC 7748](https://www.rfc-editor.org/rfc/rfc7748) - Elliptic Curves for Security (X25519)
|
||||
- Ed25519 to Curve25519 conversion: Standard practice for deriving X25519 keys from Ed25519 keys
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,262 +0,0 @@
|
||||
# NOMOS-MESSAGE-FORMATTING
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Message Formatting Specification |
|
||||
| Slug | 89 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Marcin Pawlowski |
|
||||
| Contributors | Youngjoon Lee <youngjoon@status.im>, Alexander Mozeika <alexander.mozeika@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/nomos-message-formatting.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/nomos-message-formatting.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document specifies the Message Formatting for the Blend Protocol.
|
||||
The Message contains a header and a payload,
|
||||
where the header informs the protocol about the version and the payload type.
|
||||
The Message contains either a drop or a non-drop payload,
|
||||
with fixed-length payloads to prevent adversaries from
|
||||
distinguishing message types based on length.
|
||||
This specification reuses notation from the Notation document
|
||||
and integrates with the Message Encapsulation Mechanism.
|
||||
|
||||
**Keywords:** Blend, message formatting, header, payload, drop, non-drop
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
|
||||
"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be
|
||||
interpreted as described in RFC 2119.
|
||||
|
||||
## Document Structure
|
||||
|
||||
This specification is organized into two distinct parts
|
||||
to serve different audiences and use cases:
|
||||
|
||||
**Protocol Specification** contains the normative requirements
|
||||
necessary for implementing an interoperable Blend Protocol node.
|
||||
This section defines the cryptographic primitives, message formats, network protocols,
|
||||
and behavioral requirements that all implementations MUST follow
|
||||
to ensure compatibility and maintain the protocol's privacy guarantees.
|
||||
Protocol designers, auditors,
|
||||
and those seeking to understand the core mechanisms should focus on this part.
|
||||
|
||||
**Implementation Details** provides non-normative guidance for implementers.
|
||||
This section offers practical recommendations, optimization strategies,
|
||||
and detailed examples that help developers build efficient and robust implementations.
|
||||
While these details are not required for interoperability,
|
||||
they represent best practices learned from reference implementations
|
||||
and can significantly improve performance and reliability.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### Construction
|
||||
|
||||
#### Message
|
||||
|
||||
The Message is a structure that contains a public_header, private_header and a payload.
|
||||
|
||||
```python
|
||||
class Message:
|
||||
public_header: PublicHeader
|
||||
private_header: PrivateHeader
|
||||
payload: bytes
|
||||
```
|
||||
|
||||
#### Public Header
|
||||
|
||||
The public_header MUST be generated as the outcome of the Message Encapsulation Mechanism.
|
||||
|
||||
The public_header is defined as follows:
|
||||
|
||||
```python
|
||||
class PublicHeader:
|
||||
version: byte
|
||||
public_key: PublicKey
|
||||
proof_of_quota: ProofOfQuota
|
||||
signature: Signature
|
||||
```
|
||||
|
||||
**Fields:**
|
||||
|
||||
- version=0x01 is version of the protocol.
|
||||
- public_key is $K^{n}_{i}$,
|
||||
a public key from the set $\mathbf{K}^{n}_{h}$
|
||||
as defined in the Message Encapsulation spec.
|
||||
- proof_of_quota is $\pi^{K^{n}_{i}}_{Q}$,
|
||||
a corresponding proof of quota for the key $K^{n}_{i}$ from the $\mathbf{K}^{n}_{h}$;
|
||||
it also contains the key nullifier.
|
||||
- signature is $\sigma_{K^{n}_{i}}(\mathbf{h|P}_{i})$,
|
||||
a signature of the concatenation of the $i$-th encapsulation
|
||||
of the payload $\mathbf{P}$ and the private header $\mathbf{h}$,
|
||||
that can be verified by the public key $K^{n}_{i}$.
|
||||
|
||||
#### Private Header
|
||||
|
||||
The private_header must be generated as the outcome of
|
||||
the Message Encapsulation Mechanism.
|
||||
|
||||
The private header contains a set of encrypted BlendingHeader entries
|
||||
$\mathbf{h} = (\mathbf{b}_{1},...,\mathbf{b}_{h_{max}})$.
|
||||
|
||||
```python
|
||||
private_header: list[BlendingHeader]
|
||||
```
|
||||
|
||||
The size of the set is limited to $\beta_{max}=3$ BlendingHeader entries,
|
||||
as defined in the Global Parameters.
|
||||
|
||||
**Blending Header:**
|
||||
|
||||
The BlendingHeader ($\mathbf{b}_{l}$) is defined as follows:
|
||||
|
||||
```python
|
||||
class BlendingHeader:
|
||||
public_key: PublicKey
|
||||
proof_of_quota: ProofOfQuota
|
||||
signature: Signature
|
||||
proof_of_selection: ProofOfSelection
|
||||
is_last: byte
|
||||
```
|
||||
|
||||
**Fields:**
|
||||
|
||||
- public_key is $K^{n}_{l}$,
|
||||
a public key from the set $\mathbf{K}^{n}_{h}$.
|
||||
- proof_of_quota is $\pi^{K^{n}_{l}}_{Q}$,
|
||||
a corresponding proof of quota for the key $K^{n}_{l}$ from the $\mathbf{K}^{n}_{h}$;
|
||||
it also contains the key nullifier.
|
||||
- signature is $\sigma_{K^{n}_{l}}(\mathbf{h|P}_{l})$,
|
||||
a signature of the concatenation of $l$-th encapsulation
|
||||
of the payload $\mathbf{P}$ and the private header $\mathbf{h}$,
|
||||
that can be verified by public key $K^{n}_{l}$.
|
||||
- proof_of_selection is $\pi^{K^{n}_{l+1},m_{l+1}}_{S}$,
|
||||
a proof of selection of the node index $m_{l+1}$
|
||||
assuming valid proof of quota $\pi^{K^{n}_{l}}_{Q}$.
|
||||
- is_last is $\Omega$,
|
||||
a flag that indicates that this is the last encapsulation.
|
||||
|
||||
#### Payload
|
||||
|
||||
The Payload is formatted according to the
|
||||
[Payload Formatting Specification][payload-formatting].
|
||||
The formatted Payload is generated as the outcome of
|
||||
the [Message Encapsulation Mechanism][message-encapsulation].
|
||||
|
||||
#### Maximum Payload Length
|
||||
|
||||
The `MAX_PAYLOAD_LENGTH` parameter defines the maximum length of the payload,
|
||||
which for version 1 of the Blend Protocol is fixed as `MAX_PAYLOAD_LENGTH=34003`.
|
||||
That is, 34kB for the payload body (`MAX_BODY_LENGTH`)
|
||||
and 3 bytes for the payload header.
|
||||
More information about payload formatting can be found in
|
||||
[Payload Formatting Specification][payload-formatting].
|
||||
|
||||
```python
|
||||
MAX_PAYLOAD_LENGTH = 34003
|
||||
MAX_BODY_LENGTH = 34000
|
||||
PAYLOAD_HEADER_SIZE = 3
|
||||
```
|
||||
|
||||
## Implementation Considerations
|
||||
|
||||
### Message Size Uniformity
|
||||
|
||||
**Fixed-Length Design:**
|
||||
|
||||
- All messages have a fixed total length to prevent traffic analysis attacks
|
||||
- The payload length is constant regardless of actual content size
|
||||
- Padding is used to fill unused space in the payload body
|
||||
- This design prevents adversaries from distinguishing message types based on size
|
||||
|
||||
### Protocol Version
|
||||
|
||||
**Version Field:**
|
||||
|
||||
- The current protocol version is 0x01
|
||||
- The version field is a single byte in the public header
|
||||
- Future protocol versions may introduce breaking changes to the message format
|
||||
- Implementations must validate the version field before processing messages
|
||||
|
||||
### Header Generation
|
||||
|
||||
**Dependency on Encapsulation:**
|
||||
|
||||
- Both public_header and private_header are generated by
|
||||
the Message Encapsulation Mechanism.
|
||||
- Implementations must not manually construct headers
|
||||
- The encapsulation mechanism ensures proper cryptographic properties
|
||||
- Headers include signatures, proofs, and encryption
|
||||
as specified in the Message Encapsulation spec.
|
||||
|
||||
### Blending Header Limit
|
||||
|
||||
**Maximum Encapsulation Layers:**
|
||||
|
||||
- The protocol limits the private header to $\beta_{max}=3$ BlendingHeader entries
|
||||
- This limit is defined in the Global Parameters
|
||||
- Each BlendingHeader represents one layer of Message encapsulation
|
||||
- The limit balances privacy (more layers) with performance and overhead
|
||||
|
||||
### Integration Points
|
||||
|
||||
**Required Specifications:**
|
||||
|
||||
- Message Encapsulation Mechanism: Generates the public and private headers
|
||||
- Payload Formatting Specification: Defines how to format the payload content
|
||||
- Notation: Provides mathematical and cryptographic notation used throughout
|
||||
- Global Parameters: Defines protocol-wide constants like $\beta_{max}$
|
||||
|
||||
### Security Considerations
|
||||
|
||||
**Traffic Analysis Protection:**
|
||||
|
||||
- Fixed message lengths prevent size-based traffic analysis
|
||||
- All messages appear identical in size on the network
|
||||
- Cover traffic can be indistinguishable from real data messages
|
||||
|
||||
**Cryptographic Integrity:**
|
||||
|
||||
- Signatures in both public and private headers ensure message authenticity
|
||||
- Proof of Quota prevents spam and resource exhaustion
|
||||
- Proof of Selection ensures correct node routing
|
||||
|
||||
**Message Validation:**
|
||||
|
||||
- Implementations must verify all signatures before processing
|
||||
- Proof of Quota must be validated to prevent quota violations
|
||||
- The is_last flag must be checked to determine final message destination
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Message Encapsulation Mechanism](https://nomos-tech.notion.site/Message-Encapsulation-Mechanism-215261aa09df81309d7fd7f1c2da086b)
|
||||
\- Cryptographic operations for building and processing messages
|
||||
- [Payload Formatting Specification](https://nomos-tech.notion.site/Payload-Formatting-215261aa09df81b2a3e1d913a0df9ad9)
|
||||
\- Defines payload structure and formatting rules
|
||||
- [Blend Protocol](https://nomos-tech.notion.site/Blend-Protocol-215261aa09df81ae8857d71066a80084)
|
||||
\- Protocol-wide constants and configuration values
|
||||
|
||||
### Informative
|
||||
|
||||
- [Message Formatting Specification](https://nomos-tech.notion.site/Message-Formatting-Specification-215261aa09df81c79e3acd9e921bcc30)
|
||||
\- Original Message Formatting documentation
|
||||
- [Blend Protocol Formatting](https://nomos-tech.notion.site/Formatting-215261aa09df81a3b3ebc1f438209467)
|
||||
\- High-level overview of message formatting in Blend Protocol
|
||||
|
||||
[payload-formatting]: https://nomos-tech.notion.site/Payload-Formatting-215261aa09df81b2a3e1d913a0df9ad9
|
||||
[message-encapsulation]: https://nomos-tech.notion.site/Message-Encapsulation-Mechanism-215261aa09df81309d7fd7f1c2da086b
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,272 +0,0 @@
|
||||
# NOMOS-PAYLOAD-FORMATTING
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Payload Formatting Specification |
|
||||
| Slug | 97 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Marcin Pawlowski <marcin@status.im> |
|
||||
| Contributors | Youngjoon Lee <youngjoon@status.im>, Alexander Mozeika <alexander.mozeika@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/nomos-payload-formatting.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/nomos-payload-formatting.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the Payload formatting for the Blend Protocol.
|
||||
The Payload has a fixed length to prevent traffic analysis attacks,
|
||||
with shorter messages padded using random data.
|
||||
|
||||
**Keywords:** Blend, payload formatting, padding, fixed length, traffic analysis
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
|
||||
document are to be interpreted as described in
|
||||
[RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### Construction
|
||||
|
||||
#### Payload
|
||||
|
||||
The `Payload` is a structure that contains a `Header` and a `body`.
|
||||
|
||||
```python
|
||||
class Payload:
|
||||
header: Header
|
||||
body: bytes
|
||||
```
|
||||
|
||||
#### Header
|
||||
|
||||
The `Header` is a structure that contains a `body_type` and a `body_length`.
|
||||
|
||||
```python
|
||||
class Header:
|
||||
body_type: byte
|
||||
body_length: uint16
|
||||
```
|
||||
|
||||
**Fields:**
|
||||
|
||||
- `body_type`: A single byte indicating the type of message in the body
|
||||
- `body_length`: A uint16 (encoded as little-endian)
|
||||
indicating the actual length of the raw message
|
||||
|
||||
#### Type
|
||||
|
||||
Messages are classified into two types:
|
||||
|
||||
- **Cover message**: Traffic used to obscure network patterns and enhance privacy
|
||||
- **Data message**: Traffic containing actual protocol data (e.g., block proposals)
|
||||
|
||||
The `body_type` field indicates the message classification:
|
||||
|
||||
- `body_type=0x00`: The body contains a cover message
|
||||
- `body_type=0x01`: The body contains a data message
|
||||
|
||||
Implementations MUST discard messages with any other `body_type` value,
|
||||
as this indicates the message was not decapsulated correctly.
|
||||
|
||||
#### Length
|
||||
|
||||
The `body_length` field is a uint16 (encoded as little-endian),
|
||||
with a theoretical maximum of 65535 bytes.
|
||||
The `body_length` MUST be set to the actual length of the raw message in bytes.
|
||||
|
||||
#### Body
|
||||
|
||||
The `MAX_BODY_LENGTH` parameter defines the maximum length of the body.
|
||||
The maximal length of a raw data message is 33129 bytes (Block Proposal),
|
||||
so `MAX_BODY_LENGTH=33129`.
|
||||
|
||||
The body length is fixed to `MAX_BODY_LENGTH` bytes.
|
||||
If the length of the raw message is shorter than `MAX_BODY_LENGTH`,
|
||||
it MUST be padded with random data.
|
||||
|
||||
```python
|
||||
MAX_BODY_LENGTH = 33129
|
||||
```
|
||||
|
||||
**Note:** The `MAX_BODY_LENGTH` (33129 bytes) defined here differs from
|
||||
`MAX_PAYLOAD_LENGTH` (34003 bytes) in the [Message Formatting specification][message-formatting].
|
||||
The Message Formatting specification includes additional Message headers
|
||||
beyond the Payload body.
|
||||
|
||||
## Implementation Considerations
|
||||
|
||||
### Fixed-Length Design
|
||||
|
||||
**Payload Size Uniformity:**
|
||||
|
||||
- All payloads have a fixed total length to prevent traffic analysis attacks
|
||||
- The body length is constant at MAX_BODY_LENGTH=33129 bytes
|
||||
regardless of actual content size
|
||||
- Shorter messages must be padded with random data to fill unused space
|
||||
- This design prevents adversaries from distinguishing message types based on size
|
||||
|
||||
**Padding Requirements:**
|
||||
|
||||
- If len(raw_message) < MAX_BODY_LENGTH, padding is required
|
||||
- Padding must consist of random data (not zeros or predictable patterns)
|
||||
- The body_length field indicates where the actual message ends and padding begins
|
||||
- Implementations must use cryptographically secure random number generation
|
||||
for padding
|
||||
|
||||
### Header Structure
|
||||
|
||||
**Total Header Size:**
|
||||
|
||||
- body_type: 1 byte
|
||||
- body_length: 2 bytes (uint16, little-endian)
|
||||
- Total header size: 3 bytes
|
||||
|
||||
**Endianness:**
|
||||
|
||||
- The body_length field uses little-endian encoding
|
||||
- Implementations must correctly encode/decode uint16 values in little-endian format
|
||||
- This ensures consistent interpretation across different platforms and architectures
|
||||
|
||||
### Message Type Validation
|
||||
|
||||
**Valid Types:**
|
||||
|
||||
- 0x00: Cover message (dummy traffic for privacy)
|
||||
- 0x01: Data message (actual protocol data or block proposals)
|
||||
|
||||
**Invalid Type Handling:**
|
||||
|
||||
- Any body_type value other than 0x00 or 0x01 indicates decapsulation failure
|
||||
- Messages with invalid types must be discarded immediately
|
||||
- Implementations should not attempt to process or forward invalid messages
|
||||
- Invalid types may indicate cryptographic errors or malicious manipulation
|
||||
|
||||
### Body Length Constraints
|
||||
|
||||
**Length Validation:**
|
||||
|
||||
- body_length must be ≤ MAX_BODY_LENGTH (33129 bytes)
|
||||
- body_length indicates the actual length of the raw message before padding
|
||||
- Implementations must verify body_length is within valid range before processing
|
||||
- The theoretical maximum is 65535 bytes (uint16 limit),
|
||||
but the protocol constrains it to 33129
|
||||
|
||||
**Message Extraction:**
|
||||
|
||||
- To extract the raw message: raw_message = body[0:body_length]
|
||||
- Padding data beyond body_length should be discarded
|
||||
- The padding serves only to maintain fixed payload size
|
||||
|
||||
### Maximum Message Size
|
||||
|
||||
**Block Proposal Size:**
|
||||
|
||||
- The current MAX_BODY_LENGTH=33129 is based on the maximum size of a Block Proposal
|
||||
- This value may be adjusted in future protocol versions
|
||||
- Implementations should use the constant rather than hardcoding the value
|
||||
- Total payload size = 3 bytes (header) + 33129 bytes (body) = 33132 bytes
|
||||
|
||||
**Total Payload Calculation:**
|
||||
|
||||
```python
|
||||
HEADER_SIZE = 3 # 1 byte type + 2 bytes length
|
||||
MAX_BODY_LENGTH = 33129
|
||||
MAX_PAYLOAD_LENGTH = HEADER_SIZE + MAX_BODY_LENGTH # 33132 bytes
|
||||
```
|
||||
|
||||
### Cover Traffic
|
||||
|
||||
**Cover Messages (body_type=0x00):**
|
||||
|
||||
- Cover messages provide traffic obfuscation to enhance privacy
|
||||
- They appear indistinguishable from data messages at the network level
|
||||
- The body of a cover message should contain random data
|
||||
- Cover messages are discarded after decapsulation
|
||||
|
||||
**Indistinguishability:**
|
||||
|
||||
- Cover and data messages have identical size due to fixed-length design
|
||||
- Both types follow the same formatting and encryption procedures
|
||||
- Network observers cannot distinguish cover traffic from real data
|
||||
|
||||
### Integration Points
|
||||
|
||||
**Required Specifications:**
|
||||
|
||||
- [Message Formatting Specification][message-formatting]: Defines the overall message structure
|
||||
that contains the payload
|
||||
- [Message Encapsulation Mechanism][message-encapsulation]: Handles encryption and encapsulation
|
||||
of the formatted payload
|
||||
- [Blend Protocol][blend-protocol]: Provides high-level overview of payload formatting
|
||||
|
||||
**Relationship to Message Formatting:**
|
||||
|
||||
- The Payload Formatting specification defines the internal structure of the payload
|
||||
- The Message Formatting specification defines how the payload is included
|
||||
in the complete message
|
||||
- The MAX_PAYLOAD_LENGTH in Message Formatting (34003 bytes)
|
||||
accounts for this payload structure
|
||||
|
||||
### Security Considerations
|
||||
|
||||
**Cryptographic Randomness:**
|
||||
|
||||
- Padding must use cryptographically secure random number generation
|
||||
- Predictable padding could leak information about message types or content
|
||||
- Never use zeros, repeated patterns, or pseudo-random generators for padding
|
||||
|
||||
**Length Information Leakage:**
|
||||
|
||||
- The fixed-length design prevents length-based traffic analysis
|
||||
- The body_length field is encrypted as part of the payload
|
||||
- Only after successful decapsulation can the actual message length be determined
|
||||
|
||||
**Type Field Security:**
|
||||
|
||||
- The body_type field is encrypted within the payload
|
||||
- Invalid types indicate potential security issues (failed decryption, tampering)
|
||||
- Implementations must discard invalid messages without further processing
|
||||
|
||||
**Message Validation Sequence:**
|
||||
|
||||
1. Decrypt and extract the payload
|
||||
2. Parse the 3-byte header
|
||||
3. Validate body_type is 0x00 or 0x01
|
||||
4. Validate body_length ≤ MAX_BODY_LENGTH
|
||||
5. Extract raw_message using body_length
|
||||
6. Process or discard based on body_type
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Message Formatting Specification][message-formatting]
|
||||
\- Defines the overall message structure containing the Payload
|
||||
- [Message Encapsulation Mechanism][message-encapsulation]
|
||||
\- Cryptographic operations for encrypting and encapsulating the Payload
|
||||
- [Blend Protocol][blend-protocol]
|
||||
\- Protocol-wide constants and configuration values
|
||||
|
||||
### Informative
|
||||
|
||||
- [Payload Formatting Specification][payload-formatting-origin]
|
||||
\- Original Payload Formatting documentation
|
||||
|
||||
[message-formatting]: https://nomos-tech.notion.site/Message-Formatting-Specification-215261aa09df81c79e3acd9e921bcc30
|
||||
[message-encapsulation]: https://nomos-tech.notion.site/Message-Encapsulation-Mechanism-215261aa09df81309d7fd7f1c2da086b
|
||||
[blend-protocol]: https://nomos-tech.notion.site/Blend-Protocol-215261aa09df81ae8857d71066a80084
|
||||
[payload-formatting-origin]: https://nomos-tech.notion.site/Payload-Formatting-Specification-215261aa09df8153a456c555b7dcbe1c
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,499 +0,0 @@
|
||||
# NOMOS-PROOF-OF-QUOTA
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Proof of Quota Specification |
|
||||
| Slug | 88 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Mehmet Gonen <mehmet@status.im> |
|
||||
| Contributors | Marcin Pawlowski <marcin@status.im>, Thomas Lavaur <thomaslavaur@status.im>, Youngjoon Lee <youngjoon@status.im>, David Rusu <davidrusu@status.im>, Álvaro Castro-Castilla <alvaro@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/nomos-proof-of-quota.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/nomos-proof-of-quota.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document defines an implementation-friendly specification
|
||||
of the Proof of Quota (PoQ),
|
||||
which ensures that there is a limited number of message encapsulations
|
||||
that a node can perform,
|
||||
thereby constraining the number of messages a node can introduce
|
||||
to the Blend network used in Nomos
|
||||
(see [NOMOS-BLEND-PROTOCOL](#references)).
|
||||
The mechanism regulating these messages is similar to rate-limiting nullifiers.
|
||||
|
||||
**Keywords:** cryptography, zero-knowledge, Blend, quota, rate-limiting,
|
||||
PoQ, nullifier
|
||||
|
||||
## Document Structure
|
||||
|
||||
This specification is organized into two distinct parts
|
||||
to serve different audiences and use cases:
|
||||
|
||||
**Protocol Specification** contains the normative requirements necessary
|
||||
for implementing an interoperable Blend Protocol node.
|
||||
This section defines the cryptographic primitives, message formats,
|
||||
network protocols, and behavioral requirements that all implementations
|
||||
must follow to ensure compatibility and maintain the protocol's
|
||||
privacy guarantees.
|
||||
Protocol designers, auditors, and those seeking to understand the core
|
||||
mechanisms should focus on this part.
|
||||
|
||||
**Implementation Considerations** provides non-normative guidance
|
||||
for implementers.
|
||||
This section offers practical recommendations, optimization strategies,
|
||||
and detailed examples that help developers build efficient and robust
|
||||
implementations.
|
||||
While these details are not required for interoperability,
|
||||
they represent best practices learned from reference implementations
|
||||
and can significantly improve performance and reliability.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
This section defines the normative cryptographic protocol requirements
|
||||
for the Proof of Quota.
|
||||
|
||||
### Construction
|
||||
|
||||
The Proof of Quota (PoQ) can be satisfied by one of two proof types,
|
||||
depending on the node's role in the network:
|
||||
|
||||
1. **Proof of Core Quota (PoQ_C)**: Ensures that the core node is declared
|
||||
and hasn't already produced more keys than the core quota Q_C.
|
||||
|
||||
2. **Proof of Leadership Quota (PoQ_L)**: Ensures that the leader node
|
||||
would win the proof of stake for the current Cryptarchia epoch
|
||||
(see [Cryptarchia Consensus](#references))
|
||||
and hasn't already produced more keys than the leadership quota Q_L.
|
||||
This doesn't guarantee that the node is indeed winning
|
||||
because the PoQ doesn't check if the Proof of Leadership note
|
||||
(representing staked value) is unspent,
|
||||
enabling generation of the proof ahead of time, preventing extreme delays.
|
||||
|
||||
**Validity**: The final proof PoQ is valid if either PoQ_C or PoQ_L holds.
|
||||
|
||||
### Zero-Knowledge Proof Statement
|
||||
|
||||
#### Public Values
|
||||
|
||||
A proof attesting that for the following public values
|
||||
derived from blockchain parameters:
|
||||
|
||||
**Type Definition:**
|
||||
`zkhash` represents a 256-bit hash value used in zero-knowledge circuits,
|
||||
typically a Poseidon hash output compatible with the BN256 scalar field.
|
||||
|
||||
```python
|
||||
class ProofOfQuotaPublic:
|
||||
session: int # Session number (uint64)
|
||||
core_quota: int # Allowed messages per session for core nodes (20 bits)
|
||||
leader_quota: int # Allowed messages per session for potential leaders (20 bits)
|
||||
core_root: zkhash # Merkle root of zk_id of the core nodes
|
||||
K_part_one: int # First part of the signature public key (16 bytes)
|
||||
K_part_two: int # Second part of the signature public key (16 bytes)
|
||||
pol_epoch_nonce: int # PoL Epoch nonce
|
||||
pol_t0: int # PoL constant t0
|
||||
pol_t1: int # PoL constant t1
|
||||
pol_ledger_aged: zkhash # Merkle root of the PoL eligible notes
|
||||
|
||||
# Outputs:
|
||||
key_nullifier: zkhash # Derived from session, private index and private sk
|
||||
```
|
||||
|
||||
**Field Descriptions:**
|
||||
|
||||
- `session`: Unique session identifier for temporal partitioning
|
||||
- `core_quota`: Maximum number of message encapsulations allowed per session
|
||||
for core nodes (20-bit value)
|
||||
- `leader_quota`: Maximum number of message encapsulations allowed per session
|
||||
for potential leaders (20-bit value)
|
||||
- `core_root`: Root of Merkle tree containing zk_id values
|
||||
of all declared core nodes
|
||||
- `K_part_one`, `K_part_two`: Split representation of one-time signature
|
||||
public key (32 bytes total)
|
||||
- `pol_epoch_nonce`: Proof of Leadership epoch nonce for lottery
|
||||
- `pol_t0`, `pol_t1`: Proof of Leadership threshold constants
|
||||
- `pol_ledger_aged`: Root of Merkle tree containing eligible
|
||||
Proof of Leadership notes
|
||||
- `key_nullifier`: Output nullifier preventing key reuse within a session
|
||||
|
||||
#### Witness
|
||||
|
||||
The prover knows a witness:
|
||||
|
||||
```python
|
||||
class ProofOfQuotaWitness:
|
||||
index: int # Index of the generated key (20 bits)
|
||||
selector: bool # Indicates if it's a leader (=1) or core node (=0)
|
||||
|
||||
# This part is filled randomly by potential leaders
|
||||
core_sk: zkhash # sk corresponding to the zk_id of the core node
|
||||
core_path: list[zkhash] # Merkle path proving zk_id membership (len = 20)
|
||||
core_path_selectors: list[bool] # Indicates how to read the core_path
|
||||
|
||||
# This part is filled randomly by core nodes
|
||||
pol_sl: int # PoL slot
|
||||
pol_sk_starting_slot: int # PoL starting slot of the slot secrets
|
||||
pol_note_value: int # PoL note value
|
||||
pol_note_tx_hash: zkhash # PoL note transaction
|
||||
pol_note_output_number: int # PoL note transaction output number
|
||||
pol_noteid_path: list[zkhash] # PoL Merkle path proving noteID membership (len = 32)
|
||||
    pol_noteid_path_selectors: list[bool] # Indicates how to read the pol_noteid_path
|
||||
pol_slot_secret: int # PoL slot secret corresponding to sl
|
||||
pol_slot_secret_path: list[zkhash] # PoL slot secret Merkle path (len = 25)
|
||||
```
|
||||
|
||||
**Witness Field Descriptions:**
|
||||
|
||||
- `index`: The index of the generated key.
|
||||
Limiting this index limits the maximum number of keys generated
|
||||
(20 bits enables up to 2^20 = 1,048,576 messages per node per session)
|
||||
- `selector`: Boolean flag indicating node type (1 for leader, 0 for core node)
|
||||
- `core_sk`: Secret key corresponding to the core node's zk_id
|
||||
- `core_path`: Merkle authentication path for core node membership
|
||||
- `core_path_selectors`: Navigation bits for Merkle path (left/right)
|
||||
- `pol_*`: Proof of Leadership witness fields (filled randomly by core nodes)
|
||||
|
||||
**Note**: All inputs and outputs of zero-knowledge proofs are scalar field elements.
|
||||
|
||||
### Constraints
|
||||
|
||||
The following constraints MUST hold for a valid proof:
|
||||
|
||||
#### Step 1: Index Selection and Quota Limitation
|
||||
|
||||
The prover selects an index for the chosen key.
|
||||
This index MUST be lower than the allowed quota and not already used.
|
||||
This index is used to derive the key nullifier in
|
||||
[Step 4: Key Nullifier Derivation](#step-4-key-nullifier-derivation).
|
||||
|
||||
**Purpose**: Limiting the possible values of this index limits the possible
|
||||
nullifiers created,
|
||||
which produces the desired effect of limiting the generation of keys
|
||||
to a certain quota.
|
||||
|
||||
**Specification**: `index` is 20 bits,
|
||||
enabling up to 2^20 messages per node per session.
|
||||
|
||||
#### Step 2: Core Node Verification
|
||||
|
||||
If the prover indicated that the node is a core node for the proof
|
||||
(`selector` is 0), the proof checks that:
|
||||
|
||||
1. **Core Node Registration**: The core node is registered
|
||||
in the set N = SDP(session),
|
||||
where SDP is the Service Declaration Protocol
|
||||
(see [Service Declaration Protocol](#references)).
|
||||
This is proven by demonstrating knowledge of a `core_sk`
|
||||
that corresponds to a declared `zk_id`,
|
||||
which is a valid SDP registry for the current session.
|
||||
|
||||
- The `zk_id` values are stored in a Merkle tree with a fixed depth of 20
|
||||
- The root is provided as a public input
|
||||
- To build the Merkle tree, `zk_id` values are ordered from smallest
|
||||
to biggest (when seen as natural numbers between 0 and p)
|
||||
- Remaining empty leaves are represented by 0 after the sorting
|
||||
(appended at the end of the vector)
|
||||
- This structure supports up to 1M validators
|
||||
|
||||
2. **Index Validity**: The index MUST satisfy: `index < core_quota`
|
||||
|
||||
#### Step 3: Leader Node Verification
|
||||
|
||||
If the prover indicated that the node is a potential leader node for the proof
|
||||
(`selector` is 1), the proof checks that:
|
||||
|
||||
1. **Leadership Lottery**: The leader node possesses a note
|
||||
that would win a slot in the consensus lottery.
|
||||
Unlike leadership conditions,
|
||||
the proof of quota doesn't verify that the note is unspent.
|
||||
This enables potential provers to generate the PoQ well in advance.
|
||||
All other lottery constraints are the same as in Circuit Constraints.
|
||||
|
||||
2. **Index Validity**: The index MUST satisfy: `index < leader_quota`
|
||||
|
||||
#### Step 4: Key Nullifier Derivation
|
||||
|
||||
The prover derives a `key_nullifier` maintained by blend nodes
|
||||
during the session for message deduplication purposes:
|
||||
|
||||
```python
|
||||
selection_randomness = zkhash(b"SELECTION_RANDOMNESS_V1", sk, index, session)
|
||||
key_nullifier = zkhash(b"KEY_NULLIFIER_V1", selection_randomness)
|
||||
```
|
||||
|
||||
Where `sk` is:
|
||||
|
||||
- The `core_sk` as defined in the Mantle specification if the node is a core node
|
||||
- The secret key of the PoL note, derived from the witness inputs, if the node is a leader node
|
||||
|
||||
**Rationale**: Two hashes are used because the selection randomness is used
|
||||
in the Proof of Selection to prove the ownership of a valid PoQ.
|
||||
|
||||
#### Step 5: One-Time Signature Key Attachment
|
||||
|
||||
The prover attaches a one-time signature key used in the blend protocol.
|
||||
This public key is split into two 16-byte parts:
|
||||
`K_part_one` and `K_part_two`.
|
||||
|
||||
**Encoding**: When written in little-endian byte order,
|
||||
the complete public key equals the concatenation `K_part_one || K_part_two`.
|
||||
|
||||
### Circuit Implementation
|
||||
|
||||
```python
|
||||
# Verify selector is a boolean
|
||||
# selector = 1 if it's a potential leader and 0 if it's a core node
|
||||
selector * (1 - selector) == 0 # Check that selector is indeed a bit
|
||||
|
||||
# Verify index is lower than quota
|
||||
# Equivalent to: index < leader_quota if selector == 1
|
||||
# or index < core_quota if selector == 0
|
||||
index < selector * (leader_quota - core_quota) + core_quota
|
||||
|
||||
# Check if it's a registered core node
|
||||
zk_id = zkhash(b"NOMOS_KDF", core_sk)
|
||||
is_registered = merkle_verify(core_root, core_path, core_path_selectors, zk_id)
|
||||
|
||||
# Check if it's a potential leader
|
||||
is_leader = would_win_leadership(
|
||||
pol_epoch_nonce,
|
||||
pol_t0,
|
||||
pol_t1,
|
||||
pol_ledger_aged,
|
||||
pol_sl,
|
||||
pol_sk_starting_slot,
|
||||
pol_sk_secrets_root,
|
||||
pol_note_value,
|
||||
pol_note_tx_hash,
|
||||
pol_note_output_number,
|
||||
pol_noteid_path,
|
||||
pol_noteid_path_selectors,
|
||||
pol_slot_secret,
|
||||
pol_slot_secret_path
|
||||
)
|
||||
|
||||
# Verify that it's a core node or a leader
|
||||
assert(selector * (is_leader - is_registered) + is_registered == 1)
|
||||
|
||||
# Get leader note secret key
|
||||
pol_sk_secrets_root = get_merkle_root(pol_sk_starting_slot, pol_sl, pol_slot_secret_path)
|
||||
pol_note_sk = zkhash(b"NOMOS_POL_SK_V1", pol_sk_starting_slot, pol_sk_secrets_root)
|
||||
|
||||
# Derive nullifier
|
||||
selection_randomness = zkhash(
|
||||
b"SELECTION_RANDOMNESS_V1",
|
||||
selector * (pol_note_sk - core_sk) + core_sk,
|
||||
index,
|
||||
session
|
||||
)
|
||||
key_nullifier = zkhash(b"KEY_NULLIFIER_V1", selection_randomness)
|
||||
```
|
||||
|
||||
### Proof Compression
|
||||
|
||||
The proof confirming that the PoQ is correct MUST be compressed
|
||||
to a size of 128 bytes.
|
||||
|
||||
**Uncompressed Format**: The UncompressedProof comprises 2 G1 and 1 G2
|
||||
BN256 elements:
|
||||
|
||||
```python
|
||||
class UncompressedProof:
|
||||
pi_a: G1 # BN256 element
|
||||
pi_b: G2 # BN256 element
|
||||
pi_c: G1 # BN256 element
|
||||
```
|
||||
|
||||
**Compression Requirements**:
|
||||
|
||||
- Compressed size: 128 bytes
|
||||
- Curve: BN256 (also known as BN254 or alt_bn128)
|
||||
- Compression MUST preserve proof validity
|
||||
|
||||
### Proof Serialization
|
||||
|
||||
The ProofOfQuota structure contains `key_nullifier` and the compressed proof
|
||||
transformed into bytes.
|
||||
|
||||
```python
|
||||
class ProofOfQuota:
    # Wire format: serialized as key_nullifier || proof,
    # for a fixed total size of 160 bytes.
    key_nullifier: zkhash # 32 bytes
    proof: bytes # 128 bytes
|
||||
```
|
||||
|
||||
**Serialization Format**:
|
||||
|
||||
1. Transform `key_nullifier` into 32 bytes
|
||||
2. Compress proof to 128 bytes
|
||||
3. Concatenate: `key_nullifier || proof`
|
||||
4. Total size: 160 bytes
|
||||
|
||||
**Deserialization**:
|
||||
|
||||
Interpret the 160-byte sequence as:
|
||||
|
||||
- Bytes 0-31: `key_nullifier`
|
||||
- Bytes 32-159: `proof`
|
||||
|
||||
### Security Considerations
|
||||
|
||||
#### Quota Enforcement
|
||||
|
||||
- Implementations MUST track `key_nullifier` values during each session
|
||||
- Duplicate `key_nullifier` values MUST be rejected
|
||||
- Session transitions MUST clear the nullifier set
|
||||
|
||||
#### Proof Verification
|
||||
|
||||
- All Merkle path verifications MUST be performed
|
||||
- The `selector` bit MUST be verified as boolean (0 or 1)
|
||||
- Index bounds MUST be strictly enforced
|
||||
- Implementations MUST reject proofs where neither core nor leader conditions hold
|
||||
|
||||
#### Cryptographic Assumptions
|
||||
|
||||
- Relies on soundness of the underlying zk-SNARK system
|
||||
- Assumes collision resistance of `zkhash` function
|
||||
- Assumes computational Diffie-Hellman assumption on BN256 curve
|
||||
|
||||
#### Note Unspent Condition
|
||||
|
||||
- **Critical**: The PoQ does NOT verify that Proof of Leadership notes are unspent
|
||||
- This allows pre-generation of proofs to avoid delays
|
||||
- Implementations SHOULD implement additional checks for actual leadership
|
||||
|
||||
## Implementation Considerations
|
||||
|
||||
This section provides guidance for implementing the Proof of Quota protocol.
|
||||
|
||||
### Proof Generation
|
||||
|
||||
**Performance Characteristics**:
|
||||
|
||||
Implementations SHOULD consider:
|
||||
|
||||
- Proof generation is computationally intensive
|
||||
- Pre-generation is recommended for leader nodes
|
||||
- Witness preparation involves Merkle path computation
|
||||
|
||||
### Proof Verification Implementation
|
||||
|
||||
**Verification Steps**:
|
||||
|
||||
1. Deserialize proof into `key_nullifier` and `proof` components
|
||||
2. Verify proof size (160 bytes total)
|
||||
3. Check `key_nullifier` against session nullifier set
|
||||
4. Verify zk-SNARK proof with public inputs
|
||||
5. Add `key_nullifier` to session set if valid
|
||||
|
||||
### Merkle Tree Construction
|
||||
|
||||
#### Core Nodes Merkle Tree
|
||||
|
||||
**Specification**:
|
||||
|
||||
- Depth: 20 levels
|
||||
- Leaf values: `zk_id` of declared core nodes
|
||||
- Ordering: Ascending numerical order (as natural numbers 0 to p)
|
||||
- Empty leaves: Represented by 0, appended after sorted values
|
||||
- Capacity: 2^20 = 1,048,576 validators
|
||||
|
||||
**Construction Algorithm**:
|
||||
|
||||
```python
|
||||
def build_core_tree(zk_ids: list[int]) -> MerkleTree:
    """Construct the depth-20 core-nodes Merkle tree.

    Leaves are the declared core nodes' zk_ids in ascending numerical
    order, followed by zero-valued empty leaves up to the full
    2**20 (= 1,048,576) capacity.
    """
    capacity = 2 ** 20
    # Ascending numerical order, as required by the specification.
    leaves = sorted(zk_ids)
    # Empty leaves are represented by 0 and appended after the sorted values.
    leaves.extend([0] * (capacity - len(leaves)))
    return MerkleTree(leaves, depth=20)
|
||||
```
|
||||
|
||||
#### PoL Ledger Merkle Tree
|
||||
|
||||
**Specification**:
|
||||
|
||||
- Depth: 32 levels
|
||||
- Leaf values: Note IDs of eligible PoL notes
|
||||
- Purpose: Prove note membership in aged ledger
|
||||
|
||||
### Session Management
|
||||
|
||||
**Session Lifecycle**:
|
||||
|
||||
1. **Session Start**:
|
||||
- Initialize empty nullifier set
|
||||
- Load current session parameters (quotas, roots)
|
||||
- Prepare session number for proofs
|
||||
|
||||
2. **During Session**:
|
||||
- Verify incoming proofs
|
||||
- Track nullifiers in set
|
||||
- Reject duplicate nullifiers
|
||||
|
||||
3. **Session End**:
|
||||
- Clear nullifier set
|
||||
- Archive session data
|
||||
- Transition to next session
|
||||
|
||||
### Best Practices
|
||||
|
||||
#### Nullifier Set Management
|
||||
|
||||
- Use efficient data structure (hash set or Bloom filter with fallback)
|
||||
- Implement atomic operations for nullifier insertion
|
||||
- Consider memory constraints for long sessions
|
||||
|
||||
#### Pre-Generation Strategy
|
||||
|
||||
For leader nodes:
|
||||
|
||||
- Generate proofs before slot assignment
|
||||
- Cache proofs for multiple indices
|
||||
- Monitor note status separately from PoQ
|
||||
|
||||
#### Error Handling
|
||||
|
||||
Implementations SHOULD handle:
|
||||
|
||||
- Invalid proof format
|
||||
- Duplicate nullifiers
|
||||
- Index out of bounds
|
||||
- Merkle path verification failures
|
||||
- Invalid selector values
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- NOMOS-BLEND-PROTOCOL - Blend Protocol specification for Nomos
|
||||
- Service Declaration Protocol (SDP) - Protocol for declaring core nodes
|
||||
- Mantle Specification
|
||||
- Circuit Constraints (Cryptarchia)
|
||||
- Proof of Selection
|
||||
- [Rate-Limiting Nullifiers](https://rate-limiting-nullifier.github.io/rln-docs/)
|
||||
\- RLN documentation for rate-limiting mechanisms
|
||||
|
||||
### Informative
|
||||
|
||||
- [Proof of Quota Specification](https://nomos-tech.notion.site/Proof-of-Quota-Specification-215261aa09df81d88118ee22205cbafe)
|
||||
\- Original Proof of Quota documentation
|
||||
- BN256 Curve Specification
|
||||
- zk-SNARKs (Zero-Knowledge Succinct Non-Interactive Arguments of Knowledge)
|
||||
- [Cryptarchia Consensus](https://arxiv.org/abs/2402.06408)
|
||||
- Merkle Trees and Authentication Paths
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,256 +0,0 @@
|
||||
# NOMOS-WALLET-TECHNICAL-STANDARD
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos Wallet Technical Standard |
|
||||
| Slug | 154 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Tags | wallet, key derivation, HD wallet, mnemonic, BIP-32, BIP-39, Poseidon2 |
|
||||
| Editor | Giacomo Pasini <giacomo@status.im> |
|
||||
| Contributors | Thomas Lavaur <thomas@status.im>, Mehmet Gonen <mehmet@status.im>, Daniel Sanchez Quiros <daniel@status.im>, Alvaro Castro-Castilla <alvaro@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-13** — [`b7813dc`](https://github.com/logos-co/logos-lips/blob/b7813dce5a7413f7d7c430d9f2c2bbee367fbeef/docs/blockchain/raw/nomos-wallet-technical-standard.md) — feat: add Nomos Wallet Technical Standard specification (#292)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the wallet technical standard for Nomos,
|
||||
covering key generation, hierarchical deterministic (HD) wallet derivation,
|
||||
and zero-knowledge (ZK) compatible secret key derivation.
|
||||
The primary motivation is avoiding lock-in to a specific wallet software
|
||||
by specifying the algorithms used to derive keys,
|
||||
allowing users to migrate between implementations.
|
||||
The specification adapts pre-existing Bitcoin standards
|
||||
([BIP-39][bip-39] and [BIP-32][bip-32]) to Nomos,
|
||||
with modifications necessitated by the use of hash-based secret/public key pairs
|
||||
and zero-knowledge proof requirements.
|
||||
|
||||
**Keywords:** wallet, key derivation, hierarchical deterministic,
|
||||
mnemonic codes, BIP-32, BIP-39, BLAKE2b, Poseidon2, extended keys, ZK-compatible
|
||||
|
||||
## Semantics
|
||||
|
||||
The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in [RFC 2119][rfc-2119].
|
||||
|
||||
### Definitions
|
||||
|
||||
| Terminology | Description |
|
||||
| ----------- | ----------- |
|
||||
| HD Wallet | Hierarchical Deterministic wallet. A wallet that uses a single source of entropy to generate many different addresses. |
|
||||
| Mnemonic Code | A human-readable representation of entropy used for key generation, as defined in [BIP-39][bip-39]. |
|
||||
| Extended Key | A key extended with an additional 256 bits of entropy (chain code) to prevent child keys from depending solely on the parent key. |
|
||||
| Chain Code | A 32-byte extension added to both private and public keys, identical for corresponding key pairs. |
|
||||
| Hardened Child Key | A child key derived using the parent private key, with indices from $2^{31}$ through $2^{32} - 1$. |
|
||||
| Normal Child Key | A child key derivable from a parent public key alone. Not available in Nomos due to the absence of elliptic curve homomorphic properties. |
|
||||
| Poseidon2 | A ZK-optimized hash function that operates on field elements rather than raw bytes. |
|
||||
| BN254 | A Barreto-Naehrig elliptic curve used in ZK proof systems. $\mathbb{F}_{r}$ denotes its scalar field. |
|
||||
| `zkhash` | A ZK-friendly hash function used for public key derivation from a secret key. |
|
||||
|
||||
## Background
|
||||
|
||||
This specification mostly follows pre-existing standards in Bitcoin
|
||||
and adapts them to Nomos when necessary.
|
||||
This is also the choice of other Bitcoin-inspired projects
|
||||
like [Cardano][cardano-cip-0003] or [Zcash][zcash-zip-0032].
|
||||
For this reason, this document does not go over the entire specification,
|
||||
and highlights only the differences with existing standards.
|
||||
|
||||
## Protocol Specification
|
||||
|
||||
### Mnemonic Codes for Key Generation
|
||||
|
||||
Mnemonic codes are far easier to interact with as humans
|
||||
than raw binary or hex strings
|
||||
and are the standard for wallets.
|
||||
The mnemonic code generation process
|
||||
MUST follow [BIP-39][bip-39] entirely,
|
||||
as it involves only operations on strings and bytes.
|
||||
|
||||
### Hierarchical Deterministic Wallet
|
||||
|
||||
Hierarchical Deterministic (HD) wallets are the standard approach.
|
||||
Using a single source of entropy
|
||||
(usually obtained through the mnemonic process above),
|
||||
it is possible to generate many different addresses
|
||||
and share all or part of the key hierarchy.
|
||||
|
||||
The industry standard is [BIP-32][bip-32].
|
||||
However, Nomos cannot use it as-is,
|
||||
because Nomos uses different keys and cryptographic components.
|
||||
In addition, some [BIP-32][bip-32] features
|
||||
are only possible thanks to homomorphic properties
|
||||
of elliptic curve cryptography (ECC),
|
||||
which are not available in Nomos since it uses hash-based secret/public keys.
|
||||
|
||||
[BIP-32][bip-32] specifies two kinds of child keys:
|
||||
|
||||
- **Normal**: a child public key can be derived from the parent public key.
|
||||
- **Hardened**: the parent private key is needed
|
||||
to derive a child private and public key.
|
||||
|
||||
Normal children are possible thanks to specific properties
|
||||
of the keys used in Bitcoin that are not available in Nomos
|
||||
(namely, homomorphism).
|
||||
To maintain compatibility,
|
||||
the same structure is used
|
||||
but non-hardened children are not available.
|
||||
|
||||
The following diagram illustrates the [BIP-32][bip-32] HD wallet structure:
|
||||
|
||||

|
||||
|
||||
### Extended Keys
|
||||
|
||||
> In what follows, a function is defined that derives a number of child keys
|
||||
> from a parent key.
|
||||
> In order to prevent these from depending solely on the key itself,
|
||||
> both private and public keys are first extended
|
||||
> with an extra 256 bits of entropy.
|
||||
> This extension, called the chain code,
|
||||
> is identical for corresponding private and public keys,
|
||||
> and consists of 32 bytes.
|
||||
>
|
||||
> An extended private key is represented as $(k, c)$,
|
||||
> with $k$ the normal private key, and $c$ the chain code.
|
||||
> An extended public key is represented as $(K, c)$,
|
||||
> with $K = zkhash(\text{"KDF\_V1"}, k)$ the public key and $c$ the chain code.
|
||||
>
|
||||
> Each extended key has $2^{31}$ hardened children keys.
|
||||
> Each of these child keys has an index.
|
||||
> The hardened child keys use indices from $2^{31}$ through $2^{32} - 1$.
|
||||
|
||||
### Notation
|
||||
|
||||
- $(k_{par}, c_{par})$: the parent extended key,
|
||||
composed of the private key $k_{par}$ and the chain code $c_{par}$.
|
||||
- $ser_{32}(i)$: serialize a 32-bit unsigned integer $i$
|
||||
as a 4-byte sequence, most significant byte first.
|
||||
- $Blake2b\_512(p, x)$: refers to unkeyed BLAKE2b-512 in sequential mode,
|
||||
with an output digest length of 64 bytes,
|
||||
16-byte personalization string $p$, and input $x$.
|
||||
- $PRF^{expand}(x, y) : Blake2b\_512(\text{"Nomos\_ExpandSeed"}, x \| y)$,
|
||||
a pseudo-random function.
|
||||
|
||||
### Child Key Derivation
|
||||
|
||||
$CKDpriv((k_{par}, c_{par}), i) \rightarrow (k_{i}, c_{i})$:
|
||||
|
||||
1. Check whether $i \geq 2^{31}$ (whether the child is a hardened key).
|
||||
|
||||
- If so (hardened child):
|
||||
let $I = PRF^{expand}(c_{par}, 0x00 \| k_{par} \| ser_{32}(i))$.
|
||||
- If not (normal child): failure.
|
||||
|
||||
1. Split $I$ into two 32-byte sequences, $I_{L}$, $I_{R}$.
|
||||
|
||||
1. The returned child key $k_{i}$ is $I_{L}$.
|
||||
|
||||
1. The returned chain code $c_{i}$ is $I_{R}$.
|
||||
|
||||
### Master Key Generation
|
||||
|
||||
1. Generate a seed byte sequence $S$ of a chosen length
|
||||
(e.g. with [BIP-39][bip-39]).
|
||||
|
||||
1. Calculate $I = Blake2b\_512(\text{"Nomos\_MasterKGen"}, S)$.
|
||||
|
||||
1. Split $I$ into two 32-byte sequences, $I_{L}$ and $I_{R}$.
|
||||
|
||||
1. Use $I_{L}$ as master secret key, and $I_{R}$ as master chain code.
|
||||
|
||||
### ZK-Compatible Secret Key Derivation
|
||||
|
||||
Since Nomos makes extensive use of ZK proofs,
|
||||
the secret-to-public key derivation needs to be efficient.
|
||||
For this purpose, a ZK-optimized hash function is used: Poseidon2.
|
||||
|
||||
However, Poseidon2 operates on field elements rather than raw bytes,
|
||||
so $k_{i}$ as specified above cannot be simply input directly.
|
||||
Instead, these bytes need to be encoded into field elements.
|
||||
Two field elements are needed to encode 32 bytes (the size of $k_{i}$).
|
||||
This creates inefficiency because although a single field element
|
||||
provides adequate security,
|
||||
twice as many are needed,
|
||||
increasing computation costs to accommodate the entire key.
|
||||
|
||||
To reduce this additional cost inside the proof,
|
||||
one final hash function is applied
|
||||
that compresses these two field elements into a single one,
|
||||
which becomes the actual key used in the Nomos network:
|
||||
|
||||
1. Let $k_{L}$, $k_{R}$ be 16-byte sequences
|
||||
such that $k_{i} = k_{L} \| k_{R}$
|
||||
and $n_{L}$, $n_{R}$ be their values
|
||||
when interpreted as little-endian unsigned integers.
|
||||
|
||||
1. Let $e_{L}$, $e_{R}$ be scalar field elements in BN254
|
||||
such that $e_{L} := n_{L} \in \mathbb{F}_{r}$,
|
||||
$e_{R} := n_{R} \in \mathbb{F}_{r}$.
|
||||
|
||||
1. The Nomos key can be obtained as
|
||||
$k_{\text{nomos}} = Poseidon2(e_{L}, e_{R})$,
|
||||
where $Poseidon2$ outputs a single field element.
|
||||
|
||||
> **Note: Why not use Poseidon2 for the full derivation?**
|
||||
>
|
||||
> While Poseidon2 is optimized for ZK circuits,
|
||||
> its long-term stability and parameterization are still evolving.
|
||||
> General-purpose hash functions like Blake2b
|
||||
> offer a more stable and audited base layer.
|
||||
> By introducing Poseidon2 only at the last compression step,
|
||||
> ZK dependencies are isolated from the rest of the key derivation path.
|
||||
> This ensures the wallet hierarchy remains valid
|
||||
> even if Poseidon2 parameters are updated.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Key Derivation Security
|
||||
|
||||
- Implementations MUST only support hardened child key derivation.
|
||||
Non-hardened (normal) child keys are not available in Nomos
|
||||
due to the absence of elliptic curve homomorphic properties.
|
||||
- The master secret key MUST be generated from a seed
|
||||
of sufficient entropy (at least 128 bits).
|
||||
|
||||
### ZK Key Compression
|
||||
|
||||
- The final Poseidon2 compression step isolates ZK dependencies
|
||||
from the rest of the key derivation path,
|
||||
ensuring that the wallet hierarchy remains valid
|
||||
even if Poseidon2 parameters are updated.
|
||||
- Implementations SHOULD ensure that field element encoding
|
||||
is performed correctly using little-endian unsigned integer interpretation.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [BIP-39][bip-39] - Mnemonic code for generating deterministic keys
|
||||
- [BIP-32][bip-32] - Hierarchical Deterministic Wallets
|
||||
|
||||
### Informative
|
||||
|
||||
- [Zcash ZIP-0032][zcash-zip-0032] - Shielded Hierarchical Deterministic Wallets
|
||||
- [Cardano CIP-0003][cardano-cip-0003] - Wallet Key Generation
|
||||
- [SLIP-0023][slip-0023] - Cardano HD Key Derivation
|
||||
- [Wallet Technical Standard (Notion)][source] - Origin reference
|
||||
|
||||
[rfc-2119]: https://www.ietf.org/rfc/rfc2119.txt
|
||||
[bip-39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki
|
||||
[bip-32]: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
|
||||
[zcash-zip-0032]: https://zips.z.cash/zip-0032
|
||||
[cardano-cip-0003]: https://github.com/cardano-foundation/CIPs/blob/master/CIP-0003/README.md
|
||||
[slip-0023]: https://github.com/satoshilabs/slips/blob/master/slip-0023.md
|
||||
[source]: https://www.notion.so/nomos-tech/Wallet-Technical-Standard-215261aa09df80e9884ad7cf039e2c57
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,266 +0,0 @@
|
||||
# NOMOS-DA-NETWORK
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | NomosDA Network |
|
||||
| Slug | 136 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Daniel Sanchez Quiros <danielsq@status.im> |
|
||||
| Contributors | Álvaro Castro-Castilla <alvaro@status.im>, Daniel Kashepava <danielkashepava@status.im>, Gusto Bacvinka <augustinas@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/blockchain/raw/nomosda-network.md) — chore: add math support (#287)
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/nomosda-network.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/nomosda-network.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/nomos/raw/nomosda-network.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/nomos/raw/nomosda-network.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/nomos/raw/nomosda-network.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-09-25** — [`51ef4cd`](https://github.com/logos-co/logos-lips/blob/51ef4cd533d8824291d9e2884bb467235b32a450/nomos/raw/nomosda-network.md) — added nomos/raw/nomosda-network.md (#160)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Introduction
|
||||
|
||||
NomosDA is the scalability solution protocol for data availability within the Nomos network.
|
||||
This document delineates the protocol's structure at the network level,
|
||||
identifies participants,
|
||||
and describes the interactions among its components.
|
||||
Please note that this document does not delve into the cryptographic aspects of the design.
|
||||
For comprehensive details on the cryptographic operations,
|
||||
a detailed specification is a work in progress.
|
||||
|
||||
## Objectives
|
||||
|
||||
NomosDA was created to ensure that data from Nomos zones is distributed, verifiable, immutable, and accessible.
|
||||
At the same time, it is optimised for the following properties:
|
||||
|
||||
- **Decentralization**: NomosDA’s data availability guarantees must be achieved with minimal trust assumptions
|
||||
and centralised actors. Therefore,
|
||||
permissioned DA schemes involving a Data Availability Committee (DAC) had to be avoided in the design.
|
||||
Schemes that require some nodes to download the entire blob data were also off the list
|
||||
due to the disproportionate role played by these “supernodes”.
|
||||
|
||||
- **Scalability**: NomosDA is intended to be a bandwidth-scalable protocol, ensuring that its functions are maintained as the Nomos network grows. Therefore, NomosDA was designed to minimise the amount of data sent to participants, reducing the communication bottleneck and allowing more parties to participate in the DA process.
|
||||
|
||||
To achieve the above properties, NomosDA splits up zone data and
|
||||
distributes it among network participants,
|
||||
with cryptographic properties used to verify the data’s integrity.
|
||||
A major feature of this design is that parties who wish to receive an assurance of data availability
|
||||
can do so very quickly and with minimal hardware requirements.
|
||||
However, this comes at the cost of additional complexity and resources required by more integral participants.
|
||||
|
||||
## Requirements
|
||||
|
||||
In order to ensure that the above objectives are met,
|
||||
the NomosDA network requires a group of participants
|
||||
that undertake a greater burden in terms of active involvement in the protocol.
|
||||
Recognising that not all node operators can do so,
|
||||
NomosDA assigns different roles to different kinds of participants,
|
||||
depending on their ability and willingness to contribute more computing power
|
||||
and bandwidth to the protocol.
|
||||
It was therefore necessary for NomosDA to be implemented as an opt-in Service Network.
|
||||
|
||||
Because the NomosDA network has an arbitrary amount of participants,
|
||||
and the data is split into a fixed number of portions (see the [Encoding & Verification Specification](https://www.notion.so/NomosDA-Encoding-Verification-4d8ca269e96d4fdcb05abc70426c5e7c)),
|
||||
it was necessary to define exactly how each portion is assigned to a participant who will receive and verify it.
|
||||
This assignment algorithm must also be flexible enough to ensure smooth operation in a variety of scenarios,
|
||||
including where there are more or fewer participants than the number of portions.
|
||||
|
||||
## Overview
|
||||
|
||||
### Network Participants
|
||||
|
||||
The NomosDA network includes three categories of participants:
|
||||
|
||||
- **Executors**: Tasked with the encoding and dispersal of data blobs.
|
||||
- **DA Nodes**: Receive and verify the encoded data,
|
||||
subsequently temporarily storing it for further network validation through sampling.
|
||||
- **Light Nodes**: Employ sampling to ascertain data availability.
|
||||
|
||||
### Network Distribution
|
||||
|
||||
The NomosDA network is segmented into `num_subnets` subnetworks.
|
||||
These subnetworks represent subsets of peers from the overarching network,
|
||||
each responsible for a distinct portion of the distributed encoded data.
|
||||
Peers in the network may engage in one or multiple subnetworks,
|
||||
contingent upon network size and participant count.
|
||||
|
||||
### Sub-protocols
|
||||
|
||||
The NomosDA protocol consists of the following sub-protocols:
|
||||
|
||||
- **Dispersal**: Describes how executors distribute encoded data blobs to subnetworks.
|
||||
[NomosDA Dispersal](https://www.notion.so/NomosDA-Dispersal-1818f96fb65c805ca257cb14798f24d4?pvs=21)
|
||||
- **Replication**: Defines how DA nodes distribute encoded data blobs within subnetworks.
|
||||
[NomosDA Subnetwork Replication](https://www.notion.so/NomosDA-Subnetwork-Replication-1818f96fb65c80119fa0e958a087cc2b?pvs=21)
|
||||
- **Sampling**: Used by sampling clients (e.g., light clients) to verify the availability of previously dispersed
|
||||
and replicated data.
|
||||
[NomosDA Sampling](https://www.notion.so/NomosDA-Sampling-1538f96fb65c8031a44cf7305d271779?pvs=21)
|
||||
- **Reconstruction**: Describes gathering and decoding dispersed data back into its original form.
|
||||
[NomosDA Reconstruction](https://www.notion.so/NomosDA-Reconstruction-1828f96fb65c80b2bbb9f4c5a0cf26a5?pvs=21)
|
||||
- **Indexing**: Tracks and exposes blob metadata on-chain.
|
||||
[NomosDA Indexing](https://www.notion.so/NomosDA-Indexing-1bb8f96fb65c8044b635da9df20c2411?pvs=21)
|
||||
|
||||
## Construction
|
||||
|
||||
### NomosDA Network Registration
|
||||
|
||||
Entities wishing to participate in NomosDA must declare their role via [SDP](https://www.notion.so/Final-Draft-Validator-Role-Protocol-17b8f96fb65c80c69c2ef55e22e29506) (Service Declaration Protocol).
|
||||
Once declared, they're accounted for in the subnetwork construction.
|
||||
|
||||
This enables participation in:
|
||||
|
||||
- Dispersal (as executor)
|
||||
- Replication & sampling (as DA node)
|
||||
- Sampling (as light node)
|
||||
|
||||
### Subnetwork Assignment
|
||||
|
||||
The NomosDA network comprises `num_subnets` subnetworks,
|
||||
which are virtual in nature.
|
||||
A subnetwork is a subset of peers grouped together so nodes know who they should connect with,
|
||||
serving as groupings of peers tasked with executing the dispersal and replication sub-protocols.
|
||||
In each subnetwork, participants establish a fully connected overlay,
|
||||
ensuring all nodes maintain permanent connections for the lifetime of the SDP set
|
||||
with peers within the same subnetwork.
|
||||
Nodes refer to nodes in the Data Availability SDP set to ascertain their connectivity requirements across subnetworks.
|
||||
|
||||
#### Assignment Algorithm
|
||||
|
||||
The concrete distribution algorithm is described in the following specification:
|
||||
[DA Subnetwork Assignation](https://www.notion.so/DA-Subnetwork-Assignation-217261aa09df80fc8bb9cf46092741ce)
|
||||
|
||||
## Executor Connections
|
||||
|
||||
Each executor maintains a connection with one peer per subnetwork,
|
||||
necessitating at least `num_subnets` stable and healthy connections.
|
||||
Executors are expected to allocate adequate resources to sustain these connections.
|
||||
An example algorithm for peer selection would be:
|
||||
|
||||
```python
|
||||
def select_peers(
    subnetworks: Sequence[Set[PeerId]],
    filtered_subnetworks: Set[int],
    filtered_peers: Set[PeerId]
) -> Set[PeerId]:
    """Pick one connectable peer from every eligible subnetwork.

    A subnetwork is skipped when its index appears in
    ``filtered_subnetworks`` or when removing ``filtered_peers``
    leaves it with no members; otherwise an arbitrary remaining
    peer is selected from it.
    """
    selected = set()
    for index, members in enumerate(subnetworks):
        if index in filtered_subnetworks:
            continue
        candidates = members - filtered_peers
        if candidates:
            # Arbitrary representative of the remaining peers.
            selected.add(next(iter(candidates)))
    return selected
|
||||
```
|
||||
|
||||
## NomosDA Protocol Steps
|
||||
|
||||
### Dispersal
|
||||
|
||||
1. The NomosDA protocol is initiated by executors
|
||||
who perform data encoding as outlined in the [Encoding Specification](https://www.notion.so/NomosDA-Encoding-Verification-4d8ca269e96d4fdcb05abc70426c5e7c).
|
||||
2. Executors prepare and distribute each encoded data portion
|
||||
to its designated subnetwork (from `0` to `num_subnets - 1` ).
|
||||
3. Executors might opt to perform sampling to confirm successful dispersal.
|
||||
4. Post-dispersal, executors publish the dispersed `blob_id` and metadata to the mempool. <!-- TODO: add link to dispersal document-->
|
||||
|
||||
### Replication
|
||||
|
||||
DA nodes receive columns from dispersal or replication
|
||||
and validate the data encoding.
|
||||
Upon successful validation,
|
||||
they replicate the validated column to connected peers within their subnetwork.
|
||||
Replication occurs once per blob; subsequent validations of the same blob are discarded.
|
||||
|
||||
### Sampling
|
||||
|
||||
1. Sampling is [invoked based on the node's current role](https://www.notion.so/1538f96fb65c8031a44cf7305d271779?pvs=25#15e8f96fb65c8006b9d7f12ffdd9a159).
|
||||
2. The node selects `sample_size` random subnetworks
|
||||
and queries each for the availability of the corresponding column for the sampled blob. Sampling is deemed successful only if all queried subnetworks respond affirmatively.
|
||||
|
||||
- If `num_subnets` is 2048, `sample_size` is [20 as per the sampling research](https://www.notion.so/1708f96fb65c80a08c97d728cb8476c3?pvs=25#1708f96fb65c80bab6f9c6a946940078)
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
SamplingClient ->> DANode_1: Request
|
||||
DANode_1 -->> SamplingClient: Response
|
||||
    SamplingClient ->> DANode_2: Request
|
||||
DANode_2 -->> SamplingClient: Response
|
||||
SamplingClient ->> DANode_n: Request
|
||||
DANode_n -->> SamplingClient: Response
|
||||
```
|
||||
|
||||
### Network Schematics
|
||||
|
||||
The overall network and protocol interactions are represented by the following diagram:
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph Replication
|
||||
subgraph Subnetwork_N
|
||||
N10 -->|Replicate| N20
|
||||
N20 -->|Replicate| N30
|
||||
N30 -->|Replicate| N10
|
||||
end
|
||||
subgraph ...
|
||||
end
|
||||
subgraph Subnetwork_0
|
||||
N1 -->|Replicate| N2
|
||||
N2 -->|Replicate| N3
|
||||
N3 -->|Replicate| N1
|
||||
end
|
||||
end
|
||||
subgraph Sampling
|
||||
N9 -->|Sample 0| N2
|
||||
N9 -->|Sample S| N20
|
||||
end
|
||||
subgraph Dispersal
|
||||
Executor -->|Disperse| N1
|
||||
Executor -->|Disperse| N10
|
||||
end
|
||||
```
|
||||
|
||||
## Details
|
||||
|
||||
### Network specifics
|
||||
|
||||
The NomosDA network is engineered for connection efficiency.
|
||||
Executors manage numerous open connections,
|
||||
utilizing their resource capabilities.
|
||||
DA nodes, with their resource constraints,
|
||||
are designed to maximize connection reuse.
|
||||
|
||||
NomosDA uses [multiplexed](https://docs.libp2p.io/concepts/transports/quic/#quic-native-multiplexing) streams over [QUIC](https://docs.libp2p.io/concepts/transports/quic/) connections.
|
||||
For each sub-protocol, a stream protocol ID is defined to negotiate the protocol,
|
||||
triggering the specific protocol once established:
|
||||
|
||||
- Dispersal: /nomos/da/{version}/dispersal
|
||||
- Replication: /nomos/da/{version}/replication
|
||||
- Sampling: /nomos/da/{version}/sampling
|
||||
|
||||
Through these multiplexed streams,
|
||||
DA nodes can utilize the same connection for all sub-protocols.
|
||||
This, combined with virtual subnetworks (membership sets),
|
||||
ensures the overlay node distribution is scalable for networks of any size.
|
||||
|
||||
## References
|
||||
|
||||
- [Encoding Specification](https://www.notion.so/NomosDA-Encoding-Verification-4d8ca269e96d4fdcb05abc70426c5e7c)
|
||||
- [Encoding & Verification Specification](https://www.notion.so/NomosDA-Encoding-Verification-4d8ca269e96d4fdcb05abc70426c5e7c)
|
||||
- [NomosDA Dispersal](https://www.notion.so/NomosDA-Dispersal-1818f96fb65c805ca257cb14798f24d4?pvs=21)
|
||||
- [NomosDA Subnetwork Replication](https://www.notion.so/NomosDA-Subnetwork-Replication-1818f96fb65c80119fa0e958a087cc2b?pvs=21)
|
||||
- [DA Subnetwork Assignation](https://www.notion.so/DA-Subnetwork-Assignation-217261aa09df80fc8bb9cf46092741ce)
|
||||
- [NomosDA Sampling](https://www.notion.so/NomosDA-Sampling-1538f96fb65c8031a44cf7305d271779?pvs=21)
|
||||
- [NomosDA Reconstruction](https://www.notion.so/NomosDA-Reconstruction-1828f96fb65c80b2bbb9f4c5a0cf26a5?pvs=21)
|
||||
- [NomosDA Indexing](https://www.notion.so/NomosDA-Indexing-1bb8f96fb65c8044b635da9df20c2411?pvs=21)
|
||||
- [SDP](https://www.notion.so/Final-Draft-Validator-Role-Protocol-17b8f96fb65c80c69c2ef55e22e29506)
|
||||
- [invoked based on the node's current role](https://www.notion.so/1538f96fb65c8031a44cf7305d271779?pvs=25#15e8f96fb65c8006b9d7f12ffdd9a159)
|
||||
- [20 as per the sampling research](https://www.notion.so/1708f96fb65c80a08c97d728cb8476c3?pvs=25#1708f96fb65c80bab6f9c6a946940078)
|
||||
- [multiplexed](https://docs.libp2p.io/concepts/transports/quic/#quic-native-multiplexing)
|
||||
- [QUIC](https://docs.libp2p.io/concepts/transports/quic/)
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,249 +0,0 @@
|
||||
# P2P-HARDWARE-REQUIREMENTS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos p2p Network Hardware Requirements Specification |
|
||||
| Slug | 137 |
|
||||
| Status | raw |
|
||||
| Category | infrastructure |
|
||||
| Editor | Daniel Sanchez-Quiros <danielsq@status.im> |
|
||||
| Contributors | Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/p2p-hardware-requirements.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/p2p-hardware-requirements.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/nomos/raw/p2p-hardware-requirements.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/nomos/raw/p2p-hardware-requirements.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/nomos/raw/p2p-hardware-requirements.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-09-25** — [`34bbd7a`](https://github.com/logos-co/logos-lips/blob/34bbd7af90df4baefd6dfeb89c625e846b2db14a/nomos/raw/p2p-hardware-requirements.md) — Created nomos/raw/hardware-requirements.md file (#172)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the hardware requirements for running various types of Nomos blockchain nodes. Hardware needs vary significantly based on the node's role, from lightweight verification nodes to high-performance Zone Executors. The requirements are designed to support diverse participation levels while ensuring network security and performance.
|
||||
|
||||
## Motivation
|
||||
|
||||
The Nomos network is designed to be inclusive and accessible across a wide range of hardware configurations. By defining clear hardware requirements for different node types, we enable:
|
||||
|
||||
1. **Inclusive Participation**: Allow users with limited resources to participate as Light Nodes
|
||||
2. **Scalable Infrastructure**: Support varying levels of network participation based on available resources
|
||||
3. **Performance Optimization**: Ensure adequate resources for computationally intensive operations
|
||||
4. **Network Security**: Maintain network integrity through properly resourced validator nodes
|
||||
5. **Service Quality**: Define requirements for optional services that enhance network functionality
|
||||
|
||||
**Important Notice**: These hardware requirements are preliminary and subject to revision based on implementation testing and real-world network performance data.
|
||||
|
||||
## Specification
|
||||
|
||||
### Node Types Overview
|
||||
|
||||
Hardware requirements vary based on the node's role and services:
|
||||
|
||||
- **Light Node**: Minimal verification with minimal resources
|
||||
- **Basic Bedrock Node**: Standard validation participation
|
||||
- **Service Nodes**: Enhanced capabilities for optional network services
|
||||
|
||||
### Light Node
|
||||
|
||||
Light Nodes provide network verification with minimal resource requirements, suitable for resource-constrained environments.
|
||||
|
||||
**Target Use Cases:**
|
||||
|
||||
- Mobile devices and smartphones
|
||||
- Single-board computers (Raspberry Pi, etc.)
|
||||
- IoT devices with network connectivity
|
||||
- Users with limited hardware resources
|
||||
|
||||
**Hardware Requirements:**
|
||||
|
||||
| Component | Specification |
|
||||
|-----------|---------------|
|
||||
| **CPU** | Low-power processor (smartphone/SBC capable) |
|
||||
| **Memory (RAM)** | 512 MB |
|
||||
| **Storage** | Minimal (few GB) |
|
||||
| **Network** | Reliable connection, 1 Mbps free bandwidth |
|
||||
|
||||
### Basic Bedrock Node (Validator)
|
||||
|
||||
Basic validators participate in Bedrock consensus using typical consumer hardware.
|
||||
|
||||
**Target Use Cases:**
|
||||
|
||||
- Individual validators on consumer hardware
|
||||
- Small-scale validation operations
|
||||
- Entry-level network participation
|
||||
|
||||
**Hardware Requirements:**
|
||||
|
||||
| Component | Specification |
|
||||
|-----------|---------------|
|
||||
| **CPU** | 2 cores, 2 GHz modern multi-core processor |
|
||||
| **Memory (RAM)** | 1 GB minimum |
|
||||
| **Storage** | SSD with 100+ GB free space, expandable |
|
||||
| **Network** | Reliable connection, 1 Mbps free bandwidth |
|
||||
|
||||
### Service-Specific Requirements
|
||||
|
||||
Nodes can optionally run additional Bedrock Services that require enhanced resources beyond basic validation.
|
||||
|
||||
#### Data Availability (DA) Service
|
||||
|
||||
DA Service nodes store and serve data shares for the network's data availability layer.
|
||||
|
||||
**Service Role:**
|
||||
|
||||
- Store blockchain data and blob data long-term
|
||||
- Serve data shares to requesting nodes
|
||||
- Maintain high availability for data retrieval
|
||||
|
||||
**Additional Requirements:**
|
||||
|
||||
| Component | Specification | Rationale |
|
||||
|-----------|---------------|-----------|
|
||||
| **CPU** | Same as Basic Bedrock Node | Standard processing needs |
|
||||
| **Memory (RAM)** | Same as Basic Bedrock Node | Standard memory needs |
|
||||
| **Storage** | **Fast SSD, 500+ GB free** | Long-term chain and blob storage |
|
||||
| **Network** | **High bandwidth (10+ Mbps)** | Concurrent data serving |
|
||||
| **Connectivity** | **Stable, accessible external IP** | Direct peer connections |
|
||||
|
||||
**Network Requirements:**
|
||||
|
||||
- Capacity to handle multiple concurrent connections
|
||||
- Stable external IP address for direct peer access
|
||||
- Low latency for efficient data serving
|
||||
|
||||
#### Blend Protocol Service
|
||||
|
||||
Blend Protocol nodes provide anonymous message routing capabilities.
|
||||
|
||||
**Service Role:**
|
||||
|
||||
- Route messages anonymously through the network
|
||||
- Provide timing obfuscation for privacy
|
||||
- Maintain multiple concurrent connections
|
||||
|
||||
**Additional Requirements:**
|
||||
|
||||
| Component | Specification | Rationale |
|
||||
|-----------|---------------|-----------|
|
||||
| **CPU** | Same as Basic Bedrock Node | Standard processing needs |
|
||||
| **Memory (RAM)** | Same as Basic Bedrock Node | Standard memory needs |
|
||||
| **Storage** | Same as Basic Bedrock Node | Standard storage needs |
|
||||
| **Network** | **Stable connection (10+ Mbps)** | Multiple concurrent connections |
|
||||
| **Connectivity** | **Stable, accessible external IP** | Direct peer connections |
|
||||
|
||||
**Network Requirements:**
|
||||
|
||||
- Low-latency connection for effective message blending
|
||||
- Stable connection for timing obfuscation
|
||||
- Capability to handle multiple simultaneous connections
|
||||
|
||||
#### Executor Network Service
|
||||
|
||||
Zone Executors perform the most computationally intensive work in the network.
|
||||
|
||||
**Service Role:**
|
||||
|
||||
- Execute Zone state transitions
|
||||
- Generate zero-knowledge proofs
|
||||
- Process complex computational workloads
|
||||
|
||||
**Critical Performance Note**: Zone Executors perform the heaviest computational work in the network. High-performance hardware is crucial for effective participation and may provide competitive advantages in execution markets.
|
||||
|
||||
**Hardware Requirements:**
|
||||
|
||||
| Component | Specification | Rationale |
|
||||
|-----------|---------------|-----------|
|
||||
| **CPU** | **Very high-performance multi-core processor** | Zone logic execution and ZK proving |
|
||||
| **Memory (RAM)** | **32+ GB strongly recommended** | Complex Zone execution requirements |
|
||||
| **Storage** | Same as Basic Bedrock Node | Standard storage needs |
|
||||
| **GPU** | **Highly recommended/often necessary** | Efficient ZK proof generation |
|
||||
| **Network** | **High bandwidth (10+ Mbps)** | Data dispersal and high connection load |
|
||||
|
||||
**GPU Requirements:**
|
||||
|
||||
- **NVIDIA**: CUDA-enabled GPU (RTX 3090 or equivalent recommended)
|
||||
- **Apple**: Metal-compatible Apple Silicon
|
||||
- **Performance Impact**: Strong GPU significantly reduces proving time
|
||||
|
||||
**Network Requirements:**
|
||||
|
||||
- Support for **2048+ direct UDP connections** to DA Nodes (for blob publishing)
|
||||
- High bandwidth for data dispersal operations
|
||||
- Stable connection for continuous operation
|
||||
|
||||
*Note: DA Nodes utilizing [libp2p](https://docs.libp2p.io/) connections need sufficient capacity to receive and serve data shares over many connections.*
|
||||
|
||||
## Implementation Requirements
|
||||
|
||||
### Minimum Requirements
|
||||
|
||||
All Nomos nodes MUST meet:
|
||||
|
||||
1. **Basic connectivity** to the Nomos network via [libp2p](https://docs.libp2p.io/)
|
||||
2. **Adequate storage** for their designated role
|
||||
3. **Sufficient processing power** for their service level
|
||||
4. **Reliable network connection** with appropriate bandwidth for [QUIC](https://docs.libp2p.io/concepts/transports/quic/) transport
|
||||
|
||||
### Optional Enhancements
|
||||
|
||||
Node operators MAY implement:
|
||||
|
||||
- Hardware redundancy for critical services
|
||||
- Enhanced cooling for high-performance configurations
|
||||
- Dedicated network connections for service nodes utilizing [libp2p](https://docs.libp2p.io/) protocols
|
||||
- Backup power systems for continuous operation
|
||||
|
||||
### Resource Scaling
|
||||
|
||||
Requirements may vary based on:
|
||||
|
||||
- **Network Load**: Higher network activity increases resource demands
|
||||
- **Zone Complexity**: More complex Zones require additional computational resources
|
||||
- **Service Combinations**: Running multiple services simultaneously increases requirements
|
||||
- **Geographic Location**: Network latency affects optimal performance requirements
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Hardware Security
|
||||
|
||||
1. **Secure Storage**: Use encrypted storage for sensitive node data
|
||||
2. **Network Security**: Implement proper firewall configurations
|
||||
3. **Physical Security**: Secure physical access to node hardware
|
||||
4. **Backup Strategies**: Maintain secure backups of critical data
|
||||
|
||||
### Performance Security
|
||||
|
||||
1. **Resource Monitoring**: Monitor resource usage to detect anomalies
|
||||
2. **Redundancy**: Plan for hardware failures in critical services
|
||||
3. **Isolation**: Consider containerization or virtualization for service isolation
|
||||
4. **Update Management**: Maintain secure update procedures for hardware drivers
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Scalability
|
||||
|
||||
- **Light Nodes**: Minimal resource footprint, high scalability
|
||||
- **Validators**: Moderate resource usage, network-dependent scaling
|
||||
- **Service Nodes**: High resource usage, specialized scaling requirements
|
||||
|
||||
### Resource Efficiency
|
||||
|
||||
- **CPU Usage**: Optimized algorithms for different hardware tiers
|
||||
- **Memory Usage**: Efficient data structures for constrained environments
|
||||
- **Storage Usage**: Configurable retention policies and compression
|
||||
- **Network Usage**: Adaptive bandwidth utilization based on [libp2p](https://docs.libp2p.io/) capacity and [QUIC](https://docs.libp2p.io/concepts/transports/quic/) connection efficiency
|
||||
|
||||
## References
|
||||
|
||||
1. [libp2p protocol](https://docs.libp2p.io/)
|
||||
2. [QUIC protocol](https://docs.libp2p.io/concepts/transports/quic/)
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,385 +0,0 @@
|
||||
# P2P-NAT-SOLUTION
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos P2P Network NAT Solution Specification |
|
||||
| Slug | 138 |
|
||||
| Status | raw |
|
||||
| Category | networking |
|
||||
| Editor | Antonio Antonino <antonio@status.im> |
|
||||
| Contributors | Álvaro Castro-Castilla <alvaro@status.im>, Daniel Sanchez-Quiros <danielsq@status.im>, Petar Radovic <petar@status.im>, Gusto Bacvinka <augustinas@status.im>, Youngjoon Lee <youngjoon@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/p2p-nat-solution.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/p2p-nat-solution.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/nomos/raw/p2p-nat-solution.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/nomos/raw/p2p-nat-solution.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/nomos/raw/p2p-nat-solution.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-09-25** — [`cfb3b78`](https://github.com/logos-co/logos-lips/blob/cfb3b78c71ed75f7859299c38704b809f3e33613/nomos/raw/p2p-nat-solution.md) — Created nomos/raw/p2p-nat-solution.md draft (#174)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines a comprehensive NAT (Network Address Translation) traversal solution for the Nomos P2P network. The solution enables nodes to automatically determine their NAT status and establish both outbound and inbound connections regardless of network configuration. The strategy combines [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md), dynamic port mapping protocols, and continuous verification to maximize public reachability while maintaining decentralized operation.
|
||||
|
||||
## Motivation
|
||||
|
||||
Network Address Translation presents a critical challenge for Nomos participants, particularly those operating on consumer hardware without technical expertise. The Nomos network requires a NAT traversal solution that:
|
||||
|
||||
1. **Automatic Operation**: Works out-of-the-box without user configuration
|
||||
2. **Inclusive Participation**: Enables nodes on consumer hardware to participate effectively
|
||||
3. **Decentralized Approach**: Leverages the existing Nomos P2P network rather than centralized services
|
||||
4. **Progressive Fallback**: Escalates through increasingly complex protocols as needed
|
||||
5. **Dynamic Adaptation**: Handles changing network environments and configurations
|
||||
|
||||
The solution must ensure that nodes can both establish outbound connections and accept inbound connections from other peers, maintaining network connectivity across diverse NAT configurations.
|
||||
|
||||
## Specification
|
||||
|
||||
### Terminology
|
||||
|
||||
- **Public Node**: A node that is publicly reachable via a public IP address or valid port mapping
|
||||
- **Private Node**: A node that is not publicly reachable due to NAT/firewall restrictions
|
||||
- **Dialing**: The process of establishing a connection using the [libp2p protocol](https://docs.libp2p.io/) stack
|
||||
- **NAT Status**: Whether a node is publicly reachable or hidden behind NAT
|
||||
|
||||
### Key Design Principles
|
||||
|
||||
#### Optional Configuration
|
||||
|
||||
The NAT traversal strategy must work out-of-the-box whenever possible. Users who do not want to engage in configuration should only need to install the node software package. However, users requiring full control must be able to configure every aspect of the strategy.
|
||||
|
||||
#### Decentralized Operation
|
||||
|
||||
The solution leverages the existing Nomos P2P network for coordination rather than relying on centralized third-party services. This maintains the decentralized nature of the network while providing necessary NAT traversal capabilities.
|
||||
|
||||
#### Progressive Fallback
|
||||
|
||||
The protocol begins with lightweight checks and escalates through more complex and resource-intensive protocols. Failure at any step moves the protocol to the next stage in the strategy, ensuring maximum compatibility across network configurations.
|
||||
|
||||
#### Dynamic Network Environment
|
||||
|
||||
Unless explicitly configured for static addresses, each node's public or private status is assumed to be dynamic. A once publicly-reachable node can become unreachable and vice versa, requiring continuous monitoring and adaptation.
|
||||
|
||||
### Node Discovery Considerations
|
||||
|
||||
The Nomos public network encourages participation from a large number of nodes, many deployed through simple installation procedures. Some nodes will not achieve Public status, but the discovery protocol must track these peers and allow other nodes to discover them. This prevents network partitioning and ensures Private nodes remain accessible to other participants.
|
||||
|
||||
### NAT Traversal Protocol
|
||||
|
||||
#### Protocol Requirements
|
||||
|
||||
**Each node MUST:**
|
||||
|
||||
- Run an [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) client, except for nodes statically configured as Public
|
||||
- Use the [Identify protocol](https://github.com/libp2p/specs/blob/master/identify/README.md) to advertise support for:
|
||||
- `/nomos/autonat/2/dial-request` for main network
|
||||
- `/nomos-testnet/autonat/2/dial-request` for public testnet
|
||||
- `/nomos/autonat/2/dial-back` and `/nomos-testnet/autonat/2/dial-back` respectively
|
||||
|
||||
#### NAT State Machine
|
||||
|
||||
The NAT traversal process follows a multi-phase state machine:
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
Start@{shape: circle, label: "Start"} -->|Preconfigured public IP or port mapping| StaticPublic[Statically configured as<br/>**Public**]
|
||||
subgraph Phase0 [Phase 0]
|
||||
Start -->|Default configuration| Boot
|
||||
end
|
||||
subgraph Phase1 [Phase 1]
|
||||
Boot[Bootstrap and discover AutoNAT servers]--> Inspect
|
||||
Inspect[Inspect own IP addresses]-->|At least 1 IP address in the public range| ConfirmPublic[AutoNAT]
|
||||
end
|
||||
subgraph Phase2 [Phase 2]
|
||||
Inspect -->|No IP addresses in the public range| MapPorts[Port Mapping Client<br/>UPnP/NAT-PMP/PCP]
|
||||
MapPorts -->|Successful port map| ConfirmMapPorts[AutoNAT]
|
||||
end
|
||||
ConfirmPublic -->|Node's IP address reachable by AutoNAT server| Public[**Public** Node]
|
||||
ConfirmPublic -->|Node's IP address not reachable by AutoNAT server or Timeout| MapPorts
|
||||
ConfirmMapPorts -->|Mapped IP address and port reachable by AutoNAT server| Public
|
||||
ConfirmMapPorts -->|Mapped IP address and port not reachable by AutoNAT server or Timeout| Private
|
||||
MapPorts -->|Failure or Timeout| Private[**Private** Node]
|
||||
subgraph Phase3 [Phase 3]
|
||||
Public -->Monitor
|
||||
Private --> Monitor
|
||||
end
|
||||
Monitor[Network Monitoring] -->|Restart| Inspect
|
||||
```
|
||||
|
||||
### Phase Implementation
|
||||
|
||||
#### Phase 0: Bootstrapping and Identifying Public Nodes
|
||||
|
||||
If the node is statically configured by the operator to be Public, the procedure stops here.
|
||||
|
||||
The node utilizes bootstrapping and discovery mechanisms to find other Public nodes. The [Identify protocol](https://github.com/libp2p/specs/blob/master/identify/README.md) confirms which detected Public nodes support [AutoNAT v2](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md).
|
||||
|
||||
#### Phase 1: NAT Detection
|
||||
|
||||
The node starts an [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) client and inspects its own addresses. For each public IP address, the node verifies public reachability via [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md). If any public IP addresses are confirmed, the node assumes Public status and moves to Phase 3. Otherwise, it continues to Phase 2.
|
||||
|
||||
#### Phase 2: Automated Port Mapping
|
||||
|
||||
The node attempts to secure a port mapping on the default gateway using:
|
||||
|
||||
- **[PCP](https://datatracker.ietf.org/doc/html/rfc6887)** (Port Control Protocol) - Most reliable
|
||||
- **[NAT-PMP](https://datatracker.ietf.org/doc/html/rfc6886)** (NAT Port Mapping Protocol) - Second most reliable
|
||||
- **[UPnP-IGD](https://datatracker.ietf.org/doc/html/rfc6970)** (Universal Plug and Play Internet Gateway Device) - Most widely deployed
|
||||
|
||||
**Port Mapping Algorithm:**
|
||||
|
||||
```python
|
||||
def try_port_mapping():
|
||||
# Step 1: Get the local IPv4 address
|
||||
local_ip = get_local_ipv4_address()
|
||||
|
||||
# Step 2: Get the default gateway IPv4 address
|
||||
gateway_ip = get_default_gateway_address()
|
||||
|
||||
# Step 3: Abort if local or gateway IP could not be determined
|
||||
if not local_ip or not gateway_ip:
|
||||
return "Mapping failed: Unable to get local or gateway IPv4"
|
||||
|
||||
# Step 4: Probe the gateway for protocol support
|
||||
supports_pcp = probe_pcp(gateway_ip)
|
||||
supports_nat_pmp = probe_nat_pmp(gateway_ip)
|
||||
supports_upnp = probe_upnp(gateway_ip) # Optional for logging
|
||||
|
||||
# Step 5-9: Try protocols in order of reliability
|
||||
# PCP (most reliable) -> NAT-PMP -> UPnP -> fallback attempts
|
||||
|
||||
protocols = [
|
||||
(supports_pcp, try_pcp_mapping),
|
||||
(supports_nat_pmp, try_nat_pmp_mapping),
|
||||
(True, try_upnp_mapping), # Always try UPnP
|
||||
(not supports_pcp, try_pcp_mapping), # Fallback
|
||||
(not supports_nat_pmp, try_nat_pmp_mapping) # Last resort
|
||||
]
|
||||
|
||||
for supported, mapping_func in protocols:
|
||||
if supported:
|
||||
mapping = mapping_func(local_ip, gateway_ip)
|
||||
if mapping:
|
||||
return mapping
|
||||
|
||||
return "Mapping failed: No protocol succeeded"
|
||||
```
|
||||
|
||||
If mapping succeeds, the node uses [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) to confirm public reachability. Upon confirmation, the node assumes Public status. Otherwise, it assumes Private status.
|
||||
|
||||
**Port Mapping Sequence:**
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
box Node
|
||||
participant AutoNAT Client
|
||||
participant NAT State Machine
|
||||
participant Port Mapping Client
|
||||
end
|
||||
participant Router
|
||||
|
||||
alt Mapping is successful
|
||||
Note left of AutoNAT Client: Phase 2
|
||||
Port Mapping Client ->> +Router: Requests new mapping
|
||||
Router ->> Port Mapping Client: Confirms new mapping
|
||||
Port Mapping Client ->> NAT State Machine: Mapping secured
|
||||
NAT State Machine ->> AutoNAT Client: Requests confirmation<br/>that mapped address<br/>is publicly reachable
|
||||
|
||||
alt Node asserts Public status
|
||||
AutoNAT Client ->> NAT State Machine: Mapped address<br/>is publicly reachable
|
||||
Note left of AutoNAT Client: Phase 3<br/>Network Monitoring
|
||||
else Node asserts Private status
|
||||
AutoNAT Client ->> NAT State Machine: Mapped address<br/>is not publicly reachable
|
||||
Note left of AutoNAT Client: Phase 3<br/>Network Monitoring
|
||||
end
|
||||
else Mapping fails, node asserts Private status
|
||||
Note left of AutoNAT Client: Phase 2
|
||||
Port Mapping Client ->> Router: Requests new mapping
|
||||
Router ->> Port Mapping Client: Refuses new mapping or Timeout
|
||||
Port Mapping Client ->> NAT State Machine: Mapping failed
|
||||
Note left of AutoNAT Client: Phase 3<br/>Network Monitoring
|
||||
end
|
||||
```
|
||||
|
||||
#### Phase 3: Network Monitoring
|
||||
|
||||
Unless explicitly configured, nodes must monitor their network status and restart from Phase 1 when changes are detected.
|
||||
|
||||
**Public Node Monitoring:**
|
||||
|
||||
A Public node must restart when:
|
||||
|
||||
- [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) client no longer confirms public reachability
|
||||
- A previously successful port mapping is lost or refresh fails
|
||||
|
||||
**Private Node Monitoring:**
|
||||
|
||||
A Private node must restart when:
|
||||
|
||||
- It gains a new public IP address
|
||||
- Port mapping is likely to succeed (gateway change, sufficient time passed)
|
||||
|
||||
**Network Monitoring Sequence:**
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant AutoNAT Server
|
||||
box Node
|
||||
participant AutoNAT Client
|
||||
participant NAT State Machine
|
||||
participant Port Mapping Client
|
||||
end
|
||||
participant Router
|
||||
|
||||
Note left of AutoNAT Server: Phase 3<br/>Network Monitoring
|
||||
par Refresh mapping and monitor changes
|
||||
loop periodically refreshes mapping
|
||||
Port Mapping Client ->> Router: Requests refresh
|
||||
Router ->> Port Mapping Client: Confirms mapping refresh
|
||||
end
|
||||
break Mapping is lost, the node loses Public status
|
||||
Router ->> Port Mapping Client: Refresh failed or mapping dropped
|
||||
Port Mapping Client ->> NAT State Machine: Mapping lost
|
||||
NAT State Machine ->> NAT State Machine: Restart
|
||||
end
|
||||
and Monitor public reachability of mapped addresses
|
||||
loop periodically checks public reachability
|
||||
AutoNAT Client ->> AutoNAT Server: Requests dialback
|
||||
AutoNAT Server ->> AutoNAT Client: Dialback successful
|
||||
end
|
||||
break Dialback fails, the node loses public reachability
|
||||
AutoNAT Server ->> AutoNAT Client: Dialback failed or Timeout
|
||||
AutoNAT Client ->> NAT State Machine: Public reachability lost
|
||||
NAT State Machine ->> NAT State Machine: Restart
|
||||
end
|
||||
end
|
||||
Note left of AutoNAT Server: Phase 1
|
||||
```
|
||||
|
||||
### Public Node Responsibilities
|
||||
|
||||
**A Public node MUST:**
|
||||
|
||||
- Run an [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) server
|
||||
- Listen on and advertise via [Identify protocol](https://github.com/libp2p/specs/blob/master/identify/README.md) its publicly reachable [multiaddresses](https://github.com/libp2p/specs/blob/master/addressing/README.md):
|
||||
|
||||
`/ip4/{public_peer_ip}/udp/{port}/quic-v1/p2p/{public_peer_id}`
|
||||
|
||||
- Periodically renew port mappings according to protocol recommendations
|
||||
- Maintain high availability for [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) services
|
||||
|
||||
### Peer Dialing
|
||||
|
||||
Other peers can always dial a Public peer using its publicly reachable [multiaddresses](https://github.com/libp2p/specs/blob/master/addressing/README.md):
|
||||
|
||||
`/ip4/{public_peer_ip}/udp/{port}/quic-v1/p2p/{public_peer_id}`
|
||||
|
||||
## Implementation Requirements
|
||||
|
||||
### Mandatory Components
|
||||
|
||||
All Nomos nodes MUST implement:
|
||||
|
||||
1. **[AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) client** for NAT status detection
|
||||
2. **Port mapping clients** for [PCP](https://datatracker.ietf.org/doc/html/rfc6887), [NAT-PMP](https://datatracker.ietf.org/doc/html/rfc6886), and [UPnP-IGD](https://datatracker.ietf.org/doc/html/rfc6970)
|
||||
3. **[Identify protocol](https://github.com/libp2p/specs/blob/master/identify/README.md)** for capability advertisement
|
||||
4. **Network monitoring** for status change detection
|
||||
|
||||
### Optional Enhancements
|
||||
|
||||
Nodes MAY implement:
|
||||
|
||||
- Custom port mapping retry strategies
|
||||
- Enhanced network change detection
|
||||
- Advanced [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) server load balancing
|
||||
- Backup connectivity mechanisms
|
||||
|
||||
### Configuration Parameters
|
||||
|
||||
#### [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) Configuration
|
||||
|
||||
```yaml
|
||||
autonat:
|
||||
client:
|
||||
dial_timeout: 15s
|
||||
max_peer_addresses: 16
|
||||
throttle_global_limit: 30
|
||||
throttle_peer_limit: 3
|
||||
server:
|
||||
dial_timeout: 30s
|
||||
max_peer_addresses: 16
|
||||
throttle_global_limit: 30
|
||||
throttle_peer_limit: 3
|
||||
```
|
||||
|
||||
#### Port Mapping Configuration
|
||||
|
||||
```yaml
|
||||
port_mapping:
|
||||
pcp:
|
||||
timeout: 30s
|
||||
lifetime: 7200s # 2 hours
|
||||
retry_interval: 300s
|
||||
nat_pmp:
|
||||
timeout: 30s
|
||||
lifetime: 7200s
|
||||
retry_interval: 300s
|
||||
upnp:
|
||||
timeout: 30s
|
||||
lease_duration: 7200s
|
||||
retry_interval: 300s
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### NAT Traversal Security
|
||||
|
||||
1. **Port Mapping Validation**: Verify that requested port mappings are actually created
|
||||
2. **[AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) Server Trust**: Implement peer reputation for [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) servers
|
||||
3. **Gateway Communication**: Secure communication with NAT devices
|
||||
4. **Address Validation**: Validate public addresses before advertisement
|
||||
|
||||
### Privacy Considerations
|
||||
|
||||
1. **IP Address Exposure**: Public nodes necessarily expose IP addresses
|
||||
2. **Traffic Analysis**: Monitor for patterns that could reveal node behavior
|
||||
3. **Gateway Information**: Minimize exposure of internal network topology
|
||||
|
||||
### Denial of Service Protection
|
||||
|
||||
1. **[AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) Rate Limiting**: Implement request throttling for [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) services
|
||||
2. **Port Mapping Abuse**: Prevent excessive port mapping requests
|
||||
3. **Resource Exhaustion**: Limit concurrent NAT traversal attempts
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Scalability
|
||||
|
||||
- **[AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md) Server Load**: Distributed across Public nodes
|
||||
- **Port Mapping Overhead**: Minimal ongoing resource usage
|
||||
- **Network Monitoring**: Efficient periodic checks
|
||||
|
||||
### Reliability
|
||||
|
||||
- **Fallback Mechanisms**: Multiple protocols ensure high success rates
|
||||
- **Continuous Monitoring**: Automatic recovery from connectivity loss
|
||||
- **Protocol Redundancy**: Multiple port mapping protocols increase reliability
|
||||
|
||||
## References
|
||||
|
||||
1. [Multiaddress spec](https://github.com/libp2p/specs/blob/master/addressing/README.md)
|
||||
2. [Identify protocol spec](https://github.com/libp2p/specs/blob/master/identify/README.md)
|
||||
3. [AutoNAT v2 protocol spec](https://github.com/libp2p/specs/blob/master/autonat/autonat-v2.md)
|
||||
4. [Circuit Relay v2 protocol spec](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md)
|
||||
5. [PCP - RFC 6887](https://datatracker.ietf.org/doc/html/rfc6887)
|
||||
6. [NAT-PMP - RFC 6886](https://datatracker.ietf.org/doc/html/rfc6886)
|
||||
7. [UPnP IGD - RFC 6970](https://datatracker.ietf.org/doc/html/rfc6970)
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,193 +0,0 @@
|
||||
# P2P-NETWORK-BOOTSTRAPPING
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos P2P Network Bootstrapping Specification |
|
||||
| Slug | 134 |
|
||||
| Status | raw |
|
||||
| Category | networking |
|
||||
| Editor | Daniel Sanchez-Quiros <danielsq@status.im> |
|
||||
| Contributors | Álvaro Castro-Castilla <alvaro@status.im>, Petar Radovic <petar@status.im>, Gusto Bacvinka <augustinas@status.im>, Antonio Antonino <antonio@status.im>, Youngjoon Lee <youngjoon@status.im>, Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/p2p-network-bootstrapping.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/p2p-network-bootstrapping.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/nomos/raw/p2p-network-bootstrapping.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/nomos/raw/p2p-network-bootstrapping.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/nomos/raw/p2p-network-bootstrapping.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-09-25** — [`aa8a3b0`](https://github.com/logos-co/logos-lips/blob/aa8a3b0c65470b97f5aeee85a8444c7d22dcafc8/nomos/raw/p2p-network-bootstrapping.md) — Created nomos/raw/p2p-network-bootstrapping.md draft (#175)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Introduction
|
||||
|
||||
Nomos network bootstrapping is the process by which a new node discovers peers and synchronizes with the existing decentralized network. It ensures that a node can:
|
||||
|
||||
1. **Discover Peers** – Find other active nodes in the network.
|
||||
2. **Establish Connections** – Securely connect to trusted peers.
|
||||
3. **Negotiate (libp2p) Protocols** – Ensure that other peers support the protocols the node needs.
|
||||
|
||||
## Overview
|
||||
|
||||
The Nomos P2P network bootstrapping strategy relies on a designated subset of **bootstrap nodes** to facilitate secure and efficient node onboarding. These nodes serve as the initial entry points for new network participants.
|
||||
|
||||
### Key Design Principles
|
||||
|
||||
#### Trusted Bootstrap Nodes
|
||||
|
||||
A curated set of publicly announced and highly available nodes ensures reliability during initial peer discovery. These nodes are configured with elevated connection limits to handle a high volume of incoming bootstrapping requests from new participants.
|
||||
|
||||
#### Node Configuration & Onboarding
|
||||
|
||||
New node operators must explicitly configure their instances with the addresses of bootstrap nodes. This configuration may be preloaded or dynamically fetched from a trusted source to minimize manual setup.
|
||||
|
||||
#### Network Integration
|
||||
|
||||
Upon initialization, the node establishes connections with the bootstrap nodes and begins participating in Nomos networking protocols. Through these connections, the node discovers additional peers, synchronizes with the network state, and engages in protocol-specific communication (e.g., consensus, block propagation).
|
||||
|
||||
### Security & Decentralization Considerations
|
||||
|
||||
**Trust Minimization**: While bootstrap nodes provide initial connectivity, the network rapidly transitions to decentralized peer discovery to prevent over-reliance on any single entity.
|
||||
|
||||
**Authenticated Announcements**: The identities and addresses of bootstrap nodes are publicly verifiable to mitigate impersonation attacks. From [libp2p documentation](https://docs.libp2p.io/concepts/transports/quic/#quic-in-libp2p):
|
||||
|
||||
> To authenticate each others' peer IDs, peers encode their peer ID into a self-signed certificate, which they sign using their host's private key.
|
||||
|
||||
**Dynamic Peer Management**: After bootstrapping, nodes continuously refine their peer lists to maintain a resilient and distributed network topology.
|
||||
|
||||
This approach ensures **rapid, secure, and scalable** network participation while preserving the decentralized ethos of the Nomos protocol.
|
||||
|
||||
## Protocol
|
||||
|
||||
### Protocol Overview
|
||||
|
||||
The bootstrapping protocol follows libp2p conventions for peer discovery and connection establishment. Implementation details are handled by the underlying libp2p stack with Nomos-specific configuration parameters.
|
||||
|
||||
### Bootstrapping Process
|
||||
|
||||
#### Step-by-Step bootstrapping process
|
||||
|
||||
1. **Node Initial Configuration**: New nodes load pre-configured bootstrap node addresses. Addresses may be `IP` or `DNS` embedded in a compatible [libp2p PeerId multiaddress](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-ids-in-multiaddrs). Node operators may choose to advertise more than one address. This is out of the scope of this protocol. For example:
|
||||
|
||||
`/ip4/198.51.100.0/udp/4242/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N` or
|
||||
|
||||
`/dns/foo.bar.net/udp/4242/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N`
|
||||
|
||||
2. **Secure Connection**: Nodes establish connections to bootstrap nodes' announced addresses and verify network identity and protocol compatibility.
|
||||
|
||||
3. **Peer Discovery**: Requests and receives validated peer lists from bootstrap nodes. Each entry includes connectivity details as per the peer discovery protocol engaging after the initial connection.
|
||||
|
||||
4. **Network Integration**: Iteratively connects to discovered peers. Gradually builds peer connections.
|
||||
|
||||
5. **Protocol Engagement**: Establishes required protocol channels (gossip/consensus/sync). Begins participating in network operations.
|
||||
|
||||
6. **Ongoing Maintenance**: Continuously evaluates and refreshes peer connections. Ideally removes the connection to the bootstrap node itself. Bootstrap nodes may choose to remove the connection on their side to keep high availability for other nodes.
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Nomos Network
|
||||
participant Node
|
||||
participant Bootstrap Node
|
||||
|
||||
Node->>Node: Fetches bootstrapping addresses
|
||||
|
||||
loop Interacts with bootstrap node
|
||||
Node->>+Bootstrap Node: Connects
|
||||
Bootstrap Node->>-Node: Sends discovered peers information
|
||||
end
|
||||
|
||||
loop Connects to Network participants
|
||||
Node->>Nomos Network: Engages in connections
|
||||
Node->>Nomos Network: Negotiates protocols
|
||||
end
|
||||
|
||||
loop Ongoing maintenance
|
||||
Node-->>Nomos Network: Evaluates peer connections
|
||||
alt Bootstrap connection no longer needed
|
||||
Node-->>Bootstrap Node: Disconnects
|
||||
else Bootstrap enforces disconnection
|
||||
Bootstrap Node-->>Node: Disconnects
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The bootstrapping process for the Nomos p2p network uses the **QUIC** transport as specified in the Nomos network specification.
|
||||
|
||||
Bootstrapping is separated from the network's peer discovery protocol. It assumes that a discovery protocol engages as soon as the connection with the bootstrap node is established. Since the Nomos network currently uses `kademlia` as its first approach to peer discovery, this assumption is satisfied.
|
||||
|
||||
### Bootstrap Node Requirements
|
||||
|
||||
Bootstrap nodes MUST fulfill the following requirements:
|
||||
|
||||
- **High Availability**: Maintain uptime of 99.5% or higher
|
||||
- **Connection Capacity**: Support minimum 1000 concurrent connections
|
||||
- **Geographic Distribution**: Deploy across multiple regions
|
||||
- **Protocol Compatibility**: Support all required Nomos network protocols
|
||||
- **Security**: Implement proper authentication and rate limiting
|
||||
|
||||
### Network Configuration
|
||||
|
||||
Bootstrap node addresses are distributed through:
|
||||
|
||||
- **Hardcoded addresses** in node software releases
|
||||
- **DNS seeds** for dynamic address resolution
|
||||
- **Community-maintained lists** with cryptographic verification
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Trust Model
|
||||
|
||||
Bootstrap nodes operate under a **minimal trust model**:
|
||||
|
||||
- Nodes verify peer identities through cryptographic authentication
|
||||
- Bootstrap connections are temporary and replaced by organic peer discovery
|
||||
- No single bootstrap node can control network participation
|
||||
|
||||
### Attack Mitigation
|
||||
|
||||
**Sybil Attack Protection**: Bootstrap nodes implement connection limits and peer verification to prevent malicious flooding.
|
||||
|
||||
**Eclipse Attack Prevention**: Nodes connect to multiple bootstrap nodes and rapidly diversify their peer connections.
|
||||
|
||||
**Denial of Service Resistance**: Rate limiting and connection throttling protect bootstrap nodes from resource exhaustion attacks.
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Bootstrapping Metrics
|
||||
|
||||
- **Initial Connection Time**: Target < 30 seconds to first bootstrap node
|
||||
- **Peer Discovery Duration**: Discover minimum viable peer set within 2 minutes
|
||||
- **Network Integration**: Full protocol engagement within 5 minutes
|
||||
|
||||
### Resource Requirements
|
||||
|
||||
#### Bootstrap Nodes
|
||||
|
||||
- Memory: Minimum 4GB RAM
|
||||
- Bandwidth: 100 Mbps sustained
|
||||
- Storage: 50GB available space
|
||||
|
||||
#### Regular Nodes
|
||||
|
||||
- Memory: 512MB for bootstrapping process
|
||||
- Bandwidth: 10 Mbps during initial sync
|
||||
- Storage: Minimal requirements
|
||||
|
||||
## References
|
||||
|
||||
- P2P Network Specification (internal document)
|
||||
- [libp2p QUIC Transport](https://docs.libp2p.io/concepts/transports/quic/)
|
||||
- [libp2p Peer IDs and Addressing](https://docs.libp2p.io/concepts/fundamentals/peers/)
|
||||
- [Ethereum bootnodes](https://ethereum.org/en/developers/docs/nodes-and-clients/bootnodes/)
|
||||
- [Bitcoin peer discovery](https://developer.bitcoin.org/devguide/p2p_network.html#peer-discovery)
|
||||
- [Cardano nodes connectivity](https://docs.cardano.org/stake-pool-operators/node-connectivity)
|
||||
- [Cardano peer sharing](https://www.coincashew.com/coins/overview-ada/guide-how-to-build-a-haskell-stakepool-node/part-v-tips/implementing-peer-sharing)
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,320 +0,0 @@
|
||||
# NOMOS-P2P-NETWORK
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Nomos P2P Network Specification |
|
||||
| Slug | 135 |
|
||||
| Status | draft |
|
||||
| Category | networking |
|
||||
| Editor | Daniel Sanchez-Quiros <danielsq@status.im> |
|
||||
| Contributors | Filip Dimitrijevic <filip@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/blockchain/raw/p2p-network.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/blockchain/raw/p2p-network.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/nomos/raw/p2p-network.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/nomos/raw/p2p-network.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/nomos/raw/p2p-network.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-09-25** — [`a3a5b91`](https://github.com/logos-co/logos-lips/blob/a3a5b91df3e06bb9ad737056ccd2c2f1fd20af3c/nomos/raw/p2p-network.md) — Created nomos/raw/p2p-network.md file (#169)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines the peer-to-peer (P2P) network layer for Nomos blockchain nodes. The network serves as the comprehensive communication infrastructure enabling transaction dissemination through mempool and block propagation. The specification leverages established libp2p protocols to ensure robust, scalable performance with low bandwidth requirements and minimal latency while maintaining accessibility for diverse hardware configurations and network environments.
|
||||
|
||||
## Motivation
|
||||
|
||||
The Nomos blockchain requires a reliable, scalable P2P network that can:
|
||||
|
||||
1. **Support diverse hardware**: From laptops to dedicated servers across various operating systems and geographic locations
|
||||
2. **Enable inclusive participation**: Allow non-technical users to operate nodes with minimal configuration
|
||||
3. **Maintain connectivity**: Ensure nodes remain reachable even with limited connectivity or behind NAT/routers
|
||||
4. **Scale efficiently**: Support large-scale networks (+10k nodes) with eventual consistency
|
||||
5. **Provide low-latency communication**: Enable efficient transaction and block propagation
|
||||
|
||||
## Specification
|
||||
|
||||
### Network Architecture Overview
|
||||
|
||||
The Nomos P2P network addresses three critical challenges:
|
||||
|
||||
- **Peer Connectivity**: Mechanisms for peers to join and connect to the network
|
||||
- **Peer Discovery**: Enabling peers to locate and identify network participants
|
||||
- **Message Transmission**: Facilitating efficient message exchange across the network
|
||||
|
||||
### Transport Protocol
|
||||
|
||||
#### QUIC Protocol Transport
|
||||
|
||||
The Nomos network employs **[QUIC protocol](https://docs.libp2p.io/concepts/transports/quic/)** as the primary transport protocol, leveraging the [libp2p protocol](https://docs.libp2p.io/) implementation.
|
||||
|
||||
**Rationale for [QUIC protocol](https://docs.libp2p.io/concepts/transports/quic/):**
|
||||
|
||||
- Rapid connection establishment
|
||||
- Enhanced NAT traversal capabilities (UDP-based)
|
||||
- Built-in multiplexing simplifies configuration
|
||||
- Production-tested reliability
|
||||
|
||||
### Peer Discovery
|
||||
|
||||
#### Kademlia DHT
|
||||
|
||||
The network utilizes libp2p's Kademlia Distributed Hash Table (DHT) for peer discovery.
|
||||
|
||||
**Protocol Identifiers:**
|
||||
|
||||
- **Mainnet**: `/nomos/kad/1.0.0`
|
||||
- **Testnet**: `/nomos-testnet/kad/1.0.0`
|
||||
|
||||
**Features:**
|
||||
|
||||
- Proximity-based peer discovery heuristics
|
||||
- Distributed peer routing table
|
||||
- Resilient to network partitions
|
||||
- Automatic peer replacement
|
||||
|
||||
#### Identify Protocol
|
||||
|
||||
Complements Kademlia by enabling peer information exchange.
|
||||
|
||||
**Protocol Identifiers:**
|
||||
|
||||
- **Mainnet**: `/nomos/identify/1.0.0`
|
||||
- **Testnet**: `/nomos-testnet/identify/1.0.0`
|
||||
|
||||
**Capabilities:**
|
||||
|
||||
- Protocol support advertisement
|
||||
- Peer capability negotiation
|
||||
- Network interoperability enhancement
|
||||
|
||||
#### Future Considerations
|
||||
|
||||
The current Kademlia implementation is acknowledged as interim. Future improvements target:
|
||||
|
||||
- Lightweight design without full DHT overhead
|
||||
- Highly-scalable eventual consistency
|
||||
- Support for 10k+ nodes with minimal resource usage
|
||||
|
||||
### NAT Traversal
|
||||
|
||||
The network implements comprehensive NAT traversal solutions to ensure connectivity across diverse network configurations.
|
||||
|
||||
**Objectives:**
|
||||
|
||||
- Configuration-free peer connections
|
||||
- Support for users with varying technical expertise
|
||||
- Enable nodes on standard consumer hardware
|
||||
|
||||
**Implementation:**
|
||||
|
||||
- Tailored solutions based on user network configuration
|
||||
- Automatic NAT type detection and adaptation
|
||||
- Fallback mechanisms for challenging network environments
|
||||
|
||||
*Note: Detailed NAT traversal specifications are maintained in a separate document.*
|
||||
|
||||
### Message Dissemination
|
||||
|
||||
#### Gossipsub Protocol
|
||||
|
||||
Nomos employs **gossipsub** for reliable message propagation across the network.
|
||||
|
||||
**Integration:**
|
||||
|
||||
- Seamless integration with Kademlia peer discovery
|
||||
- Automatic peer list updates
|
||||
- Efficient message routing and delivery
|
||||
|
||||
#### Topic Configuration
|
||||
|
||||
**Mempool Dissemination:**
|
||||
|
||||
- **Mainnet**: `/nomos/mempool/0.1.0`
|
||||
- **Testnet**: `/nomos-testnet/mempool/0.1.0`
|
||||
|
||||
**Block Propagation:**
|
||||
|
||||
- **Mainnet**: `/nomos/cryptarchia/0.1.0`
|
||||
- **Testnet**: `/nomos-testnet/cryptarchia/0.1.0`
|
||||
|
||||
#### Network Parameters
|
||||
|
||||
**Peering Degree:**
|
||||
|
||||
- **Minimum recommended**: 8 peers
|
||||
- **Rationale**: Ensures redundancy and efficient propagation
|
||||
- **Configurable**: Nodes may adjust based on resources and requirements
|
||||
|
||||
### Bootstrapping
|
||||
|
||||
#### Initial Network Entry
|
||||
|
||||
New nodes connect to the network through designated bootstrap nodes.
|
||||
|
||||
**Process:**
|
||||
|
||||
1. Connect to known bootstrap nodes
|
||||
2. Obtain initial peer list through Kademlia
|
||||
3. Establish gossipsub connections
|
||||
4. Begin participating in network protocols
|
||||
|
||||
**Bootstrap Node Requirements:**
|
||||
|
||||
- High availability and reliability
|
||||
- Geographic distribution
|
||||
- Version compatibility maintenance
|
||||
|
||||
### Message Encoding
|
||||
|
||||
All network messages follow the Nomos Wire Format specification for consistent encoding and decoding across implementations.
|
||||
|
||||
**Key Properties:**
|
||||
|
||||
- Deterministic serialization
|
||||
- Efficient binary encoding
|
||||
- Forward/backward compatibility support
|
||||
- Cross-platform consistency
|
||||
|
||||
*Note: Detailed wire format specifications are maintained in a separate document.*
|
||||
|
||||
## Implementation Requirements
|
||||
|
||||
### Mandatory Protocols
|
||||
|
||||
All Nomos nodes MUST implement:
|
||||
|
||||
1. **Kademlia DHT** for peer discovery
|
||||
2. **Identify protocol** for peer information exchange
|
||||
3. **Gossipsub** for message dissemination
|
||||
|
||||
### Optional Enhancements
|
||||
|
||||
Nodes MAY implement:
|
||||
|
||||
- Advanced NAT traversal techniques
|
||||
- Custom peering strategies
|
||||
- Enhanced message routing optimizations
|
||||
|
||||
### Network Versioning
|
||||
|
||||
Protocol versions follow semantic versioning:
|
||||
|
||||
- **Major version**: Breaking protocol changes
|
||||
- **Minor version**: Backward-compatible enhancements
|
||||
- **Patch version**: Bug fixes and optimizations
|
||||
|
||||
## Configuration Parameters
|
||||
|
||||
### Implementation Note
|
||||
|
||||
**Current Status**: The Nomos P2P network implementation uses hardcoded libp2p protocol parameters for optimal performance and reliability. While the node configuration file (`config.yaml`) contains network-related settings, the core libp2p protocol parameters (Kademlia DHT, Identify, and Gossipsub) are embedded in the source code.
|
||||
|
||||
### Node Configuration
|
||||
|
||||
The following network parameters are configurable via `config.yaml`:
|
||||
|
||||
#### Network Backend Settings
|
||||
|
||||
```yaml
|
||||
network:
|
||||
backend:
|
||||
host: 0.0.0.0
|
||||
port: 3000
|
||||
node_key: <node_private_key>
|
||||
initial_peers: []
|
||||
```
|
||||
|
||||
#### Protocol-Specific Topics
|
||||
|
||||
**Mempool Dissemination:**
|
||||
|
||||
- **Mainnet**: `/nomos/mempool/0.1.0`
|
||||
- **Testnet**: `/nomos-testnet/mempool/0.1.0`
|
||||
|
||||
**Block Propagation:**
|
||||
|
||||
- **Mainnet**: `/nomos/cryptarchia/0.1.0`
|
||||
- **Testnet**: `/nomos-testnet/cryptarchia/0.1.0`
|
||||
|
||||
### Hardcoded Protocol Parameters
|
||||
|
||||
The following libp2p protocol parameters are currently hardcoded in the implementation:
|
||||
|
||||
#### Peer Discovery Parameters
|
||||
|
||||
- **Protocol identifiers** for Kademlia DHT and Identify protocols
|
||||
- **DHT routing table** configuration and query timeouts
|
||||
- **Peer discovery intervals** and connection management
|
||||
|
||||
#### Message Dissemination Parameters
|
||||
|
||||
- **Gossipsub mesh parameters** (peer degree, heartbeat intervals)
|
||||
- **Message validation** and caching settings
|
||||
- **Topic subscription** and fanout management
|
||||
|
||||
#### Rationale for Hardcoded Parameters
|
||||
|
||||
1. **Network Stability**: Prevents misconfigurations that could fragment the network
|
||||
2. **Performance Optimization**: Parameters are tuned for the target network size and latency requirements
|
||||
3. **Security**: Reduces attack surface by limiting configurable network parameters
|
||||
4. **Simplicity**: Eliminates need for operators to understand complex P2P tuning
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Network-Level Security
|
||||
|
||||
1. **Peer Authentication**: Utilize libp2p's built-in peer identity verification
|
||||
2. **Message Validation**: Implement application-layer message validation
|
||||
3. **Rate Limiting**: Protect against spam and DoS attacks
|
||||
4. **Blacklisting**: Mechanism for excluding malicious peers
|
||||
|
||||
### Privacy Considerations
|
||||
|
||||
1. **Traffic Analysis**: Gossipsub provides some resistance to traffic analysis
|
||||
2. **Metadata Leakage**: Minimize identifiable information in protocol messages
|
||||
3. **Connection Patterns**: Randomize connection timing and patterns
|
||||
|
||||
### Denial of Service Protection
|
||||
|
||||
1. **Resource Limits**: Impose limits on connections and message rates
|
||||
2. **Peer Scoring**: Implement reputation-based peer management
|
||||
3. **Circuit Breakers**: Automatic protection against resource exhaustion
|
||||
|
||||
### Node Configuration Example
|
||||
|
||||
[Nomos Node Configuration](https://github.com/logos-co/nomos/blob/master/nodes/nomos-node/config.yaml) is an example node configuration
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Scalability
|
||||
|
||||
- **Target Network Size**: 10,000+ nodes
|
||||
- **Message Latency**: Sub-second for critical messages
|
||||
- **Bandwidth Efficiency**: Optimized for limited bandwidth environments
|
||||
|
||||
### Resource Requirements
|
||||
|
||||
- **Memory Usage**: Minimal DHT routing table overhead
|
||||
- **CPU Usage**: Efficient cryptographic operations
|
||||
- **Network Bandwidth**: Adaptive based on node role and capacity
|
||||
|
||||
## References
|
||||
|
||||
Original working document, from Nomos Notion: [P2P Network Specification](https://nomos-tech.notion.site/P2P-Network-Specification-206261aa09df81db8100d5f410e39d75).
|
||||
|
||||
1. [libp2p Specifications](https://docs.libp2p.io/)
|
||||
2. [QUIC Protocol Specification](https://docs.libp2p.io/concepts/transports/quic/)
|
||||
3. [Kademlia DHT](https://docs.libp2p.io/concepts/discovery-routing/kaddht/)
|
||||
4. [Gossipsub Protocol](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub)
|
||||
5. [Identify Protocol](https://github.com/libp2p/specs/blob/master/identify/README.md)
|
||||
6. [Nomos Implementation](https://github.com/logos-co/nomos) - Reference implementation and source code
|
||||
7. [Nomos Node Configuration](https://github.com/logos-co/nomos/blob/master/nodes/nomos-node/config.yaml) - Example node configuration
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,42 +0,0 @@
|
||||
# IFT-TS LIPs
|
||||
|
||||
IFT-TS builds public good protocols for the decentralised web.
|
||||
IFT-TS acts as a custodian for the protocols that live in the logos-lips repository.
|
||||
With the goal of widespread adoption,
|
||||
IFT-TS will make sure the protocols adhere to a set of principles,
|
||||
including but not limited to liberty, security, privacy, decentralisation and inclusivity.
|
||||
|
||||
To learn more, visit [IFT-TS Research](https://vac.dev/)
|
||||
|
||||
<div class="landing-hero">
|
||||
<div class="filter-row">
|
||||
<input id="rfc-search" type="search" placeholder="Search by number, title, status" aria-label="Search LIPs">
|
||||
<div class="chips" id="status-chips">
|
||||
<span class="chip active" data-status="current" data-label="Current">Current</span>
|
||||
<span class="chip" data-status="all" data-label="All">All</span>
|
||||
<span class="chip" data-status="stable" data-label="Stable">Stable</span>
|
||||
<span class="chip" data-status="draft" data-label="Draft">Draft</span>
|
||||
<span class="chip" data-status="raw" data-label="Raw">Raw</span>
|
||||
<span class="chip" data-status="deprecated" data-label="Deprecated">Deprecated</span>
|
||||
<span class="chip" data-status="deleted" data-label="Deleted">Deleted</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="filter-row">
|
||||
<div class="chips" id="date-chips">
|
||||
<span class="chip active" data-date="all" data-label="All time">All time</span>
|
||||
<span class="chip" data-date="latest" data-label="Latest" data-count="false">Latest</span>
|
||||
<span class="chip" data-date="last90" data-label="Last 90 days">Last 90 days</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="results-row">
|
||||
<div id="results-count" class="results-count">Loading RFC index...</div>
|
||||
<div class="results-hint">Click a column to sort</div>
|
||||
</div>
|
||||
|
||||
<div id="rfc-table-container" class="table-wrap" data-component="ift-ts"></div>
|
||||
|
||||
<noscript>
|
||||
<p class="noscript-note">JavaScript is required to load the RFC index table.</p>
|
||||
</noscript>
|
||||
@@ -1,385 +0,0 @@
|
||||
# 1/COSS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Consensus-Oriented Specification System |
|
||||
| Slug | 1 |
|
||||
| Status | draft |
|
||||
| Category | Best Current Practice |
|
||||
| Editor | Daniel Kaiser <danielkaiser@status.im> |
|
||||
| Contributors | Oskar Thoren <oskarth@titanproxy.com>, Pieter Hintjens <ph@imatix.com>, André Rebentisch <andre@openstandards.de>, Alberto Barrionuevo <abarrio@opentia.es>, Chris Puttick <chris.puttick@thehumanjourney.net>, Yurii Rashkovskii <yrashk@gmail.com>, Jimmy Debe <jimmy@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-03-23** — [`011eea8`](https://github.com/logos-co/logos-lips/blob/011eea8f069feed3a4feee1c2cc2669f8ed498d9/docs/ift-ts/raw/1/coss.md) — docs(1/coss): add Approved/Verified statuses, CFR doc type, and raw-spec leniency (#301)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/1/coss.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/1/coss.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/1/coss.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/1/coss.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/1/coss.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-11-04** — [`dd397ad`](https://github.com/logos-co/logos-lips/blob/dd397adc594c121ce3e10b7e81b5c2ed4818c0a6/vac/1/coss.md) — Update Coss Date (#206)
|
||||
- **2024-10-09** — [`d5e0072`](https://github.com/logos-co/logos-lips/blob/d5e0072498858c5d699ec091c41ae8961badcaee/vac/1/coss.md) — cosmetic: fix external links in 1/COSS (#100)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/1/coss.md) — Fix Files for Linting (#94)
|
||||
- **2024-08-09** — [`ed2c68f`](https://github.com/logos-co/logos-lips/blob/ed2c68f0722a88ec5781741e07bafc3920d1796a/vac/1/coss.md) — 1/COSS: New RFC Process (#4)
|
||||
- **2024-02-01** — [`3eaccf9`](https://github.com/logos-co/logos-lips/blob/3eaccf93b593026f05c8bfc2dc3a9f5657398cd3/vac/1/coss.md) — Update and rename COSS.md to coss.md
|
||||
- **2024-01-30** — [`990d940`](https://github.com/logos-co/logos-lips/blob/990d940d92e3bbbfa41b1b57fbcbbea05d41834d/vac/1/COSS.md) — Rename COSS.md to COSS.md
|
||||
- **2024-01-27** — [`6495074`](https://github.com/logos-co/logos-lips/blob/649507410e07e0d0a08f3122a625c86a12e38de0/vac/01/COSS.md) — Rename vac/rfcs/01/README.md to vac/01/COSS.md
|
||||
- **2024-01-25** — [`bab16a8`](https://github.com/logos-co/logos-lips/blob/bab16a8463d343392f45defb79b6dddbe68eb636/vac/rfcs/01/README.md) — Rename README.md to README.md
|
||||
- **2024-01-25** — [`a9162f2`](https://github.com/logos-co/logos-lips/blob/a9162f28df681781e9bc94b94e2b3a6425cf4428/vac/rfc/01/README.md) — Create README.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
This document describes a consensus-oriented specification system (COSS)
|
||||
for building interoperable technical specifications.
|
||||
COSS is based on a lightweight editorial process that
|
||||
seeks to engage the widest possible range of interested parties and
|
||||
move rapidly to consensus through working code.
|
||||
|
||||
This specification is based on [Unprotocols 2/COSS](https://github.com/unprotocols/rfc/blob/master/2/README.md),
|
||||
used by the [ZeroMQ](https://rfc.zeromq.org/) project.
|
||||
It is equivalent except for some areas:
|
||||
|
||||
- recommending the use of a permissive license,
|
||||
such as CC0 (with the exception of this document);
|
||||
- miscellaneous metadata, editor, and format/link updates;
|
||||
- more inheritance from the [IETF Standards Process](https://www.rfc-editor.org/rfc/rfc2026.txt),
|
||||
e.g. using RFC categories: Standards Track, Informational, and Best Common Practice;
|
||||
- standards track specifications SHOULD
|
||||
follow a specific structure that both streamlines editing,
|
||||
and helps implementers to quickly comprehend the specification
|
||||
- specifications MUST feature a header providing specific meta information
|
||||
- raw specifications will not be assigned numbers
|
||||
- section explaining the [IFT](https://free.technology/)
|
||||
Request For Comments specification process managed by the IFT-TS service department
|
||||
|
||||
## License
|
||||
|
||||
Copyright (c) 2008-26 the Editor and Contributors.
|
||||
|
||||
This Specification is free software;
|
||||
you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License
|
||||
as published by the Free Software Foundation;
|
||||
either version 3 of the License, or (at your option) any later version.
|
||||
|
||||
This specification is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY;
|
||||
without even the implied warranty of MERCHANTABILITY or
|
||||
FITNESS FOR A PARTICULAR PURPOSE.
|
||||
See the GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program;
|
||||
if not, see [gnu.org](http://www.gnu.org/licenses).
|
||||
|
||||
## Change Process
|
||||
|
||||
This document is governed by the [1/COSS](coss.md) (COSS).
|
||||
|
||||
## Language
|
||||
|
||||
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
|
||||
"SHOULD NOT", "RECOMMENDED", "MAY", and
|
||||
"OPTIONAL" in this document are to be interpreted as described in
|
||||
[RFC 2119](http://tools.ietf.org/html/rfc2119).
|
||||
|
||||
## Goals
|
||||
|
||||
The primary goal of COSS is to facilitate the process of writing, proving, and
|
||||
improving new technical specifications.
|
||||
A "technical specification" defines a protocol, a process, an API, a use of language,
|
||||
a methodology, or any other aspect of a technical environment that
|
||||
can usefully be documented for the purposes of technical or social interoperability.
|
||||
|
||||
COSS is intended to above all be economical and rapid,
|
||||
so that it is useful to small teams with little time to spend on more formal processes.
|
||||
|
||||
Principles:
|
||||
|
||||
- We aim for rough consensus and running code; [inspired by the IETF Tao](https://www.ietf.org/about/participate/tao/).
|
||||
- Specifications are small pieces, made by small teams.
|
||||
- Specifications should have a clearly responsible editor.
|
||||
- The process should be visible, objective, and accessible to anyone.
|
||||
- The process should clearly separate experiments from solutions.
|
||||
- The process should allow deprecation of old specifications.
|
||||
|
||||
Specifications should take minutes to explain, hours to design, days to write,
|
||||
weeks to prove, months to become mature, and years to replace.
|
||||
Specifications have no special status except that accorded by the community.
|
||||
|
||||
## Architecture
|
||||
|
||||
COSS is designed around fast, easy to use communications tools.
|
||||
Primarily, COSS uses a wiki model for editing and publishing specifications texts.
|
||||
|
||||
- The *domain* is the conservancy for a set of specifications.
|
||||
- The *domain* is implemented as an Internet domain.
|
||||
- Each specification is a document together with references and attached resources.
|
||||
- A *sub-domain* is an initiative under a specific domain.
|
||||
|
||||
Individuals can become members of the *domain*
|
||||
by completing the necessary legal clearance.
|
||||
The copyright, patent, and trademark policies of the domain must be clarified
|
||||
in an Intellectual Property policy that applies to the domain.
|
||||
|
||||
Specifications exist as multiple pages, one page per version,
|
||||
(discussed below in "Branching and Merging"),
|
||||
which should be assigned URIs that MAY include a number identifier.
|
||||
|
||||
Thus, we refer to a new specification by specifying its domain,
|
||||
its sub-domain and short name.
|
||||
The syntax for a new specification reference is:
|
||||
|
||||
<domain>/<sub-domain>/<shortname>
|
||||
|
||||
For example, this specification should be **rfc.vac.dev/vac/COSS**,
|
||||
if the status were **raw**.
|
||||
|
||||
A number will be assigned to the specification when obtaining **draft** status.
|
||||
New versions of the same specification will be assigned a new number.
|
||||
The syntax for a specification reference is:
|
||||
|
||||
<domain>/<sub-domain>/<number>/<shortname>
|
||||
|
||||
For example, this specification is **rfc.vac.dev/vac/1/COSS**.
|
||||
The short form **1/COSS** may be used when referring to the specification
|
||||
from other specifications in the same domain.
|
||||
|
||||
Specifications (excluding raw specifications)
|
||||
carry a different number, including branches.
|
||||
|
||||
## COSS Lifecycle
|
||||
|
||||
Every specification has an independent lifecycle that
|
||||
documents clearly its current status.
|
||||
For a specification to receive a lifecycle status,
|
||||
a new specification SHOULD be presented by the team of the sub-domain.
|
||||
After discussion amongst the contributors has reached a rough consensus,
|
||||
as described in [RFC7282](https://www.rfc-editor.org/rfc/rfc7282.html),
|
||||
the specification MAY begin the process to upgrade its status.
|
||||
|
||||
A specification has eight possible states that reflect its maturity and
|
||||
contractual weight:
|
||||
|
||||

|
||||
|
||||
### Raw Specifications
|
||||
|
||||
All new specifications are **raw** specifications.
|
||||
Changes to raw specifications can be unilateral and arbitrary.
|
||||
A sub-domain MAY use the **raw** status for new specifications
|
||||
that live under their domain.
|
||||
Raw specifications have no contractual weight.
|
||||
|
||||
### Draft Specifications
|
||||
|
||||
When raw specifications can be demonstrated,
|
||||
they become **draft** specifications and are assigned numbers.
|
||||
Changes to draft specifications should be done in consultation with users.
|
||||
Draft specifications are contracts between the editors and implementers.
|
||||
|
||||
### Approved Specifications
|
||||
|
||||
When draft specifications have been reviewed and verified by the internal development team,
|
||||
they become **approved** specifications.
|
||||
Approved specifications are ready to be included in the specification index.
|
||||
Changes to approved specifications should be done in consultation with the development team.
|
||||
Approved specifications are contracts between the editors, the development team, and implementers.
|
||||
|
||||
### Stable Specifications
|
||||
|
||||
When approved specifications are used by third parties, they become **stable** specifications.
|
||||
Changes to stable specifications should be restricted to cosmetic ones,
|
||||
errata and clarifications.
|
||||
Stable specifications are contracts between editors, implementers, and end-users.
|
||||
|
||||
### Verified Specifications
|
||||
|
||||
When stable specifications have been implemented by a non-IFT entity,
|
||||
they become **verified** specifications.
|
||||
Verified status indicates external validation of the specification
|
||||
through independent implementation.
|
||||
Changes to verified specifications MUST be restricted to errata and clarifications.
|
||||
Verified specifications are contracts between editors, implementers, and external parties.
|
||||
|
||||
### Deprecated Specifications
|
||||
|
||||
When stable or verified specifications are replaced by newer draft specifications,
|
||||
they become **deprecated** specifications.
|
||||
Deprecated specifications should not be changed except
|
||||
to indicate their replacements, if any.
|
||||
Deprecated specifications are contracts between editors, implementers and end-users.
|
||||
|
||||
### Retired Specifications
|
||||
|
||||
When deprecated specifications are no longer used in products,
|
||||
they become **retired** specifications.
|
||||
Retired specifications are part of the historical record.
|
||||
They should not be changed except to indicate their replacements, if any.
|
||||
Retired specifications have no contractual weight.
|
||||
|
||||
### Deleted Specifications
|
||||
|
||||
Deleted specifications are those that have not reached maturity (stable) and
|
||||
were discarded.
|
||||
They should not be used and are only kept for their historical value.
|
||||
Only Raw and Draft specifications can be deleted.
|
||||
|
||||
## Editorial control
|
||||
|
||||
A specification MUST have a single responsible editor,
|
||||
the only person who SHALL change the status of the specification
|
||||
through the lifecycle stages.
|
||||
|
||||
A specification MAY also have additional contributors who contribute changes to it.
|
||||
It is RECOMMENDED to use a process similar to [C4 process](https://github.com/unprotocols/rfc/blob/master/1/README.md)
|
||||
to maximize the scale and diversity of contributions.
|
||||
|
||||
Unlike the original C4 process however,
|
||||
it is RECOMMENDED to use CC0 as a more permissive license alternative.
|
||||
We SHOULD NOT use a GPL or GPL-like license.
|
||||
One exception is this specification, as this was the original license for this specification.
|
||||
|
||||
The editor is responsible for accurately maintaining the state of specifications,
|
||||
for retiring different versions that may live in other places and
|
||||
for handling all comments on the specification.
|
||||
|
||||
## Branching and Merging
|
||||
|
||||
Any member of the domain MAY branch a specification at any point.
|
||||
This is done by copying the existing text, and
|
||||
creating a new specification with the same name and content, but a new number.
|
||||
Since **raw** specifications are not assigned a number,
|
||||
branching by any member of a sub-domain MAY differentiate specifications
|
||||
based on date, contributors, or
|
||||
version number within the document.
|
||||
The ability to branch a specification is necessary in these circumstances:
|
||||
|
||||
- To change the responsible editor for a specification,
|
||||
with or without the cooperation of the current responsible editor.
|
||||
- To rejuvenate a specification that is stable but needs functional changes.
|
||||
This is the proper way to make a new version of a specification
|
||||
that is in stable or deprecated status.
|
||||
- To resolve disputes between different technical opinions.
|
||||
|
||||
The responsible editor of a branched specification is the person who makes the branch.
|
||||
|
||||
Branches, including added contributions, are derived works and
|
||||
thus licensed under the same terms as the original specification.
|
||||
This means that contributors are guaranteed the right to merge changes made in branches
|
||||
back into their original specifications.
|
||||
|
||||
Technically speaking, a branch is a *different* specification,
|
||||
even if it carries the same name.
|
||||
Branches have no special status except that accorded by the community.
|
||||
|
||||
## Conflict resolution
|
||||
|
||||
COSS resolves natural conflicts between teams and
|
||||
vendors by allowing anyone to define a new specification.
|
||||
There is no editorial control process except
|
||||
that practised by the editor of a new specification.
|
||||
The administrators of a domain (moderators)
|
||||
may choose to interfere in editorial conflicts,
|
||||
and may suspend or ban individuals for behaviour they consider inappropriate.
|
||||
|
||||
## Specification Structure
|
||||
|
||||
### Meta Information
|
||||
|
||||
Specifications MUST contain certain metadata fields.
|
||||
It is RECOMMENDED that specification metadata is specified as a YAML header
|
||||
(where possible).
|
||||
This will enable programmatic access to specification metadata.
|
||||
|
||||
Fields marked **required** MUST be present in all specifications at **draft** status or above.
|
||||
Fields marked **optional** MAY be omitted,
|
||||
particularly in **raw** specifications that are still being developed.
|
||||
|
||||
| Key | Required | Value | Type | Example |
|
||||
|------------------|------------|----------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **name** | required | full name | string | Consensus-Oriented Specification System |
|
||||
| **slug** | required | number | int | 1 |
|
||||
| **status** | required | status | string | draft |
|
||||
| **category** | optional | category | string | Best Current Practice |
|
||||
| **tags** | optional | 0 or several tags | list | waku-application, waku-core-protocol |
|
||||
| **editor** | optional | editor name/email | string | Oskar Thoren <oskarth@titanproxy.com> |
|
||||
| **contributors** | optional | contributors | list | - Pieter Hintjens <ph@imatix.com> - André Rebentisch <andre@openstandards.de> - Alberto Barrionuevo <abarrio@opentia.es> - Chris Puttick <chris.puttick@thehumanjourney.net> - Yurii Rashkovskii <yrashk@gmail.com> |
|
||||
|
||||
For **raw** specifications,
|
||||
only `name` and `status` are strictly required.
|
||||
All other fields SHOULD be added before the specification is promoted to **draft** status.
|
||||
|
||||
### IFT/Logos LIP Process
|
||||
|
||||
> [!Note]
|
||||
This section is introduced to allow contributors to understand the IFT
|
||||
(Institute of Free Technology) Logos LIP specification process.
|
||||
Other organizations may make changes to this section according to their needs.
|
||||
|
||||
IFT-TS is a department under the IFT organization that provides RFC (Request For Comments)
|
||||
specification services.
|
||||
This service works to help facilitate the RFC process, assuring standards are followed.
|
||||
Contributors within the service SHOULD assist a *sub-domain* in creating a new specification,
|
||||
editing a specification, and
|
||||
promoting the status of a specification along with other tasks.
|
||||
Once a specification reaches some level of maturity by rough consensus,
|
||||
the specification SHOULD enter the [Logos LIP](https://rfc.vac.dev/) process.
|
||||
Similar to the IETF working group adoption described in [RFC6174](https://www.rfc-editor.org/rfc/rfc6174.html),
|
||||
the Logos LIP process SHOULD facilitate all updates to the specification.
|
||||
|
||||
Specifications are introduced by projects,
|
||||
under a specific *domain*, with the intention of becoming technically mature documents.
|
||||
The IFT domain currently houses the following projects:
|
||||
|
||||
- [Messaging](https://waku.org/)
|
||||
- [Storage](https://codex.storage/)
|
||||
- [Blockchain](https://nomos.tech/)
|
||||
|
||||
When a specification is promoted to *draft* status,
|
||||
the number that is assigned MAY be incremental
|
||||
or by the *sub-domain* and the Logos LIP process.
|
||||
Standards track specifications MUST be based on the
|
||||
[Logos LIP template](../../template.md) before obtaining a new status.
|
||||
All changes, comments, and contributions SHOULD be documented.
|
||||
|
||||
### Document Types
|
||||
|
||||
The IFT specification process recognizes two document types:
|
||||
|
||||
**RFC (Request for Comments)**
|
||||
is the primary document type for technical specifications.
|
||||
RFCs progress through the full COSS lifecycle from raw to stable or beyond.
|
||||
All specifications described in this document are RFCs by default.
|
||||
|
||||
**CFR (Change for Request)**
|
||||
is a document type for proposing changes or amendments to existing specifications.
|
||||
A CFR describes a specific, bounded change to an existing RFC.
|
||||
CFRs follow the same lifecycle as RFCs but are scoped to a single change proposal.
|
||||
A CFR that reaches stable status MAY be merged back into the target RFC,
|
||||
after which it transitions to deprecated.
|
||||
|
||||
## Conventions
|
||||
|
||||
Where possible editors and contributors are encouraged to:
|
||||
|
||||
- Refer to and build on existing work when possible, especially IETF specifications.
|
||||
- Contribute to existing specifications rather than reinvent their own.
|
||||
- Use collaborative branching and merging as a tool for experimentation.
|
||||
- Use Semantic Line Breaks: [sembr](https://sembr.org/).
|
||||
|
||||
## Appendix A. Color Coding
|
||||
|
||||
It is RECOMMENDED to use color coding to indicate a specification's status.
|
||||
Color coded specifications SHOULD use the following color scheme:
|
||||
|
||||
- 
|
||||
- 
|
||||
- 
|
||||
- 
|
||||
- 
|
||||
- 
|
||||
- 
|
||||
- 
|
||||
@@ -1 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="105" height="20"><g shape-rendering="crispEdges"><path fill="#555" d="M0 0h43v20H0z"/><path fill="#0096be" d="M43 0h62v20H43z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="21.5" y="14">status</text><text x="74" y="14">approved</text></g></svg>
|
||||
|
Before Width: | Height: | Size: 363 B |
@@ -1,142 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1600 780" font-family="Helvetica, Arial, sans-serif">
|
||||
<title>Consensus Oriented Specification System Life Cycle</title>
|
||||
|
||||
<defs>
|
||||
<marker id="arr" markerWidth="8" markerHeight="6" refX="8" refY="3" orient="auto">
|
||||
<polygon points="0 0, 8 3, 0 6" fill="#cc4444"/>
|
||||
</marker>
|
||||
</defs>
|
||||
|
||||
<!-- Title -->
|
||||
<text x="800" y="32" text-anchor="middle" font-size="17" font-weight="bold" fill="#e6e1cf">Consensus Oriented Specification System Life Cycle</text>
|
||||
|
||||
<!-- Column borders -->
|
||||
<g stroke="#cc4444" stroke-width="1" fill="none">
|
||||
<rect x="0" y="48" width="1600" height="720"/>
|
||||
<line x1="200" y1="48" x2="200" y2="768"/>
|
||||
<line x1="400" y1="48" x2="400" y2="768"/>
|
||||
<line x1="600" y1="48" x2="600" y2="768"/>
|
||||
<line x1="800" y1="48" x2="800" y2="768"/>
|
||||
<line x1="1000" y1="48" x2="1000" y2="768"/>
|
||||
<line x1="1200" y1="48" x2="1200" y2="768"/>
|
||||
<line x1="1400" y1="48" x2="1400" y2="768"/>
|
||||
</g>
|
||||
|
||||
<!-- Column header backgrounds (color coding) -->
|
||||
<rect x="0" y="48" width="200" height="32" fill="#9f9f9f"/>
|
||||
<rect x="200" y="48" width="200" height="32" fill="#007ec6"/>
|
||||
<rect x="400" y="48" width="200" height="32" fill="#0096be"/>
|
||||
<rect x="600" y="48" width="200" height="32" fill="#44cc11"/>
|
||||
<rect x="800" y="48" width="200" height="32" fill="#2d8a4e"/>
|
||||
<rect x="1000" y="48" width="200" height="32" fill="#a4a61d"/>
|
||||
<rect x="1200" y="48" width="200" height="32" fill="#e05d44"/>
|
||||
<rect x="1400" y="48" width="200" height="32" fill="#555"/>
|
||||
|
||||
<!-- Column headers -->
|
||||
<g font-size="15" font-weight="bold" text-anchor="middle" fill="#fff">
|
||||
<text x="100" y="70">Raw</text>
|
||||
<text x="300" y="70">Draft</text>
|
||||
<text x="500" y="70">Approved</text>
|
||||
<text x="700" y="70">Stable</text>
|
||||
<text x="900" y="70">Verified</text>
|
||||
<text x="1100" y="70">Deprecated</text>
|
||||
<text x="1300" y="70">Retired</text>
|
||||
<text x="1500" y="70">Deleted</text>
|
||||
</g>
|
||||
|
||||
<!-- ======== RAW COLUMN ======== -->
|
||||
<rect x="50" y="88" width="100" height="30" rx="15" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="100" y="108" text-anchor="middle" font-size="13" font-weight="bold" fill="#000">Create</text>
|
||||
|
||||
<line x1="100" y1="118" x2="100" y2="135" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
|
||||
<rect x="22" y="135" width="156" height="44" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="100" y="153" text-anchor="middle" font-size="11" fill="#000">Use: mockups</text>
|
||||
<text x="100" y="170" text-anchor="middle" font-size="11" fill="#000">Goal: design it</text>
|
||||
|
||||
<line x1="100" y1="179" x2="100" y2="217" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
|
||||
<polygon points="100,217 158,247 100,277 42,247" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="100" y="252" text-anchor="middle" font-size="12" fill="#000">useful</text>
|
||||
|
||||
<!-- ======== DRAFT COLUMN ======== -->
|
||||
<rect x="222" y="135" width="156" height="44" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="300" y="153" text-anchor="middle" font-size="11" fill="#000">Use: prototypes</text>
|
||||
<text x="300" y="170" text-anchor="middle" font-size="11" fill="#000">Goal: prove it</text>
|
||||
|
||||
<line x1="300" y1="179" x2="300" y2="300" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
|
||||
<rect x="245" y="300" width="110" height="30" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="300" y="320" text-anchor="middle" font-size="13" fill="#000">Iterate</text>
|
||||
|
||||
<line x1="300" y1="330" x2="300" y2="358" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
|
||||
<polygon points="300,358 365,390 300,422 235,390" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="300" y="395" text-anchor="middle" font-size="11" fill="#000">successful</text>
|
||||
|
||||
<!-- ======== APPROVED COLUMN ======== -->
|
||||
<rect x="420" y="443" width="160" height="44" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="500" y="461" text-anchor="middle" font-size="11" fill="#000">Use: internal review</text>
|
||||
<text x="500" y="478" text-anchor="middle" font-size="11" fill="#000">Goal: verify it</text>
|
||||
|
||||
<line x1="500" y1="487" x2="500" y2="512" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
|
||||
<rect x="430" y="512" width="140" height="30" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="500" y="532" text-anchor="middle" font-size="12" fill="#000">Dev team sign-off</text>
|
||||
|
||||
<!-- ======== STABLE COLUMN ======== -->
|
||||
<rect x="622" y="512" width="156" height="44" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="700" y="530" text-anchor="middle" font-size="11" fill="#000">Use: products</text>
|
||||
<text x="700" y="547" text-anchor="middle" font-size="11" fill="#000">Goal: deploy it</text>
|
||||
|
||||
<line x1="700" y1="556" x2="700" y2="576" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
|
||||
<rect x="620" y="576" width="160" height="30" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="700" y="596" text-anchor="middle" font-size="11" fill="#000">Cosmetic improvements</text>
|
||||
|
||||
<!-- ======== VERIFIED COLUMN ======== -->
|
||||
<rect x="820" y="576" width="160" height="44" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="900" y="594" text-anchor="middle" font-size="11" fill="#000">Use: external impl.</text>
|
||||
<text x="900" y="611" text-anchor="middle" font-size="11" fill="#000">Goal: validate it</text>
|
||||
|
||||
<!-- ======== DEPRECATED COLUMN ======== -->
|
||||
<rect x="1022" y="652" width="156" height="44" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="1100" y="670" text-anchor="middle" font-size="11" fill="#000">Use: products</text>
|
||||
<text x="1100" y="687" text-anchor="middle" font-size="11" fill="#000">Goal: replace it</text>
|
||||
|
||||
<!-- ======== RETIRED COLUMN ======== -->
|
||||
<rect x="1218" y="659" width="164" height="30" rx="8" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="1300" y="679" text-anchor="middle" font-size="11" fill="#000">Use: historical, Goal: study it</text>
|
||||
|
||||
<!-- ======== DELETED COLUMN ======== -->
|
||||
<rect x="1450" y="232" width="100" height="30" rx="15" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="1500" y="252" text-anchor="middle" font-size="13" fill="#000">Archive</text>
|
||||
|
||||
<rect x="1450" y="375" width="100" height="30" rx="15" fill="#d4943a" stroke="#b07828" stroke-width="1.5"/>
|
||||
<text x="1500" y="395" text-anchor="middle" font-size="13" fill="#000">Archive</text>
|
||||
|
||||
<!-- ======== TRANSITION ARROWS ======== -->
|
||||
<line x1="158" y1="247" x2="220" y2="157" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<text x="168" y="238" font-size="11" fill="#cc4444">yes</text>
|
||||
|
||||
<line x1="158" y1="247" x2="1450" y2="247" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<text x="165" y="263" font-size="11" fill="#cc4444">no</text>
|
||||
|
||||
<line x1="300" y1="422" x2="418" y2="450" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<text x="308" y="443" font-size="11" fill="#cc4444">yes</text>
|
||||
|
||||
<line x1="365" y1="390" x2="1450" y2="390" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<text x="373" y="383" font-size="11" fill="#cc4444">no</text>
|
||||
|
||||
<line x1="570" y1="527" x2="620" y2="527" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<!-- Stable Cosmetic -> Verified (horizontal) -->
|
||||
<line x1="780" y1="591" x2="818" y2="591" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<!-- Stable Cosmetic -> Deprecated (L-bend below Verified) -->
|
||||
<polyline points="700,606 700,636 1022,663" fill="none" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<!-- Verified -> Deprecated (from right edge) -->
|
||||
<line x1="980" y1="598" x2="1022" y2="663" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
<!-- Deprecated -> Retired (horizontal) -->
|
||||
<line x1="1178" y1="674" x2="1218" y2="674" stroke="#cc4444" stroke-width="1.5" marker-end="url(#arr)"/>
|
||||
|
||||
<text x="800" y="758" text-anchor="middle" font-size="10" fill="#999">rfc.vac.dev/1/COSS</text>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 8.0 KiB |
@@ -1 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="104" height="20"><g shape-rendering="crispEdges"><path fill="#555" d="M0 0h43v20H0z"/><path fill="#2d8a4e" d="M43 0h61v20H43z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="21.5" y="14">status</text><text x="73.5" y="14">verified</text></g></svg>
|
||||
|
Before Width: | Height: | Size: 365 B |
@@ -1,14 +0,0 @@
|
||||
# Alice and Bob: batch data sync
|
||||
msc {
|
||||
hscale="2", wordwraparcs=on;
|
||||
|
||||
alice [label="Alice"],
|
||||
bob [label="Bob"];
|
||||
|
||||
--- [label="batch data sync"];
|
||||
alice => alice [label="add messages to payload state"];
|
||||
alice >> bob [label="send payload with messages"];
|
||||
|
||||
bob => bob [label="add acks to payload state"];
|
||||
bob >> alice [label="send payload with acks"];
|
||||
}
|
||||
|
Before Width: | Height: | Size: 14 KiB |
@@ -1,20 +0,0 @@
|
||||
# Alice and Bob: interactive data sync
|
||||
msc {
|
||||
hscale="2", wordwraparcs=on;
|
||||
|
||||
alice [label="Alice"],
|
||||
bob [label="Bob"];
|
||||
|
||||
--- [label="interactive data sync"];
|
||||
alice => alice [label="add offers to payload state"];
|
||||
alice >> bob [label="send payload with offers"];
|
||||
|
||||
bob => bob [label="add requests to payload state"];
|
||||
bob >> alice [label="send payload with requests"];
|
||||
|
||||
alice => alice [label="add requested messages to state"];
|
||||
alice >> bob [label="send payload with messages"];
|
||||
|
||||
bob => bob [label="add acks to payload state"];
|
||||
bob >> alice [label="send payload with acks"];
|
||||
}
|
||||
|
Before Width: | Height: | Size: 24 KiB |
@@ -1,209 +0,0 @@
|
||||
# 2/MVDS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Minimum Viable Data Synchronization |
|
||||
| Slug | 2 |
|
||||
| Status | stable |
|
||||
| Category | Standards Track |
|
||||
| Editor | Sanaz Taheri <sanaz@status.im> |
|
||||
| Contributors | Dean Eigenmann <dean@status.im>, Oskar Thorén <oskarth@titanproxy.com> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/2/mvds.md) — chore: add math support (#287)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/2/mvds.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/2/mvds.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/2/mvds.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/2/mvds.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/2/mvds.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/2/mvds.md) — Fix Files for Linting (#94)
|
||||
- **2024-06-28** — [`a5b24ac`](https://github.com/logos-co/logos-lips/blob/a5b24ac0a27da361312260f9da372a0e6e812212/vac/2/mvds.md) — fix_: broken image links (#81)
|
||||
- **2024-02-01** — [`0253d53`](https://github.com/logos-co/logos-lips/blob/0253d534ffa9b7994cf0c6c31a5591309dc336d3/vac/2/mvds.md) — Rename MVDS.md to mvds.md
|
||||
- **2024-01-30** — [`70326d1`](https://github.com/logos-co/logos-lips/blob/70326d135bf660f2ec171aeba9eacd51eaadcd6b/vac/2/MVDS.md) — Rename MVDS.md to MVDS.md
|
||||
- **2024-01-27** — [`472a7fd`](https://github.com/logos-co/logos-lips/blob/472a7fd440882c195898d025c14357d875db6ff3/vac/02/MVDS.md) — Rename vac/rfcs/02/README.md to vac/02/MVDS.md
|
||||
- **2024-01-25** — [`4362a7b`](https://github.com/logos-co/logos-lips/blob/4362a7b221ceb4e1f1a82536f350c8e77d8f03d4/vac/rfcs/02/README.md) — Create README.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
In this specification, we describe a minimum viable protocol for
|
||||
data synchronization inspired by the Bramble Synchronization Protocol ([BSP](https://code.briarproject.org/briar/briar-spec/blob/master/protocols/BSP.md)).
|
||||
This protocol is designed to ensure reliable messaging
|
||||
between peers across an unreliable peer-to-peer (P2P) network where
|
||||
they may be unreachable or unresponsive.
|
||||
|
||||
We present a reference implementation[^2]
|
||||
including a simulation to demonstrate its performance.
|
||||
|
||||
## Definitions
|
||||
|
||||
| Term | Description |
|
||||
|------------|-------------------------------------------------------------------------------------|
|
||||
| **Peer** | The other nodes that a node is connected to. |
|
||||
| **Record** | Defines a payload element of either the type `OFFER`, `REQUEST`, `MESSAGE` or `ACK` |
|
||||
| **Node** | Some process that is able to store data, do processing and communicate for MVDS. |
|
||||
|
||||
## Wire Protocol
|
||||
|
||||
### Secure Transport
|
||||
|
||||
This specification does not define anything related to the transport of packets.
|
||||
It is assumed that this is abstracted in such a way that
|
||||
any secure transport protocol could be easily implemented.
|
||||
Likewise, properties such as confidentiality, integrity, authenticity and
|
||||
forward secrecy are assumed to be provided by a layer below.
|
||||
|
||||
### Payloads
|
||||
|
||||
Payloads are implemented using [protocol buffers v3](https://developers.google.com/protocol-buffers/).
|
||||
|
||||
```protobuf
|
||||
syntax = "proto3";
|
||||
|
||||
package vac.mvds;
|
||||
|
||||
message Payload {
|
||||
repeated bytes acks = 5001;
|
||||
repeated bytes offers = 5002;
|
||||
repeated bytes requests = 5003;
|
||||
repeated Message messages = 5004;
|
||||
}
|
||||
|
||||
message Message {
|
||||
bytes group_id = 6001;
|
||||
int64 timestamp = 6002;
|
||||
bytes body = 6003;
|
||||
}
|
||||
```
|
||||
|
||||
*The payload field numbers are kept more "unique" to*
|
||||
*ensure no overlap with other protocol buffers.*
|
||||
|
||||
Each payload contains the following fields:
|
||||
|
||||
- **Acks:** This field contains a list (can be empty)
|
||||
of `message identifiers` informing the recipient that sender holds a specific message.
|
||||
- **Offers:** This field contains a list (can be empty)
|
||||
of `message identifiers` that the sender would like to give to the recipient.
|
||||
- **Requests:** This field contains a list (can be empty)
|
||||
of `message identifiers` that the sender would like to receive from the recipient.
|
||||
- **Messages:** This field contains a list of messages (can be empty).
|
||||
|
||||
**Message Identifiers:** Each `message` has a message identifier calculated by
|
||||
hashing the `group_id`, `timestamp` and `body` fields as follows:
|
||||
|
||||
```js
|
||||
HASH("MESSAGE_ID", group_id, timestamp, body);
|
||||
```
|
||||
|
||||
**Group Identifiers:** Each `message` is assigned into a **group**
|
||||
using the `group_id` field,
|
||||
groups are independent synchronization contexts between peers.
|
||||
|
||||
The current `HASH` function used is `sha256`.
|
||||
|
||||
## Synchronization
|
||||
|
||||
### State
|
||||
|
||||
We refer to `state` as a set of records for the types `OFFER`, `REQUEST` and
|
||||
`MESSAGE` that every node SHOULD store per peer.
|
||||
`state` MUST NOT contain `ACK` records as we do not retransmit those periodically.
|
||||
The following information is stored for records:
|
||||
|
||||
- **Type** - Either `OFFER`, `REQUEST` or `MESSAGE`
|
||||
- **Send Count** - The amount of times a record has been sent to a peer.
|
||||
- **Send Epoch** - The next epoch at which a record can be sent to a peer.
|
||||
|
||||
### Flow
|
||||
|
||||
A maximum of one payload SHOULD be sent to peers per epoch,
|
||||
this payload contains all `ACK`, `OFFER`, `REQUEST` and
|
||||
`MESSAGE` records for the specific peer.
|
||||
Payloads are created every epoch,
|
||||
containing reactions to previously received records by peers or
|
||||
new records being sent out by nodes.
|
||||
|
||||
Nodes MAY have two modes with which they can send records:
|
||||
`BATCH` and `INTERACTIVE` mode.
|
||||
The following rules dictate how nodes construct payloads
|
||||
every epoch for any given peer for both modes.
|
||||
|
||||
> ***NOTE:** A node may send messages both in interactive and in batch mode.*
|
||||
|
||||
#### Interactive Mode
|
||||
|
||||
- A node initially offers a `MESSAGE` when attempting to send it to a peer.
|
||||
This means an `OFFER` is added to the next payload and state for the given peer.
|
||||
- When a node receives an `OFFER`, a `REQUEST` is added to the next payload and
|
||||
state for the given peer.
|
||||
- When a node receives a `REQUEST` for a previously sent `OFFER`,
|
||||
the `OFFER` is removed from the state and
|
||||
the corresponding `MESSAGE` is added to the next payload and
|
||||
state for the given peer.
|
||||
- When a node receives a `MESSAGE`, the `REQUEST` is removed from the state and
|
||||
an `ACK` is added to the next payload for the given peer.
|
||||
- When a node receives an `ACK`,
|
||||
the `MESSAGE` is removed from the state for the given peer.
|
||||
- All records that require retransmission are added to the payload,
|
||||
given `Send Epoch` has been reached.
|
||||
|
||||

|
||||
|
||||
Figure 1: Delivery without retransmissions in interactive mode.
|
||||
|
||||
#### Batch Mode
|
||||
|
||||
1. When a node sends a `MESSAGE`,
|
||||
it is added to the next payload and the state for the given peer.
|
||||
2. When a node receives a `MESSAGE`,
|
||||
an `ACK` is added to the next payload for the corresponding peer.
|
||||
3. When a node receives an `ACK`,
|
||||
the `MESSAGE` is removed from the state for the given peer.
|
||||
4. All records that require retransmission are added to the payload,
|
||||
given `Send Epoch` has been reached.
|
||||
|
||||
<!-- diagram -->
|
||||
|
||||

|
||||
|
||||
Figure 2: Delivery without retransmissions in batch mode.
|
||||
|
||||
> ***NOTE:** Batch mode is higher bandwidth whereas interactive mode is higher latency.*
|
||||
|
||||
<!-- Interactions with state, flow chart with retransmissions? -->
|
||||
|
||||
### Retransmission
|
||||
|
||||
The record of the type `Type` SHOULD be retransmitted
|
||||
every time `Send Epoch` is smaller than or equal to the current epoch.
|
||||
|
||||
`Send Epoch` and `Send Count` MUST be increased every time a record is retransmitted.
|
||||
Although no function is defined on how to increase `Send Epoch`,
|
||||
it SHOULD be exponentially increased until reaching an upper bound
|
||||
where it then goes back to a lower epoch in order to
|
||||
prevent a record's `Send Epoch` from becoming too large.
|
||||
|
||||
> ***NOTE:** We do not retransmit `ACK`s as we do not know when they have arrived,
|
||||
therefore we simply resend them every time we receive a `MESSAGE`.*
|
||||
|
||||
## Formal Specification
|
||||
|
||||
MVDS has been formally specified using TLA+: <https://github.com/vacp2p/formalities/tree/master/MVDS>.
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
- Preston van Loon
|
||||
- Greg Markou
|
||||
- Rene Nayman
|
||||
- Jacek Sieka
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## Footnotes
|
||||
|
||||
[^2]: <https://github.com/vacp2p/mvds>
|
||||
@@ -1,170 +0,0 @@
|
||||
# 25/LIBP2P-DNS-DISCOVERY
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Libp2p Peer Discovery via DNS |
|
||||
| Slug | 25 |
|
||||
| Status | deleted |
|
||||
| Category | Standards Track |
|
||||
| Editor | Hanno Cornelius <hanno@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/25/libp2p-dns-discovery.md) — chore: add math support (#287)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/25/libp2p-dns-discovery.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/25/libp2p-dns-discovery.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/25/libp2p-dns-discovery.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/25/libp2p-dns-discovery.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/25/libp2p-dns-discovery.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/25/libp2p-dns-discovery.md) — Fix Files for Linting (#94)
|
||||
- **2024-03-21** — [`2eaa794`](https://github.com/logos-co/logos-lips/blob/2eaa7949c4abe7d14e2b9560e8c045bf2e937c9a/vac/25/libp2p-dns-discovery.md) — Broken Links + Change Editors (#26)
|
||||
- **2024-02-08** — [`a3ad14e`](https://github.com/logos-co/logos-lips/blob/a3ad14e6400392ccbc83ab401a605d80a92a6542/vac/25/libp2p-dns-discovery.md) — Create libp2p-dns-discovery.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
`25/LIBP2P-DNS-DISCOVERY` specifies a scheme to implement [`libp2p`](https://libp2p.io/)
|
||||
peer discovery via DNS for Waku v2.
|
||||
The generalised purpose is to retrieve an arbitrarily long, authenticated,
|
||||
updateable list of [`libp2p` peers](https://docs.libp2p.io/concepts/peer-id/)
|
||||
to bootstrap connection to a `libp2p` network.
|
||||
Since [`10/WAKU2`](../../../messaging/standards/core/10/waku2.md)
|
||||
currently specifies use of [`libp2p` peer identities](https://docs.libp2p.io/concepts/peer-id/),
|
||||
this method is suitable for a new Waku v2 node
|
||||
to discover other Waku v2 nodes to connect to.
|
||||
|
||||
This specification is largely based on [EIP-1459](https://eips.ethereum.org/EIPS/eip-1459),
|
||||
with the only deviation being the type of address being encoded (`multiaddr` vs `enr`).
|
||||
Also see [this earlier explainer](https://vac.dev/dns-based-discovery)
|
||||
for more background on the suitability of DNS based discovery for Waku v2.
|
||||
|
||||
## List encoding
|
||||
|
||||
The peer list MUST be encoded as a [Merkle tree](https://www.wikiwand.com/en/Merkle_tree).
|
||||
EIP-1459 specifies [the URL scheme](https://eips.ethereum.org/EIPS/eip-1459#specification)
|
||||
to refer to such a DNS node list.
|
||||
This specification uses the same approach, but with a `matree` scheme:
|
||||
|
||||
```yaml
|
||||
matree://<key>@<fqdn>
|
||||
```
|
||||
|
||||
where
|
||||
|
||||
- `matree` is the selected `multiaddr` Merkle tree scheme
|
||||
- `<fqdn>` is the fully qualified domain name on which the list can be found
|
||||
- `<key>` is the base32 encoding of the compressed 32-byte binary public key
|
||||
that signed the list.
|
||||
|
||||
The example URL from EIP-1459, adapted to the above scheme becomes:
|
||||
|
||||
```yaml
|
||||
matree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@peers.example.org
|
||||
```
|
||||
|
||||
Each entry within the Merkle tree MUST be contained within a [DNS TXT record](https://www.rfc-editor.org/rfc/rfc1035.txt)
|
||||
and stored in a subdomain (except for the base URL `matree` entry).
|
||||
The content of any TXT record
|
||||
MUST be small enough to fit into the 512-byte limit imposed on UDP DNS packets,
|
||||
which limits the number of hashes that can be contained within a branch entry.
|
||||
The subdomain name for each entry
|
||||
is the base32 encoding of the abbreviated keccak256 hash of its text content.
|
||||
See [this example](https://eips.ethereum.org/EIPS/eip-1459#dns-record-structure)
|
||||
of a fully populated tree for more information.
|
||||
|
||||
## Entry types
|
||||
|
||||
The following entry types are derived from [EIP-1459](https://eips.ethereum.org/EIPS/eip-1459)
|
||||
and adapted for use with `multiaddrs`:
|
||||
|
||||
## Root entry
|
||||
|
||||
The tree root entry MUST use the following format:
|
||||
|
||||
```yaml
|
||||
matree-root:v1 m=<ma-root> l=<link-root> seq=<sequence number> sig=<signature>
|
||||
```
|
||||
|
||||
where
|
||||
|
||||
- `ma-root` and `link-root` refer to the root hashes of subtrees
|
||||
containing `multiaddrs` and links to other subtrees, respectively
|
||||
- `sequence-number` is the tree's update sequence number.
|
||||
This number SHOULD increase with each update to the tree.
|
||||
- `signature` is a 65-byte secp256k1 EC signature
|
||||
over the keccak256 hash of the root record content,
|
||||
excluding the `sig=` part,
|
||||
encoded as URL-safe base64
|
||||
|
||||
## Branch entry
|
||||
|
||||
Branch entries MUST take the format:
|
||||
|
||||
```yaml
|
||||
matree-branch:<h₁>,<h₂>,...,<hₙ>
|
||||
```
|
||||
|
||||
where
|
||||
|
||||
- `<h₁>,<h₂>,...,<hₙ>` are the hashes of other subtree entries
|
||||
|
||||
## Leaf entries
|
||||
|
||||
There are two types of leaf entries:
|
||||
|
||||
### Link entries
|
||||
|
||||
For the subtree pointed to by `link-root`,
|
||||
leaf entries MUST take the format:
|
||||
|
||||
```yaml
|
||||
matree://<key>@<fqdn>
|
||||
```
|
||||
|
||||
which links to a different list located in another domain.
|
||||
|
||||
### `multiaddr` entries
|
||||
|
||||
For the subtree pointed to by `ma-root`,
|
||||
leaf entries MUST take the format:
|
||||
|
||||
```yaml
|
||||
ma:<multiaddr>
|
||||
```
|
||||
|
||||
which contains the `multiaddr` of a `libp2p` peer.
|
||||
|
||||
## Client protocol
|
||||
|
||||
A client MUST adhere to the [client protocol](https://eips.ethereum.org/EIPS/eip-1459#client-protocol)
|
||||
as specified in EIP-1459,
|
||||
and adapted for usage with `multiaddr` entry types below:
|
||||
|
||||
To find nodes at a given DNS name a client MUST perform the following steps:
|
||||
|
||||
1. Resolve the TXT record of the DNS name and
|
||||
check whether it contains a valid `matree-root:v1` entry.
|
||||
2. Verify the signature on the root against the known public key
|
||||
and check whether the sequence number is larger than or
|
||||
equal to any previous number seen for that name.
|
||||
3. Resolve the TXT record of a hash subdomain indicated in the record
|
||||
and verify that the content matches the hash.
|
||||
4. If the resolved entry is of type:
|
||||
|
||||
- `matree-branch`: parse the list of hashes and continue resolving them (step 3).
|
||||
- `ma`: import the `multiaddr` and add it to a local list of discovered nodes.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via
|
||||
[CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
1. [`10/WAKU2`](../../../messaging/standards/core/10/waku2.md)
|
||||
1. [EIP-1459: Client Protocol](https://eips.ethereum.org/EIPS/eip-1459#client-protocol)
|
||||
1. [EIP-1459: Node Discovery via DNS](https://eips.ethereum.org/EIPS/eip-1459)
|
||||
1. [`libp2p`](https://libp2p.io/)
|
||||
1. [`libp2p` peer identity](https://docs.libp2p.io/concepts/peer-id/)
|
||||
1. [Merkle trees](https://www.wikiwand.com/en/Merkle_tree)
|
||||
@@ -1,817 +0,0 @@
|
||||
# 32/RLN-V1
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Rate Limit Nullifier |
|
||||
| Slug | 32 |
|
||||
| Status | draft |
|
||||
| Category | Standards Track |
|
||||
| Editor | Aaryamann Challani <p1ge0nh8er@proton.me> |
|
||||
| Contributors | Barry Whitehat <barrywhitehat@protonmail.com>, Sanaz Taheri <sanaz@status.im>, Oskar Thorén <oskarth@titanproxy.com>, Onur Kilic <onurkilic1004@gmail.com>, Blagoj Dimovski <blagoj.dimovski@yandex.com>, Rasul Ibragimov <curryrasul@gmail.com> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/32/rln-v1.md) — chore: add math support (#287)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/32/rln-v1.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/32/rln-v1.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/32/rln-v1.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/32/rln-v1.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/32/rln-v1.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/32/rln-v1.md) — Fix Files for Linting (#94)
|
||||
- **2024-08-05** — [`eb25cd0`](https://github.com/logos-co/logos-lips/blob/eb25cd06d679e94409072a96841de16a6b3910d5/vac/32/rln-v1.md) — chore: replace email addresses (#86)
|
||||
- **2024-06-06** — [`cbefa48`](https://github.com/logos-co/logos-lips/blob/cbefa483fca219c3787b0ff0e3c64a6436a6a8cc/vac/32/rln-v1.md) — 32/RLN-V1: Move to Draft (#40)
|
||||
- **2024-03-21** — [`2eaa794`](https://github.com/logos-co/logos-lips/blob/2eaa7949c4abe7d14e2b9560e8c045bf2e937c9a/vac/32/rln-v1.md) — Broken Links + Change Editors (#26)
|
||||
- **2024-02-01** — [`94db406`](https://github.com/logos-co/logos-lips/blob/94db40661ef4df3456e2e6996164a1bbc5427914/vac/32/rln-v1.md) — Update rln-v1.md
|
||||
- **2024-02-01** — [`a23299f`](https://github.com/logos-co/logos-lips/blob/a23299fe32f3ba780770f26dadfcf17118bd478c/vac/32/rln-v1.md) — Update and rename RLN-V1.md to rln-v1.md
|
||||
- **2024-01-27** — [`539575b`](https://github.com/logos-co/logos-lips/blob/539575b01ca2cff7e5e596ecae9a5c1fc035cd79/vac/32/RLN-V1.md) — Create RLN-V1.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
The following specification covers the RLN construct
|
||||
as well as some auxiliary libraries useful for interacting with it.
|
||||
Rate limiting nullifier (RLN) is a construct based on zero-knowledge proofs that
|
||||
provides an anonymous rate-limited signaling/messaging framework
|
||||
suitable for decentralized (and centralized) environments.
|
||||
Anonymity refers to the unlinkability of messages to their owner.
|
||||
|
||||
## Motivation
|
||||
|
||||
RLN guarantees a messaging rate is enforced cryptographically
|
||||
while preserving the anonymity of the message owners.
|
||||
A wide range of applications can benefit from RLN and
|
||||
provide desirable security features.
|
||||
For example,
|
||||
an e-voting system can integrate RLN to contain the voting rate while
|
||||
protecting the voters-vote unlinkability.
|
||||
Another use case is to protect an anonymous messaging system against DDoS and
|
||||
spam attacks by constraining messaging rate of users.
|
||||
This latter use case is explained in [17/WAKU2-RLN-RELAY RFC](../../../messaging/standards/core/17/rln-relay.md).
|
||||
|
||||
## Wire Format Specification
|
||||
|
||||
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”,
|
||||
“SHOULD NOT”, “RECOMMENDED”, “MAY”, and
|
||||
“OPTIONAL” in this document are to be interpreted as described in [2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Flow
|
||||
|
||||
The users participate in the protocol by
|
||||
first registering to an application-defined group referred by the _membership group_.
|
||||
Registration to the group is mandatory for signaling in the application.
|
||||
After registration, group members can generate a zero-knowledge proof of membership
|
||||
for their signals and can participate in the application.
|
||||
Usually, the membership requires a financial or
|
||||
social stake which is beneficial for the prevention
|
||||
of inclusion of Sybils within the _membership group_.
|
||||
Group members are allowed to send one signal per external nullifier
|
||||
(an identifier that groups signals and can be thought of as a voting booth).
|
||||
If a user generates more signals than allowed,
|
||||
the user risks being slashed - by revealing his membership secret credentials.
|
||||
If the financial stake is put in place, the user also risks his stake being taken.
|
||||
|
||||
Generally the flow can be described by the following steps:
|
||||
|
||||
1. Registration
|
||||
2. Signaling
|
||||
3. Verification and slashing
|
||||
|
||||
### Registration
|
||||
|
||||
Depending on the application requirements,
|
||||
the registration can be implemented in different ways, for example:
|
||||
|
||||
- centralized registrations, by using a central server
|
||||
- decentralized registrations, by using a smart contract
|
||||
|
||||
The users' identity commitments
|
||||
(explained in section [User Identity](#user-identity)) are stored in a Merkle tree,
|
||||
and the users can obtain a Merkle proof proving that they are part of the group.
|
||||
|
||||
Also depending on the application requirements,
|
||||
usually a financial or social stake is introduced.
|
||||
An example for financial stake is:
|
||||
|
||||
For each registration a certain amount of ETH is required.
|
||||
An example for social stake is using [Interep](https://interep.link/) as a registry,
|
||||
users need to prove that they have a highly reputable social media account.
|
||||
|
||||
#### Implementation notes
|
||||
|
||||
##### User identity
|
||||
|
||||
The user's identity is composed of:
|
||||
|
||||
```js
|
||||
{
|
||||
identity_secret: [identity_nullifier, identity_trapdoor],
|
||||
identity_secret_hash: poseidonHash(identity_secret),
|
||||
identity_commitment: poseidonHash([identity_secret_hash])
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
For registration, the user MUST submit their `identity_commitment`
|
||||
(along with any additional registration requirements) to the registry.
|
||||
Upon registration, they SHOULD receive `leaf_index` value
|
||||
which represents their position in the Merkle tree.
|
||||
Receiving a `leaf_index` is not a hard requirement and is application specific.
|
||||
The other way around is
|
||||
the users calculating the `leaf_index` themselves upon successful registration.
|
||||
|
||||
### Signaling
|
||||
|
||||
After registration,
|
||||
the users can participate in the application by
|
||||
sending signals to the other participants in a decentralised manner or
|
||||
to a centralised server.
|
||||
Along with their signal,
|
||||
they MUST generate a zero-knowledge proof by
|
||||
using the circuit with the specification described above.
|
||||
|
||||
For generating a proof,
|
||||
the users need to obtain the required parameters or compute them themselves,
|
||||
depending on the application implementation and
|
||||
client libraries supported by the application.
|
||||
For example,
|
||||
the users MAY store the membership Merkle tree on their end and
|
||||
generate a Merkle proof whenever they want to generate a signal.
|
||||
|
||||
#### Implementation Notes
|
||||
|
||||
##### Signal hash
|
||||
|
||||
The signal hash can be generated by hashing the raw signal (or content)
|
||||
using the `keccak256` hash function.
|
||||
|
||||
##### External nullifier
|
||||
|
||||
The external nullifier MUST be computed as the Poseidon hash of the current epoch
|
||||
(e.g. a value equal to or
|
||||
derived from the current UNIX timestamp divided by the epoch length)
|
||||
and the RLN identifier.
|
||||
|
||||
```js
|
||||
|
||||
external_nullifier = poseidonHash([epoch, rln_identifier]);
|
||||
|
||||
```
|
||||
|
||||
##### Obtaining Merkle proof
|
||||
|
||||
The Merkle proof SHOULD be obtained locally or from a trusted third party.
|
||||
By using the [incremental Merkle tree algorithm](https://github.com/appliedzkp/incrementalquintree/blob/master/ts/IncrementalQuinTree.ts),
|
||||
the Merkle proof can be obtained by providing the `leaf_index` of the `identity_commitment`.
|
||||
The proof (`Merkle_proof`) is composed of the following fields:
|
||||
|
||||
```js
|
||||
|
||||
{
|
||||
root: bigint,
|
||||
indices: number[],
|
||||
path_elements: bigint[][]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
1. **root** - The root of membership group Merkle tree
|
||||
at the time of publishing the message
|
||||
2. **indices** - The index fields of the leafs in the Merkle tree -
|
||||
used by the Merkle tree algorithm for verification
|
||||
3. **path_elements** - Auxiliary data structure used for storing the path
|
||||
to the leaf - used by the Merkle proof algorithm for verification
|
||||
|
||||
##### Generating proof
|
||||
|
||||
For proof generation,
|
||||
the user MUST submit the following fields to the circuit:
|
||||
|
||||
```js
|
||||
|
||||
{
|
||||
identity_secret: identity_secret_hash,
|
||||
path_elements: Merkle_proof.path_elements,
|
||||
identity_path_index: Merkle_proof.indices,
|
||||
x: signal_hash,
|
||||
external_nullifier: external_nullifier
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
##### Calculating output
|
||||
|
||||
The proof output is calculated locally,
|
||||
in order for the required fields for proof verification
|
||||
to be sent along with the proof.
|
||||
The proof output is composed of the `y` share of the secret equation and the `internal_nullifier`.
|
||||
The `internal_nullifier` represents a unique fingerprint of a user
|
||||
for a given `epoch` and app.
|
||||
The following fields are needed for proof output calculation:
|
||||
|
||||
```js
|
||||
{
|
||||
identity_secret_hash: bigint,
|
||||
external_nullifier: bigint,
|
||||
x: bigint
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The output `[y, internal_nullifier]` is calculated in the following way:
|
||||
|
||||
```js
|
||||
|
||||
a_0 = identity_secret_hash;
|
||||
a_1 = poseidonHash([a_0, external_nullifier]);
|
||||
|
||||
y = a_0 + x * a_1;
|
||||
|
||||
internal_nullifier = poseidonHash([a_1]);
|
||||
|
||||
```
|
||||
|
||||
It relies on the properties of the [Shamir's Secret sharing scheme](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing).
|
||||
|
||||
##### Sending the output message
|
||||
|
||||
The user's output message (`output_message`),
|
||||
containing the signal SHOULD contain the following fields at minimum:
|
||||
|
||||
```js
|
||||
|
||||
{
|
||||
signal: signal, # non-hashed signal,
|
||||
proof: zk_proof,
|
||||
internal_nullifier: internal_nullifier,
|
||||
x: x, # signal_hash,
|
||||
y: y,
|
||||
rln_identifier: rln_identifier
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Additionally depending on the application,
|
||||
the following fields MAY be required:
|
||||
|
||||
```js
|
||||
|
||||
{
|
||||
root: Merkle_proof.root,
|
||||
epoch: epoch
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Verification and slashing
|
||||
|
||||
The slashing implementation is dependent on the type of application.
|
||||
If the application is implemented in a centralised manner,
|
||||
and everything is stored on a single server,
|
||||
the slashing will be implemented only on the server.
|
||||
Otherwise if the application is distributed,
|
||||
the slashing will be implemented on each user's client.
|
||||
|
||||
#### Notes from Implementation
|
||||
|
||||
Each user of the protocol
|
||||
(server or otherwise) MUST store metadata for each message received by each user,
|
||||
for the given `epoch`.
|
||||
The data can be deleted when the `epoch` passes.
|
||||
Storing metadata is REQUIRED,
|
||||
so that if a user sends more than one unique signal per `epoch`,
|
||||
they can be slashed and removed from the protocol.
|
||||
The metadata stored contains the `x`, `y` shares and
|
||||
the `internal_nullifier` for the user for each message.
|
||||
If enough such shares are present, the user's secret can be retrieved.
|
||||
|
||||
One way of storing received metadata (`messaging_metadata`) is the following format:
|
||||
|
||||
```js
|
||||
|
||||
{
|
||||
[external_nullifier]: {
|
||||
[internal_nullifier]: {
|
||||
x_shares: [],
|
||||
y_shares: []
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
##### Verification
|
||||
|
||||
The output message verification consists of the following steps:
|
||||
|
||||
- `external_nullifier` correctness
|
||||
- non-duplicate message check
|
||||
- `zk_proof` zero-knowledge proof verification
|
||||
- spam verification
|
||||
|
||||
**1. `external_nullifier` correctness**
|
||||
Upon received `output_message`,
|
||||
first the `epoch` and `rln_identifier` fields are checked,
|
||||
to ensure that the message matches the current `external_nullifier`.
|
||||
If the `external_nullifier` is correct the verification continues, otherwise,
|
||||
the message is discarded.
|
||||
|
||||
**2. non-duplicate message check**
|
||||
The received message is checked to ensure it is not duplicate.
|
||||
The duplicate message check is performed by verifying that the `x` and `y`
|
||||
fields do not exist in the `messaging_metadata` object.
|
||||
If the `x` and `y` fields exist in the `x_shares` and
|
||||
`y_shares` array for the `external_nullifier` and
|
||||
the `internal_nullifier` the message can be considered as a duplicate.
|
||||
Duplicate messages are discarded.
|
||||
|
||||
**3. `zk_proof` verification**
|
||||
|
||||
The `zk_proof` SHOULD be verified by providing the `zk_proof` field
|
||||
to the circuit verifier along with the `public_signal`:
|
||||
|
||||
```js
|
||||
|
||||
[
|
||||
y,
|
||||
Merkle_proof.root,
|
||||
internal_nullifier,
|
||||
x, # signal_hash
|
||||
external_nullifier
|
||||
]
|
||||
|
||||
```
|
||||
|
||||
If the proof verification is correct,
|
||||
the verification continues, otherwise the message is discarded.
|
||||
|
||||
**4. Double signaling verification**
|
||||
After the proof is verified the `x`, and
|
||||
`y` fields are added to the `x_shares` and `y_shares`
|
||||
arrays of the `messaging_metadata` `external_nullifier` and
|
||||
`internal_nullifier` object.
|
||||
If the length of the arrays is equal to the signaling threshold (`limit`),
|
||||
the user can be slashed.
|
||||
|
||||
##### Slashing
|
||||
|
||||
After the verification,
|
||||
the user SHOULD be slashed if two different shares are present
|
||||
to reconstruct their `identity_secret_hash` from `x_shares` and
|
||||
`y_shares` fields, for their `internal_nullifier`.
|
||||
The secret can be retrieved by the properties of the Shamir's secret sharing scheme.
|
||||
In particular the secret (`a_0`) can be retrieved by computing [Lagrange polynomials](https://en.wikipedia.org/wiki/Lagrange_polynomial).
|
||||
|
||||
After the secret is retrieved,
|
||||
the user's `identity_commitment` SHOULD be generated from the secret and
|
||||
it can be used for removing the user from the membership Merkle tree
|
||||
(zeroing out the leaf that contains the user's `identity_commitment`).
|
||||
Additionally, depending on the application the `identity_secret_hash`
|
||||
MAY be used for taking the user's provided stake.
|
||||
|
||||
### Technical overview
|
||||
|
||||
The main RLN construct is implemented using a
|
||||
[ZK-SNARK](https://z.cash/technology/zksnarks/) circuit.
|
||||
However, it is helpful to describe
|
||||
the other necessary outside components for interaction with the circuit,
|
||||
which together with the ZK-SNARK circuit enable the above mentioned features.
|
||||
|
||||
#### Terminology
|
||||
|
||||
| Term | Description |
|
||||
|---------------------------|-------------------------------------------------------------------------------------|
|
||||
| **ZK-SNARK** | [zksnarks](https://z.cash/technology/zksnarks/) |
|
||||
| **Stake** | Financial or social stake required for registering in the RLN applications. Common stake examples are: locking cryptocurrency (financial), linking reputable social identity. |
|
||||
| **Identity secret** | An array of two unique random components (identity nullifier and identity trapdoor), which must be kept private by the user. Secret hash and identity commitment are derived from this array. |
|
||||
| **Identity nullifier** | Random 32 byte value used as component for identity secret generation. |
|
||||
| **Identity trapdoor** | Random 32 byte value used as component for identity secret generation. |
|
||||
| **Identity secret hash** | The hash of the identity secret, obtained using the Poseidon hash function. It is used for deriving the identity commitment of the user, and as a private input for zero-knowledge proof generation. The secret hash should be kept private by the user. |
|
||||
| **Identity commitment** | Hash obtained from the `Identity secret hash` by using the poseidon hash function. It is used by the users for registering in the protocol. |
|
||||
| **Signal** | The message generated by a user. It is an arbitrary bit string that may represent a chat message, a URL request, protobuf message, etc. |
|
||||
| **Signal hash** | Keccak256 hash of the signal modulo circuit's field characteristic, used as an input in the RLN circuit. |
|
||||
| **RLN Identifier** | Random finite field value unique per RLN app. It is used for additional cross-application security. The role of the RLN identifier is protection of the user secrets from being compromised when signals are being generated with the same credentials in different apps. |
|
||||
| **RLN membership tree** | Merkle tree data structure, filled with identity commitments of the users. Serves as a data structure that ensures user registrations. |
|
||||
| **Merkle proof** | Proof that a user is member of the RLN membership tree. |
|
||||
|
||||
#### RLN Zero-Knowledge Circuit specific terms
|
||||
|
||||
| Term | Description |
|
||||
|---------------------------|-------------------------------------------------------------------------------------|
|
||||
| **x** | Keccak hash of the signal, same as signal hash (Defined above). |
|
||||
| **A0** | The identity secret hash. |
|
||||
| **A1** | Poseidon hash of [A0, External nullifier] (see about External nullifier below). |
|
||||
| **y** | The result of the polynomial equation (y = a0 + a1*x). The public output of the circuit. |
|
||||
| **External nullifier** | Poseidon hash of [Epoch, RLN Identifier]. An identifier that groups signals and can be thought of as a voting booth. |
|
||||
| **Internal nullifier** | Poseidon hash of [A1]. This field ensures that a user can send only one valid signal per external nullifier without risking being slashed. Public input of the circuit. |
|
||||
|
||||
#### Zero-Knowledge Circuits specification
|
||||
|
||||
Anonymous signaling with a controlled rate limit
|
||||
is enabled by proving that the user is part of a group
|
||||
which has high barriers to entry (form of stake) and
|
||||
enabling secret reveal if more than 1 unique signal is produced per external nullifier.
|
||||
The membership part is implemented using
|
||||
membership [Merkle trees](https://en.wikipedia.org/wiki/Merkle_tree) and Merkle proofs,
|
||||
while the secret reveal part is enabled by using the Shamir's Secret Sharing scheme.
|
||||
Essentially the protocol requires the users to generate zero-knowledge proof
|
||||
to be able to send signals and
|
||||
participate in the application.
|
||||
The zero knowledge proof proves that the user is member of a group,
|
||||
but also enforces the user to share part of their secret
|
||||
for each signal in an external nullifier.
|
||||
The external nullifier is usually represented by timestamp or a time interval.
|
||||
It can also be thought of as a voting booth in voting applications.
|
||||
|
||||
The zero-knowledge Circuit is implemented using a [Groth-16 ZK-SNARK](https://eprint.iacr.org/2016/260.pdf),
|
||||
using the [circomlib](https://docs.circom.io/) library.
|
||||
|
||||
##### System parameters
|
||||
|
||||
- `DEPTH` - Merkle tree depth
|
||||
|
||||
##### Circuit parameters
|
||||
|
||||
###### Public Inputs
|
||||
|
||||
- `x`
|
||||
- `external_nullifier`
|
||||
|
||||
###### Private Inputs
|
||||
|
||||
- `identity_secret_hash`
|
||||
- `path_elements` - rln membership proof component
|
||||
- `identity_path_index` - rln membership proof component
|
||||
|
||||
###### Outputs
|
||||
|
||||
- `y`
|
||||
- `root` - the rln membership tree root
|
||||
- `internal_nullifier`
|
||||
|
||||
##### Hash function
|
||||
|
||||
Canonical [Poseidon hash implementation](https://eprint.iacr.org/2019/458.pdf)
|
||||
is used,
|
||||
as implemented in the [circomlib library](https://github.com/iden3/circomlib/blob/master/circuits/poseidon.circom),
|
||||
according to the Poseidon paper.
|
||||
This Poseidon hash version (canonical implementation) uses the following parameters:
|
||||
|
||||
| Hash inputs | `t` | `RF` | `RP`|
|
||||
|:---:|:---:|:---:|:---:|
|
||||
|1 | 2 | 8 | 56|
|
||||
|2 | 3 | 8 | 57|
|
||||
|3 | 4 | 8 | 56|
|
||||
|4 | 5 | 8 | 60|
|
||||
|5 | 6 | 8 | 60|
|
||||
|6 | 7 | 8 | 63|
|
||||
|7 | 8 | 8 | 64|
|
||||
|8 | 9 | 8 | 63|
|
||||
|
||||
##### Membership implementation
|
||||
|
||||
For a valid signal, a user's `identity_commitment`
|
||||
(more on identity commitments below) must exist in the identity membership tree.
|
||||
Membership is proven by providing a membership proof (witness).
|
||||
The fields from the membership proof REQUIRED for the verification are:
|
||||
`path_elements` and `identity_path_index`.
|
||||
|
||||
[IncrementalQuinTree](https://github.com/appliedzkp/incrementalquintree)
|
||||
algorithm is used for constructing the Membership Merkle tree.
|
||||
The circuits are reused from this repository.
|
||||
You can find out more details about the IncrementalQuinTree algorithm [here](https://ethresear.ch/t/gas-and-circuit-constraint-benchmarks-of-binary-and-quinary-incremental-Merkle-trees-using-the-poseidon-hash-function/7446).
|
||||
|
||||
#### Slashing and Shamir's Secret Sharing
|
||||
|
||||
Slashing is enabled by using polynomials and [Shamir's Secret sharing](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing).
|
||||
In order to produce a valid proof,
|
||||
the `identity_secret_hash` is provided as a private input to the circuit.
|
||||
Then a secret equation is created in the form of:
|
||||
|
||||
```js
|
||||
|
||||
y = a_0 + x * a_1;
|
||||
|
||||
```
|
||||
|
||||
where `a_0` is the `identity_secret_hash` and `a_1 = hash(a_0, external nullifier)`.
|
||||
Along with the generated proof,
|
||||
the users MUST provide a `(x, y)` share which satisfies the line equation,
|
||||
in order for their proof to be verified.
|
||||
`x` is the hashed signal, while the `y` is the circuit output.
|
||||
With more than one pair of unique shares, anyone can derive `a_0`, i.e. the `identity_secret_hash`.
|
||||
The hash of a signal will be the evaluation point `x`.
|
||||
In this way,
|
||||
a member who sends more than one unique signal per `external_nullifier`
|
||||
risks their identity secret being revealed.
|
||||
|
||||
Note that shares used in different epochs and
|
||||
different RLN apps cannot be used to derive the `identity_secret_hash`.
|
||||
|
||||
Thanks to the `external_nullifier` definition,
|
||||
also shares computed from same secret within same epoch but
|
||||
in different RLN apps cannot be used to derive the identity secret hash.
|
||||
|
||||
The `rln_identifier` is a random value from a finite field, unique per RLN app,
|
||||
and is used for additional cross-application security -
|
||||
to protect the user secrets being compromised if they use
|
||||
the same credentials across different RLN apps.
|
||||
If `rln_identifier` is not present,
|
||||
the user uses the same credentials and
|
||||
sends a different message for two different RLN apps using the same `external_nullifier`,
|
||||
then their user signals can be grouped by the `internal_nullifier`
|
||||
which could lead to the user's secret being revealed.
|
||||
This is because two separate signals under the same `internal_nullifier`
|
||||
can be treated as rate limiting violation.
|
||||
With adding the `rln_identifier` field we obscure the `internal_nullifier`,
|
||||
so this kind of attack can be hardened because
|
||||
we don't have the same `internal_nullifier` anymore.
|
||||
|
||||
#### Identity credentials generation
|
||||
|
||||
In order to be able to generate valid proofs,
|
||||
the users MUST be part of the identity membership Merkle tree.
|
||||
They are part of the identity membership Merkle tree if
|
||||
their `identity_commitment` is placed in a leaf in the tree.
|
||||
|
||||
The identity credentials of a user are composed of:
|
||||
|
||||
- `identity_secret`
|
||||
- `identity_secret_hash`
|
||||
- `identity_commitment`
|
||||
|
||||
##### `identity_secret`
|
||||
|
||||
The `identity_secret` is generated in the following way:
|
||||
|
||||
```js
|
||||
|
||||
identity_nullifier = random_32_byte_buffer;
|
||||
identity_trapdoor = random_32_byte_buffer;
|
||||
identity_secret = [identity_nullifier, identity_trapdoor];
|
||||
|
||||
```
|
||||
|
||||
The same secret SHOULD NOT be used accross different protocols,
|
||||
because revealing the secret at one protocol
|
||||
could break privacy for the user in the other protocols.
|
||||
|
||||
##### `identity_secret_hash`
|
||||
|
||||
The `identity_secret_hash` is generated by obtaining a Poseidon hash
|
||||
of the `identity_secret` array:
|
||||
|
||||
```js
|
||||
|
||||
identity_secret_hash = poseidonHash(identity_secret);
|
||||
|
||||
```
|
||||
|
||||
##### `identity_commitment`
|
||||
|
||||
The `identity_commitment` is generated by obtaining a Poseidon hash of the `identity_secret_hash`:
|
||||
|
||||
```js
|
||||
|
||||
identity_commitment = poseidonHash([identity_secret_hash]);
|
||||
|
||||
```
|
||||
|
||||
### Appendix A: Security Considerations
|
||||
|
||||
RLN is an experimental and still un-audited technology.
|
||||
This means that the circuits have not been yet audited.
|
||||
Another consideration is the security of the underlying primitives.
|
||||
zk-SNARKS require a trusted setup for generating a prover and verifier keys.
|
||||
The standard for this is to use trusted
|
||||
[Multi-Party Computation (MPC)](https://en.wikipedia.org/wiki/Secure_multi-party_computation)
|
||||
ceremony, which requires two phases.
|
||||
Trusted MPC ceremony has not yet been performed for the RLN circuits.
|
||||
|
||||
#### SSS Security Assumptions
|
||||
|
||||
Shamir-Secret Sharing requires polynomial coefficients
|
||||
to be independent of each other.
|
||||
However, `a_1` depends on `a_0` through the Poseidon hash algorithm.
|
||||
Due to the design of Poseidon,
|
||||
it is possible to
|
||||
[attack](https://github.com/Rate-Limiting-Nullifier/rln-circuits/pull/7#issuecomment-1416085627)
|
||||
the protocol.
|
||||
It was decided _not_ to change the circuit's design,
|
||||
since at the moment the attack is infeasible.
|
||||
Therefore, implementers must be aware that the current version
|
||||
provides approximately 160-bit security and not 254.
|
||||
Possible improvements:
|
||||
|
||||
- [change the circuit](https://github.com/Rate-Limiting-Nullifier/rln-circuits/pull/7#issuecomment-1416085627)
|
||||
to make coefficients independent;
|
||||
- switch to other hash function (Keccak, SHA);
|
||||
|
||||
### Appendix B: Identity Scheme Choice
|
||||
|
||||
The hashing scheme used is based on the design decisions
|
||||
which also include the Semaphore circuits.
|
||||
Our goal was to ensure compatibility of the secrets for apps that use Semaphore and
|
||||
RLN circuits while also not compromising on security because of using the same secrets.
|
||||
|
||||
For example, let's say there is a voting app that uses Semaphore,
|
||||
and also a chat app that uses RLN.
|
||||
The UX would be better if
|
||||
the users would not need to care about complicated identity management
|
||||
(secrets and commitments) they use for each app,
|
||||
and it would be much better if they could use a single id commitment for this.
|
||||
Also, in some cases this kind of dependency is required -
|
||||
RLN chat app using Interep as a registry (instead of using financial stake).
|
||||
One potential concern about this interoperability is a slashed user
|
||||
on the RLN app side having their security compromised
|
||||
on the semaphore side apps as well.
|
||||
i.e. obtaining the user's secret,
|
||||
anyone would be able to generate valid semaphore proofs as the slashed user.
|
||||
We don't want that,
|
||||
and we should keep user's app specific security threats
|
||||
in the domain of that app alone.
|
||||
|
||||
To achieve the above interoperability UX
|
||||
while preventing the shared app security model
|
||||
(i.e slashing user on an RLN app having impact on Semaphore apps),
|
||||
we had to do the following with regard to the identity secret and identity commitment:
|
||||
|
||||
```js
|
||||
|
||||
identity_secret = [identity_nullifier, identity_trapdoor];
|
||||
identity_secret_hash = poseidonHash(identity_secret);
|
||||
identity_commitment = poseidonHash([identity_secret_hash]);
|
||||
|
||||
```
|
||||
|
||||
Secret components for generating Semaphore proof:
|
||||
|
||||
- `identity_nullifier`
|
||||
- `identity_trapdoor`
|
||||
|
||||
Secret components for generating RLN proof:
|
||||
|
||||
- `identity_secret_hash`
|
||||
|
||||
When a user is slashed on the RLN app side, their `identity_secret_hash` is revealed.
|
||||
However, a semaphore proof can't be generated because
|
||||
we do not know the user's `identity_nullifier` and `identity_trapdoor`.
|
||||
|
||||
With this design we achieve:
|
||||
|
||||
`identity_commitment` (Semaphore) == `identity_commitment` (RLN)
|
||||
secret (semaphore) != secret (RLN).
|
||||
|
||||
This is the only option we had for the scheme
|
||||
in order to satisfy the properties described above.
|
||||
|
||||
Also, for RLN we do a single secret component input for the circuit.
|
||||
Thus we need to hash the secret array (two components) to a secret hash,
|
||||
and we use that as a secret component input.
|
||||
|
||||
### Appendix C: Auxiliary Tooling
|
||||
|
||||
There are few additional tools implemented for easier integrations and
|
||||
usage of the RLN protocol.
|
||||
|
||||
[`zerokit`](https://github.com/vacp2p/zerokit) is a set of Zero Knowledge modules,
|
||||
written in Rust and designed to be used in many different environments.
|
||||
Among different modules, it supports `Semaphore` and `RLN`.
|
||||
|
||||
[`zk-kit`](https://github.com/appliedzkp/zk-kit)
|
||||
is a typescript library which exposes APIs for identity credentials generation,
|
||||
as well as proof generation.
|
||||
It supports various protocols (`Semaphore`, `RLN`).
|
||||
|
||||
[`zk-keeper`](https://github.com/akinovak/zk-keeper)
|
||||
is a browser plugin which allows for safe credential storing and
|
||||
proof generation.
|
||||
You can think of it as MetaMask for zero-knowledge proofs.
|
||||
It uses `zk-kit` under the hood.
|
||||
|
||||
### Appendix D: Example Usage
|
||||
|
||||
The following examples are code snippets using the `zerokit` RLN module.
|
||||
The examples are written in [rust](https://www.rust-lang.org/).
|
||||
|
||||
#### Creating a RLN Object
|
||||
|
||||
```rust
|
||||
|
||||
use rln::protocol::*;
|
||||
use rln::public::*;
|
||||
use std::io::Cursor;
|
||||
// We set the RLN parameters:
|
||||
// - the tree height;
|
||||
// - the circuit resource folder (requires a trailing "/").
|
||||
let tree_height = 20;
|
||||
let resources = Cursor::new("../zerokit/rln/resources/tree_height_20/");
|
||||
// We create a new RLN instance
|
||||
let mut rln = RLN::new(tree_height, resources);
|
||||
|
||||
```
|
||||
|
||||
#### Generating Identity Credentials
|
||||
|
||||
```rust
|
||||
|
||||
// We generate an identity tuple
|
||||
let mut buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.extended_key_gen(&mut buffer).unwrap();
|
||||
// We deserialize the keygen output to obtain
|
||||
// the identity_secret and id_commitment
|
||||
let (identity_trapdoor, identity_nullifier, identity_secret_hash, id_commitment) = deserialize_identity_tuple(buffer.into_inner());
|
||||
|
||||
```
|
||||
|
||||
#### Adding ID Commitment to the RLN Merkle Tree
|
||||
|
||||
```rust
|
||||
|
||||
// We define the tree index where id_commitment will be added
|
||||
let id_index = 10;
|
||||
// We serialize id_commitment and pass it to set_leaf
|
||||
let mut buffer = Cursor::new(serialize_field_element(id_commitment));
|
||||
rln.set_leaf(id_index, &mut buffer).unwrap();
|
||||
|
||||
```
|
||||
|
||||
#### Setting Epoch and Signal
|
||||
|
||||
```rust
|
||||
|
||||
// We generate epoch from a date seed and we ensure it is
|
||||
// mapped to a field element by hashing-to-field its content
|
||||
let epoch = hash_to_field(b"Today at noon, this year");
|
||||
// We set our signal
|
||||
let signal = b"RLN is awesome";
|
||||
|
||||
```
|
||||
|
||||
#### Generating Proof
|
||||
|
||||
```rust
|
||||
|
||||
// We prepare input to the proof generation routine
|
||||
let proof_input = prepare_prove_input(identity_secret, id_index, epoch, signal);
|
||||
// We generate a RLN proof for proof_input
|
||||
let mut in_buffer = Cursor::new(proof_input);
|
||||
let mut out_buffer = Cursor::new(Vec::<u8>::new());
|
||||
rln.generate_rln_proof(&mut in_buffer, &mut out_buffer)
|
||||
.unwrap();
|
||||
// We get the public outputs returned by the circuit evaluation
|
||||
let proof_data = out_buffer.into_inner();
|
||||
|
||||
```
|
||||
|
||||
#### Verifying Proof
|
||||
|
||||
```rust
|
||||
|
||||
// We prepare input to the proof verification routine
|
||||
let verify_data = prepare_verify_input(proof_data, signal);
|
||||
// We verify the zero-knowledge proof against the provided proof values
|
||||
let mut in_buffer = Cursor::new(verify_data);
|
||||
let verified = rln.verify(&mut in_buffer).unwrap();
|
||||
// We ensure the proof is valid
|
||||
assert!(verified);
|
||||
|
||||
```
|
||||
|
||||
For more details please visit the
|
||||
[`zerokit`](https://github.com/vacp2p/zerokit) library.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/)
|
||||
|
||||
## References
|
||||
|
||||
- [17/WAKU2-RLN-RELAY RFC](../../../messaging/standards/core/17/rln-relay.md)
|
||||
- [Interep](https://interep.link/)
|
||||
- [incremental Merkle tree algorithm](https://github.com/appliedzkp/incrementalquintree/blob/master/ts/IncrementalQuinTree.ts)
|
||||
- [Shamir's Secret sharing scheme](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing)
|
||||
- [Lagrange polynomials](https://en.wikipedia.org/wiki/Lagrange_polynomial)
|
||||
- [ZK-SNARK](https://z.cash/technology/zksnarks/)
|
||||
- [Merkle trees](https://en.wikipedia.org/wiki/Merkle_tree)
|
||||
- [Groth-16 ZK-SNARK](https://eprint.iacr.org/2016/260.pdf)
|
||||
- [circomlib](https://docs.circom.io/)
|
||||
- [Poseidon hash implementation](https://eprint.iacr.org/2019/458.pdf)
|
||||
- [circomlib library](https://github.com/iden3/circomlib/blob/master/circuits/poseidon.circom)
|
||||
- [IncrementalQuinTree](https://github.com/appliedzkp/incrementalquintree)
|
||||
- [IncrementalQuinTree algorithm](https://ethresear.ch/t/gas-and-circuit-constraint-benchmarks-of-binary-and-quinary-incremental-Merkle-trees-using-the-poseidon-hash-function/7446)
|
||||
- [Multi-Party Computation (MPC)](https://en.wikipedia.org/wiki/Secure_multi-party_computation)
|
||||
- [Poseidon hash attack](https://github.com/Rate-Limiting-Nullifier/rln-circuits/pull/7#issuecomment-1416085627)
|
||||
- [zerokit](https://github.com/vacp2p/zerokit)
|
||||
- [zk-kit](https://github.com/appliedzkp/zk-kit)
|
||||
- [zk-keeper](https://github.com/akinovak/zk-keeper)
|
||||
- [rust](https://www.rust-lang.org/)
|
||||
|
||||
### Informative
|
||||
|
||||
- [1] [privacy-scaling-explorations](https://medium.com/privacy-scaling-explorations/rate-limiting-nullifier-a-spam-protection-mechanism-for-anonymous-environments-bbe4006a57d)
|
||||
- [2] [security-considerations-of-zk-snark-parameter-multi-party-computation](https://research.nccgroup.com/2020/06/24/security-considerations-of-zk-snark-parameter-multi-party-computation/)
|
||||
- [3] [rln-circuits](https://github.com/Rate-Limiting-Nullifier/rln-circuits/)
|
||||
- [4] [rln docs](https://rate-limiting-nullifier.github.io/rln-docs/)
|
||||
@@ -1,118 +0,0 @@
|
||||
# 4/MVDS-META
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | MVDS Metadata Field |
|
||||
| Slug | 4 |
|
||||
| Status | draft |
|
||||
| Category | Standards Track |
|
||||
| Editor | Sanaz Taheri <sanaz@status.im> |
|
||||
| Contributors | Dean Eigenmann <dean@status.im>, Andrea Maria Piana <andreap@status.im>, Oskar Thorén <oskarth@titanproxy.com> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/4/mvds-meta.md) — chore: add math support (#287)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/4/mvds-meta.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/4/mvds-meta.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/4/mvds-meta.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/4/mvds-meta.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/4/mvds-meta.md) — Fix Files for Linting (#94)
|
||||
- **2024-02-01** — [`3a396b5`](https://github.com/logos-co/logos-lips/blob/3a396b5fb111e73750046afb2ca10d0c28e72e83/vac/4/mvds-meta.md) — Update and rename README.md to mvds-meta.md
|
||||
- **2024-01-30** — [`2e80c3b`](https://github.com/logos-co/logos-lips/blob/2e80c3bb3dc69c45fb7a932bbfaedded3f116f71/vac/4/README.md) — Create README.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
In this specification, we describe a method to construct message history that
|
||||
will aid the consistency guarantees of [2/MVDS](../2/mvds.md).
|
||||
Additionally,
|
||||
we explain how data sync can be used for more lightweight messages that
|
||||
do not require full synchronization.
|
||||
|
||||
## Motivation
|
||||
|
||||
In order for more efficient synchronization of conversational messages,
|
||||
information should be provided allowing a node to more effectively synchronize
|
||||
the dependencies for any given message.
|
||||
|
||||
## Format
|
||||
|
||||
We introduce the metadata message which is used to convey information about a message
|
||||
and how it SHOULD be handled.
|
||||
|
||||
```protobuf
|
||||
package vac.mvds;
|
||||
|
||||
message Metadata {
|
||||
repeated bytes parents = 1;
|
||||
bool ephemeral = 2;
|
||||
}
|
||||
```
|
||||
|
||||
Nodes MAY transmit a `Metadata` message by extending the MVDS [message](../2/mvds.md/#payloads)
|
||||
with a `metadata` field.
|
||||
|
||||
```diff
|
||||
message Message {
|
||||
bytes group_id = 6001;
|
||||
int64 timestamp = 6002;
|
||||
bytes body = 6003;
|
||||
+ Metadata metadata = 6004;
|
||||
}
|
||||
```
|
||||
|
||||
### Fields
|
||||
|
||||
| Name | Description |
|
||||
| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `parents` | list of parent [`message identifier`s](../2/mvds.md/#payloads) for the specific message. |
|
||||
| `ephemeral` | indicates whether a message is ephemeral or not. |
|
||||
|
||||
## Usage
|
||||
|
||||
### `parents`
|
||||
|
||||
This field contains a list of parent [`message identifier`s](../2/mvds.md/#payloads)
|
||||
for the specific message.
|
||||
It MUST NOT contain any messages as parent whose `ack` flag was set to `false`.
|
||||
This establishes a directed acyclic graph (DAG)[^2] of persistent messages.
|
||||
|
||||
Nodes MAY buffer messages until dependencies are satisfied for causal consistency[^3],
|
||||
they MAY also pass the messages straight away for eventual consistency[^4].
|
||||
|
||||
A parent is any message before a new message that
|
||||
a node is aware of that has no children.
|
||||
|
||||
The number of parents for a given message is bound by [0, N],
|
||||
where N is the number of nodes participating in the conversation,
|
||||
therefore the space requirements for the `parents` field is O(N).
|
||||
|
||||
If a message has no parents it is considered a root.
|
||||
There can be multiple roots, which might be disconnected,
|
||||
giving rise to multiple DAGs.
|
||||
|
||||
### `ephemeral`
|
||||
|
||||
When the `ephemeral` flag is set to `false`,
|
||||
a node MUST send an acknowledgment when they have received and processed a message.
|
||||
If it is set to `true`, it SHOULD NOT send any acknowledgment.
|
||||
The flag is `false` by default.
|
||||
|
||||
Nodes MAY decide to not persist ephemeral messages,
|
||||
however they MUST NOT be shared as part of the message history.
|
||||
|
||||
Nodes SHOULD send ephemeral messages in batch mode.
|
||||
Their delivery does not need to be guaranteed.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## Footnotes
|
||||
|
||||
1: [2/MVDS](../2/mvds.md)
|
||||
2: [directed_acyclic_graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph)
|
||||
3: Jepsen. [Causal Consistency](https://jepsen.io/consistency/models/causal)
|
||||
Jepsen, LLC.
|
||||
4: <https://en.wikipedia.org/wiki/Eventual_consistency>
|
||||
@@ -1,4 +0,0 @@
|
||||
# IFT-TS Raw Specifications
|
||||
|
||||
All IFT-TS specifications that have not reached **draft** status will live in this repository.
|
||||
To learn more about **raw** specifications, take a look at [1/COSS](1/coss.md).
|
||||
@@ -1,267 +0,0 @@
|
||||
# HASHGRAPHLIKE CONSENSUS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Hashgraphlike Consensus Protocol |
|
||||
| Slug | 73 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Ugur Sen [ugur@status.im](mailto:ugur@status.im) |
|
||||
| Contributors | seemenkina [ekaterina@status.im](mailto:ekaterina@status.im) |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/consensus-hashgraphlike.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/consensus-hashgraphlike.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/consensus-hashgraphlike.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/consensus-hashgraphlike.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/consensus-hashgraphlike.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-09-15** — [`f051117`](https://github.com/logos-co/logos-lips/blob/f051117d3782f66d773aaf27845c60a066ac10b6/vac/raw/consensus-hashgraphlike.md) — VAC-RAW/Consensus-hashgraphlike RFC (#142)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document specifies a scalable, decentralized, and Byzantine Fault Tolerant (BFT)
|
||||
consensus mechanism inspired by Hashgraph, designed for binary decision-making in P2P networks.
|
||||
|
||||
## Motivation
|
||||
|
||||
Consensus is one of the essential components of decentralization.
|
||||
In particular, in decentralized group messaging applications it is used for
|
||||
binary decision-making to govern the group.
|
||||
Therefore, each user contributes to the decision-making process.
|
||||
Besides achieving decentralization, the consensus mechanism MUST be strong:
|
||||
|
||||
- Under the assumption of at least `2/3` honest users in the network.
|
||||
|
||||
- Each user MUST conclude the same decision and scalability:
|
||||
message propagation in the network MUST occur within `O(log n)` rounds,
|
||||
where `n` is the total number of peers,
|
||||
in order to preserve the scalability of the messaging application.
|
||||
|
||||
## Format Specification
|
||||
|
||||
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”,
|
||||
“SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document
|
||||
are to be interpreted as described in [2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
## Flow
|
||||
|
||||
Any user in the group initializes the consensus by creating a proposal.
|
||||
Next, the user broadcasts the proposal to the whole network.
|
||||
Upon receiving the proposal, each user validates it,
|
||||
adds its vote as yes or no, along with its signature and timestamp.
|
||||
The user then sends the proposal and vote to a random peer in a P2P setup,
|
||||
or to a subscribed gossipsub channel if gossip-based messaging is used.
|
||||
Therefore, each user first validates the signature and then adds its new vote.
|
||||
Each sending message counts as a round.
|
||||
After `log(n)` rounds, all users in the network have the others' votes,
|
||||
if at least `2/3` of the users are honest, where honesty means following the protocol.
|
||||
|
||||
In general, the voting-based consensus consists of the following phases:
|
||||
|
||||
1. Initialization of voting
|
||||
2. Exchanging votes across the rounds
|
||||
3. Counting the votes
|
||||
|
||||
### Assumptions
|
||||
|
||||
- The users in the P2P network can discover the nodes, or they subscribe to the same channel in a gossipsub.
|
||||
- We MAY have non-reliable (silent) nodes.
|
||||
- Proposal owners MUST know the number of voters.
|
||||
|
||||
## 1. Initialization of voting
|
||||
|
||||
A user initializes the voting with the proposal payload which is
|
||||
implemented using [protocol buffers v3](https://protobuf.dev/) as follows:
|
||||
|
||||
```bash
|
||||
syntax = "proto3";
|
||||
|
||||
package vac.voting;
|
||||
|
||||
message Proposal {
|
||||
string name = 10; // Proposal name
|
||||
string payload = 11; // Proposal description
|
||||
uint32 proposal_id = 12; // Unique identifier of the proposal
|
||||
bytes proposal_owner = 13; // Public key of the creator
|
||||
repeated Vote votes = 14; // Vote list in the proposal
|
||||
uint32 expected_voters_count = 15; // Maximum number of distinct voters
|
||||
uint32 round = 16; // Number of rounds
|
||||
uint64 timestamp = 17; // Creation time of proposal
|
||||
uint64 expiration_timestamp = 18; // The timestamp at which the proposal becomes outdated
|
||||
bool liveness_criteria_yes = 19; // Shows how managing the silent peers vote
|
||||
}
|
||||
|
||||
message Vote {
|
||||
uint32 vote_id = 20; // Unique identifier of the vote
|
||||
bytes vote_owner = 21; // Voter's public key
|
||||
uint32 proposal_id = 22; // Linking votes and proposals
|
||||
uint64 timestamp = 23; // Time when the vote was cast
|
||||
bool vote = 24; // Vote bool value (true/false)
|
||||
bytes parent_hash = 25; // Hash of previous owner's Vote
|
||||
bytes received_hash = 26; // Hash of previous received Vote
|
||||
bytes vote_hash = 27; // Hash of all previously defined fields in Vote
|
||||
bytes signature = 28; // Signature of vote_hash
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
To initiate a consensus for a proposal,
|
||||
a user MUST complete all the fields in the proposal, including attaching its `vote`
|
||||
and the `payload` that shows the purpose of the proposal.
|
||||
Notably, `parent_hash` and `received_hash` are empty strings because there is no previous or received hash.
|
||||
Then the initialization section ends when the user who creates the proposal sends it
|
||||
to a random peer from the network or sends the proposal to the specific channel.
|
||||
|
||||
## 2. Exchanging votes across the peers
|
||||
|
||||
Once the peer receives the proposal message `P_1` from a 1-1 or a gossipsub channel does the following checks:
|
||||
|
||||
1. Check the signatures of each vote in the proposal; in particular for proposal `P_1`,
|
||||
verify the signature of `V_1` where `V_1 = P_1.votes[0]` with `V_1.signature` and `V_1.vote_owner`
|
||||
2. Do `parent_hash` check: If there are repeated votes from the same sender,
|
||||
check that the hash of the former vote is equal to the `parent_hash` of the later vote.
|
||||
3. Do `received_hash` check: If there are multiple votes in a proposal, check that the hash of a vote is equal to the `received_hash` of the next one.
|
||||
4. After successful verification of the signature and hashes, the receiving peer proceeds to generate `P_2` containing a new vote `V_2` as following:
|
||||
|
||||
4.1. Add its public key as `P_2.vote_owner`.
|
||||
|
||||
4.2. Set `timestamp`.
|
||||
|
||||
4.3. Set boolean `vote`.
|
||||
|
||||
4.4. Define `V_2.parent_hash = 0` if there is no previous peer's vote, otherwise hash of previous owner's vote.
|
||||
|
||||
4.5. Set `V_2.received_hash = hash(P_1.votes[0])`.
|
||||
|
||||
4.6. Set `proposal_id` for the `vote`.
|
||||
|
||||
4.7. Calculate `vote_hash` by hash of all previously defined fields in Vote:
|
||||
`V_2.vote_hash = hash(vote_id, owner, proposal_id, timestamp, vote, parent_hash, received_hash)`
|
||||
|
||||
4.8. Sign `vote_hash` with the private key corresponding to the public key in the `vote_owner` component, then add `V_2.vote_hash`.
|
||||
|
||||
5. Create `P_2` by adding `V_2` as follows:
|
||||
|
||||
5.1. Assign `P_2.name`, `P_2.proposal_id`, and `P_2.proposal_owner` to be identical to those in `P_1`.
|
||||
|
||||
5.2. Add the `V_2` to the `P_2.Votes` list.
|
||||
|
||||
5.3. Increase the round by one, namely `P_2.round = P_1.round + 1`.
|
||||
|
||||
5.4. Verify that the proposal has not expired by checking that: `current_time in [P_timestamp, P_expiration_timestamp]`.
|
||||
If this does not hold, other peers ignore the message.
|
||||
|
||||
After the peer creates the proposal `P_2` with its vote `V_2`,
|
||||
sends it to the random peer from the network or
|
||||
sends the proposal to the specific channel.
|
||||
|
||||
## 3. Determining the result
|
||||
|
||||
Because consensus depends on meeting a quorum threshold,
|
||||
each peer MUST verify the accumulated votes to determine whether the necessary conditions have been satisfied.
|
||||
The voting result is set to YES if the majority of the `2n/3` distinct peers vote YES.
|
||||
|
||||
To verify, the `findDistinctVoter` method processes the proposal by traversing its `Votes` list to determine the number of unique voters.
|
||||
|
||||
If this method returns true, the peer proceeds with strong validation,
|
||||
which ensures that if any honest peer reaches a decision,
|
||||
no other honest peer can arrive at a conflicting result.
|
||||
|
||||
1. Check each `signature` in the vote as shown in the [Section 2](#2-exchanging-votes-across-the-peers).
|
||||
|
||||
2. Check the `parent_hash` chain if there are multiple votes from the same owner namely `vote_i` and `vote_i+1` respectively,
|
||||
the parent hash of `vote_i+1` should be the hash of `vote_i`
|
||||
|
||||
3. Check the `previous_hash` chain, each received hash of `vote_i+1` should be equal to the hash of `vote_i`.
|
||||
|
||||
4. Check the `timestamp` against the replay attack.
|
||||
In particular, the `timestamp` cannot be older than the determined threshold.
|
||||
|
||||
5. Check that the liveness criteria defined in the Liveness section are satisfied.
|
||||
|
||||
If a proposal is verified by all the checks,
|
||||
the `countVote` method counts each YES vote from the list of Votes.
|
||||
|
||||
## 4. Properties
|
||||
|
||||
The consensus mechanism satisfies liveness and security properties as follows:
|
||||
|
||||
### Liveness
|
||||
|
||||
Liveness refers to the ability of the protocol to eventually reach a decision when sufficient honest participation is present.
|
||||
In this protocol, if `n > 2` and more than `n/2` of the votes among at least `2n/3` distinct peers are YES,
|
||||
then the consensus result is defined as YES; otherwise, when `n ≤ 2`, unanimous agreement (100% YES votes) is required.
|
||||
|
||||
The peer calculates the result locally as shown in the [Section 3](#3-determining-the-result).
|
||||
From the [hashgraph property](https://hedera.com/learning/hedera-hashgraph/what-is-hashgraph-consensus),
|
||||
if a node could calculate the result of a proposal,
|
||||
it implies that no peer can calculate the opposite of the result.
|
||||
Still, reliability issues can cause some situations where peers cannot receive enough messages,
|
||||
so they cannot calculate the consensus result.
|
||||
|
||||
Rounds are incremented when a peer adds and sends the new proposal.
|
||||
Calculating the required number of rounds, `2n/3` from the distinct peers' votes is achieved in two ways:
|
||||
|
||||
1. `2n/3` rounds in pure P2P networks
|
||||
2. `2` rounds in gossipsub
|
||||
|
||||
Since the message complexity is `O(1)` in the gossipsub channel,
|
||||
in case the network has reliability issues,
|
||||
the second round is used for the peers that cannot receive all the messages from the first round.
|
||||
|
||||
If an honest and online peer has received at least one vote but not enough to reach consensus,
|
||||
it MAY continue to propagate its own vote — and any votes it has received — to support message dissemination.
|
||||
This process can continue beyond the expected round count,
|
||||
as long as it remains within the expiration time defined in the proposal.
|
||||
The expiration time acts as a soft upper bound to ensure that consensus is either reached or aborted within a bounded timeframe.
|
||||
|
||||
#### Equality of votes
|
||||
|
||||
An equality of votes occurs when verifying at least `2n/3` distinct voters and
|
||||
applying `liveness_criteria_yes` the number of YES and NO votes is equal.
|
||||
|
||||
Handling ties is an application-level decision. The application MUST define a deterministic tie policy:
|
||||
|
||||
RETRY: re-run the vote with a new proposal_id, optionally adjusting parameters.
|
||||
|
||||
REJECT: abort the proposal and return voting result as NO.
|
||||
|
||||
The chosen policy SHOULD be consistent for all peers via proposal's `payload` to ensure convergence on the same outcome.
|
||||
|
||||
### Silent Node Management
|
||||
|
||||
Silent nodes are nodes that do not participate in the voting as YES or NO.
|
||||
There are two possible counting votes for the silent peers.
|
||||
|
||||
1. **Silent peers means YES:**
|
||||
Silent peers are counted as YES votes, if the application prefers strong rejection for NO votes.
|
||||
2. **Silent peers means NO:**
|
||||
Silent peers are counted as NO votes, if the application prefers strong acceptance for YES votes.
|
||||
|
||||
The proposal defaults to true, which means silent peers' votes are counted as YES; namely, `liveness_criteria_yes` is set to true by default.
|
||||
|
||||
### Security
|
||||
|
||||
This RFC uses cryptographic primitives to prevent the
|
||||
malicious behaviours as follows:
|
||||
|
||||
- Vote forgery attempt: creating unsigned invalid votes
|
||||
- Inconsistent voting: a malicious peer submits conflicting votes (e.g., YES to some peers and NO to others)
|
||||
in different stages of the protocol, violating vote consistency and attempting to undermine consensus.
|
||||
- Integrity breaking attempt: tampering history by changing previous votes.
|
||||
- Replay attack: storing the old votes to maliciously use in fresh voting.
|
||||
|
||||
## 5. Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/)
|
||||
|
||||
## 6. References
|
||||
|
||||
- [Hedera Hashgraph](https://hedera.com/learning/hedera-hashgraph/what-is-hashgraph-consensus)
|
||||
- [Gossip about gossip](https://docs.hedera.com/hedera/core-concepts/hashgraph-consensus-algorithms/gossip-about-gossip)
|
||||
- [Simple implementation of hashgraph consensus](https://github.com/conanwu777/hashgraph)
|
||||
@@ -1,907 +0,0 @@
|
||||
# ETH-DCGKA
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Decentralized Key and Session Setup for Secure Messaging over Ethereum |
|
||||
| Slug | 103 |
|
||||
| Status | raw |
|
||||
| Category | informational |
|
||||
| Editor | Ramses Fernandez-Valencia <ramses@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/decentralized-messaging-ethereum.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/decentralized-messaging-ethereum.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/decentralized-messaging-ethereum.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/decentralized-messaging-ethereum.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/decentralized-messaging-ethereum.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/decentralized-messaging-ethereum.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-04-04** — [`517b639`](https://github.com/logos-co/logos-lips/blob/517b63984c875670e437d50359f2f67331104974/vac/raw/decentralized-messaging-ethereum.md) — Update the RFCs: Vac Raw RFC (#143)
|
||||
- **2024-10-03** — [`c655980`](https://github.com/logos-co/logos-lips/blob/c655980494a5943634c372009bbea71c13196a8f/vac/raw/decentralized-messaging-ethereum.md) — Eth secpm splitted (#91)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/raw/decentralized-messaging-ethereum.md) — Fix Files for Linting (#94)
|
||||
- **2024-05-27** — [`7e3a625`](https://github.com/logos-co/logos-lips/blob/7e3a625812bd954696b7facc29a205053d1acc3c/vac/raw/decentralized-messaging-ethereum.md) — ETH-SECPM-DEC (#28)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document introduces a decentralized group messaging protocol
|
||||
using Ethereum addresses as identifiers.
|
||||
It is based on the proposal
|
||||
[DCGKA](https://eprint.iacr.org/2020/1281) by Weidner et al.
|
||||
It includes also approximations to overcome limitations related to using PKI and
|
||||
the multi-device setting.
|
||||
|
||||
## Motivation
|
||||
|
||||
The need for secure communications has become paramount.
|
||||
Traditional centralized messaging protocols are susceptible to various security
|
||||
threats, including unauthorized access, data breaches, and single points of
|
||||
failure.
|
||||
Therefore a decentralized approach to secure communication becomes increasingly
|
||||
relevant, offering a robust solution to address these challenges.
|
||||
|
||||
Secure messaging protocols used should have the following key features:
|
||||
|
||||
1. **Asynchronous Messaging:** Users can send messages even if the recipients
|
||||
are not online at the moment.
|
||||
|
||||
2. **Resilience to Compromise:** If a user's security is compromised,
|
||||
the protocol ensures that previous messages remain secure through forward
|
||||
secrecy (FS). This means that messages sent before the compromise cannot be
|
||||
decrypted by adversaries. Additionally, the protocol maintains post-compromise
|
||||
security (PCS) by regularly updating keys, making it difficult for adversaries
|
||||
to decrypt future communication.
|
||||
|
||||
3. **Dynamic Group Management:** Users can easily add or remove group members
|
||||
at any time, reflecting the flexible nature of communication within the app.
|
||||
|
||||
In this field, there exists a *trilemma*, similar to what one observes in
|
||||
blockchain, involving three key aspects:
|
||||
|
||||
1. security,
|
||||
2. scalability, and
|
||||
3. decentralization.
|
||||
|
||||
For instance, protocols like the [MLS](https://messaginglayersecurity.rocks)
|
||||
perform well in terms of scalability and security.
|
||||
However, they fall short in decentralization.
|
||||
|
||||
Newer studies such as [CoCoa](https://eprint.iacr.org/2022/251)
|
||||
improve features related to security and scalability,
|
||||
but they still rely on servers, which may not be fully trusted though they are necessary.
|
||||
|
||||
On the other hand,
|
||||
older studies like [Causal TreeKEM](https://mattweidner.com/assets/pdf/acs-dissertation.pdf)
|
||||
exhibit decent scalability (logarithmic)
|
||||
but lack forward secrecy and have weak post-compromise security (PCS).
|
||||
|
||||
The creators of [DCGKA](https://eprint.iacr.org/2020/1281) introduce a decentralized,
|
||||
asynchronous secure group messaging protocol that supports dynamic groups.
|
||||
This protocol operates effectively on various underlying networks
|
||||
without strict requirements on message ordering or latency.
|
||||
It can be implemented in peer-to-peer or anonymity networks,
|
||||
accommodating network partitions, high latency links, and
|
||||
disconnected operation seamlessly.
|
||||
Notably, the protocol doesn't rely on servers or
|
||||
a consensus protocol for its functionality.
|
||||
|
||||
This proposal provides end-to-end encryption with forward secrecy and
|
||||
post-compromise security,
|
||||
even when multiple users concurrently modify the group state.
|
||||
|
||||
## Theory
|
||||
|
||||
### Protocol overview
|
||||
|
||||
This protocol makes use of ratchets to provide FS
|
||||
by encrypting each message with a different key.
|
||||
|
||||
In the figure one can see the ratchet for encrypting a sequence of messages.
|
||||
The sender requires an initial update secret `I_1`, which is introduced in a PRG.
|
||||
The PRG will produce two outputs, namely a symmetric key for AEAD encryption, and
|
||||
a seed for the next ratchet state.
|
||||
The associated data needed in the AEAD encryption includes the message index `i`.
|
||||
The ciphertext `c_i` associated to message `m_i`
|
||||
is then broadcasted to all group members.
|
||||
The next step requires deleting `I_1`, `k_i` and any old ratchet state.
|
||||
|
||||
After a period of time the sender may replace the ratchet state with new update secrets
|
||||
`I_2`, `I_3`, and so on.
|
||||
|
||||
To start a post-compromise security update,
|
||||
a user creates a new random value known as a seed secret and
|
||||
shares it with every other group member through a secure two-party channel.
|
||||
Upon receiving the seed secret,
|
||||
each group member uses it to calculate an update secret for both the sender's ratchet
|
||||
and their own.
|
||||
Additionally, the recipient sends an unencrypted acknowledgment to the group
|
||||
confirming the update.
|
||||
Every member who receives the acknowledgment updates
|
||||
not only the ratchet for the original sender but
|
||||
also the ratchet for the sender of the acknowledgment.
|
||||
Consequently, after sharing the seed secret through `n - 1` two-party messages and
|
||||
confirming it with `n - 1` broadcast acknowledgments,
|
||||
every group member has derived an update secret and updated their ratchet accordingly.
|
||||
|
||||
When removing a group member,
|
||||
the user who initiates the removal conducts a post-compromise security update
|
||||
by sending the update secret to all group members except the one being removed.
|
||||
To add a new group member,
|
||||
each existing group member shares the necessary state with the new user,
|
||||
enabling them to derive their future update secrets.
|
||||
|
||||
Since group members may receive messages in various orders,
|
||||
it's important to ensure that each sender's ratchet is updated consistently
|
||||
with the same sequence of update secrets at each group member.
|
||||
|
||||
The network protocol used in this scheme ensures that messages from the same sender
|
||||
are processed in the order they were sent.
|
||||
|
||||
### Components of the protocol
|
||||
|
||||
This protocol relies on 3 components:
|
||||
authenticated causal broadcast (ACB),
|
||||
decentralized group membership (DGM) and
|
||||
2-party secure messaging (2SM).
|
||||
|
||||
#### Authenticated causal broadcast
|
||||
|
||||
A causal order is a partial order relation `<` on messages.
|
||||
Two messages `m_1` and `m_2` are causally ordered, or
|
||||
`m_1` causally precedes `m_2`
|
||||
(denoted by `m_1 < m_2`), if one of the following conditions holds:
|
||||
|
||||
1. `m_1` and `m_2` were sent by the same group member, and
|
||||
`m_1` was sent before `m_2`.
|
||||
2. `m_2` was sent by a group member U, and `m_1` was received and
|
||||
processed by `U` before sending `m_2`.
|
||||
3. There exists `m_3` such that `m_1 < m_3` and `m_3 < m_2`.
|
||||
|
||||
Causal broadcast requires that before processing `m`, a group member must
|
||||
process all preceding messages `{m' | m' < m}`.
|
||||
|
||||
The causal broadcast module used in this protocol authenticates the sender of
|
||||
each message, as well as its causal ordering metadata, using a digital
|
||||
signature under the sender’s identity key.
|
||||
This prevents a passive adversary from impersonating users or affecting
|
||||
causally ordered delivery.
|
||||
|
||||
#### Decentralized group membership
|
||||
|
||||
This protocol assumes the existence of a decentralized group membership
|
||||
function (denoted as DGM) that takes a set of membership change messages and
|
||||
their causal order relationships, and returns the current set of group
|
||||
members’ IDs. It needs to be deterministic and depend only on causal order, and
|
||||
not exact order.
|
||||
|
||||
#### 2-party secure messaging (2SM)
|
||||
|
||||
This protocol makes use of bidirectional 2-party secure messaging schemes,
|
||||
which consist of 3 algorithms: `2SM-Init`, `2SM-Send` and `2SM-Receive`.
|
||||
|
||||
##### Function 2SM-Init
|
||||
|
||||
This function takes two IDs as inputs:
|
||||
`ID1` representing the local user and `ID2` representing the other party.
|
||||
It returns an initial protocol state `sigma`.
|
||||
The 2SM protocol relies on a Public Key Infrastructure (PKI) or
|
||||
a key server to map these IDs to their corresponding public keys.
|
||||
In practice, the PKI should incorporate ephemeral prekeys.
|
||||
This allows users to send messages to a new group member,
|
||||
even if that member is currently offline.
|
||||
|
||||
##### Function 2SM-Send
|
||||
|
||||
This function takes a state `sigma` and a plaintext `m` as inputs, and returns
|
||||
a new state `sigma’` and a ciphertext `c`.
|
||||
|
||||
##### Function 2SM-Receive
|
||||
|
||||
This function takes a state `sigma` and a ciphertext `c`, and
|
||||
returns a new state `sigma’` and a plaintext `m`.
|
||||
|
||||
This function takes a state `sigma` and a ciphertext `c`, and returns a new
|
||||
state `sigma’` and a plaintext `m`.
|
||||
|
||||
#### Function 2SM Syntax
|
||||
|
||||
The variable `sigma` denotes the state consisting in the variables below:
|
||||
|
||||
```text
|
||||
sigma.mySks[0] = sk
|
||||
sigma.nextIndex = 1
|
||||
sigma.receivedSk = empty_string
|
||||
sigma.otherPk = pk
|
||||
sigma.otherPksender = “other”
|
||||
sigma.otherPkIndex = 0
|
||||
|
||||
```
|
||||
|
||||
#### 2SM-Init
|
||||
|
||||
On input a key pair `(sk, pk)`, this function outputs a state `sigma`.
|
||||
|
||||
#### 2SM-Send
|
||||
|
||||
This function encrypts the message `m` using `sigma.otherPk`, which represents
|
||||
the other party’s current public key.
|
||||
This key is determined based on the last public key generated for the other
|
||||
party or the last public key received from the other party,
|
||||
whichever is more recent. `sigma.otherPkSender` is set to `me` in the former
|
||||
case and `other` in the latter case.
|
||||
|
||||
Metadata including `otherPkSender` and `otherPkIndex` are included in the
|
||||
message to indicate which of the recipient’s public keys is being utilized.
|
||||
|
||||
Additionally, this function generates a new key pair for the local user,
|
||||
storing the secret key in `sigma.mySks` and sending the public key.
|
||||
Similarly, it generates a new key pair for the other party,
|
||||
sending the secret key (encrypted) and storing the public key in
|
||||
`sigma.otherPk`.
|
||||
|
||||
```text
|
||||
(sigma.mySks[sigma.nextIndex], myNewPk) = PKE-Gen()
|
||||
(otherNewSk, otherNewPk) = PKE-Gen()
|
||||
plaintext = (m, otherNewSk, sigma.nextIndex, myNewPk)
|
||||
msg = (PKE-Enc(sigma.otherPk, plaintext), sigma.otherPkSender, sigma.otherPkIndex)
|
||||
sigma.nextIndex++
|
||||
(sigma.otherPk, sigma.otherPkSender, sigma.otherPkIndex) = (otherNewPk, "me", empty_string)
|
||||
return (sigma, msg)
|
||||
|
||||
```
|
||||
|
||||
#### 2SM-Receive
|
||||
|
||||
This function utilizes the metadata of the message `c` to determine which
|
||||
secret key to utilize for decryption, assigning it to `sk`.
|
||||
If the secret key corresponds to one generated by ourselves,
|
||||
that secret key along with all keys with lower index are deleted.
|
||||
This deletion is indicated by `sigma.mySks[≤ keyIndex] = empty_string`.
|
||||
Subsequently, the new public and secret keys contained in the message are
|
||||
stored.
|
||||
|
||||
```text
|
||||
(ciphertext, keySender, keyIndex) = c
|
||||
if keySender = "other" then
|
||||
sk = sigma.mySks[keyIndex]
|
||||
sigma.mySks[≤ keyIndex] = empty_string
|
||||
else sk = sigma.receivedSk
|
||||
(m, sigma.receivedSk, sigma.otherPkIndex, sigma.otherPk) = PKE-Dec(sk, ciphertext)
|
||||
sigma.otherPkSender = "other"
|
||||
return (sigma, m)
|
||||
|
||||
```
|
||||
|
||||
### PKE Syntax
|
||||
|
||||
The required PKE that MUST be used is ElGamal with a 2048-bit modulus `p`.
|
||||
|
||||
#### Parameters
|
||||
|
||||
The following parameters must be used:
|
||||
|
||||
```text
|
||||
p = 308920927247127345254346920820166145569
|
||||
g = 2
|
||||
|
||||
```
|
||||
|
||||
#### PKE-KGen
|
||||
|
||||
Each user `u` MUST do the following:
|
||||
|
||||
```text
|
||||
PKE-KGen():
|
||||
a = randint(2, p-2)
|
||||
pk = (p, g, g^a)
|
||||
sk = a
|
||||
return (pk, sk)
|
||||
|
||||
```
|
||||
|
||||
#### PKE-Enc
|
||||
|
||||
A user `v` encrypting a message `m` for `u` MUST follow these steps:
|
||||
|
||||
```text
|
||||
PKE-Enc(pk):
|
||||
k = randint(2, p-2)
|
||||
eta = g^k % p
|
||||
delta = m * (g^a)^k % p
|
||||
return ((eta, delta))
|
||||
|
||||
```
|
||||
|
||||
#### PKE-Dec
|
||||
|
||||
The user `u` recovers a message `m` from a ciphertext `c`
|
||||
by performing the following operations:
|
||||
|
||||
```text
|
||||
PKE-Dec(sk):
|
||||
mu = eta^(p-1-sk) % p
|
||||
return ((mu * delta) % p)
|
||||
|
||||
```
|
||||
|
||||
### DCGKA Syntax
|
||||
|
||||
#### Auxiliary functions
|
||||
|
||||
There exist 6 functions that are auxiliary for the rest of components of the
|
||||
protocol, namely:
|
||||
|
||||
#### init
|
||||
|
||||
This function takes an `ID` as input and returns its associated initial state,
|
||||
denoted by `gamma`:
|
||||
|
||||
```text
|
||||
gamma.myId = ID
|
||||
gamma.mySeq = 0
|
||||
gamma.history = empty
|
||||
gamma.nextSeed = empty_string
|
||||
gamma.2sm[·] = empty_string
|
||||
gamma.memberSecret[·, ·, ·] = empty_string
|
||||
gamma.ratchet[·] = empty_string
|
||||
return (gamma)
|
||||
|
||||
```
|
||||
|
||||
#### encrypt-to
|
||||
|
||||
Upon reception of the recipient’s `ID` and a plaintext, it encrypts a direct
|
||||
message for another group member.
|
||||
Should it be the first message for a particular `ID`,
|
||||
then the `2SM` protocol state is initialized and stored in
|
||||
`gamma.2sm[recipient.ID]`.
|
||||
One then uses `2SM_Send` to encrypt the message and store the updated protocol
|
||||
in `gamma`.
|
||||
|
||||
```text
|
||||
if gamma.2sm[recipient_ID] = empty_string then
|
||||
gamma.2sm[recipient_ID] = 2SM_Init(gamma.myID, recipient_ID)
|
||||
(gamma.2sm[recipient_ID], ciphertext) = 2SM_Send(gamma.2sm[recipient_ID], plaintext)
|
||||
return (gamma, ciphertext)
|
||||
|
||||
```
|
||||
|
||||
#### decrypt-from
|
||||
|
||||
After receiving the sender’s `ID` and a ciphertext, it behaves as the reverse
|
||||
function of `encrypt-to` and has a similar initialization:
|
||||
|
||||
```text
|
||||
if gamma.2sm[sender_ID] = empty_string then
|
||||
gamma.2sm[sender_ID] = 2SM_Init(gamma.myId, sender_ID)
|
||||
(gamma.2sm[sender_ID], plaintext) = 2SM_Receive(gamma.2sm[sender_ID], ciphertext)
|
||||
return (gamma, plaintext)
|
||||
|
||||
```
|
||||
|
||||
#### update-ratchet
|
||||
|
||||
This function generates the next update secret `I_update` for the group member
|
||||
`ID`.
|
||||
The ratchet state is stored in `gamma.ratchet[ID]`.
|
||||
It is required to use a HMAC-based key derivation function HKDF to combine the
|
||||
ratchet state with an input, returning an update secret and a new ratchet
|
||||
state.
|
||||
|
||||
```text
|
||||
(updateSecret, gamma.ratchet[ID]) = HKDF(gamma.ratchet[ID], input)
|
||||
return (gamma, updateSecret)
|
||||
|
||||
```
|
||||
|
||||
#### member-view
|
||||
|
||||
This function calculates the set of group members
|
||||
based on the most recent control message sent by the specified user `ID`.
|
||||
It filters the group membership operations
|
||||
to include only those observed by the specified `ID`, and
|
||||
then invokes the DGM function to generate the group membership.
|
||||
|
||||
```text
|
||||
ops = {m in gamma.history st. m was sent or acknowledged by ID}
|
||||
return DGM(ops)
|
||||
|
||||
```
|
||||
|
||||
#### generate-seed
|
||||
|
||||
This function generates a random bit string and
|
||||
sends it encrypted to each member of the group using the `2SM` mechanism.
|
||||
It returns the updated protocol state and
|
||||
the set of direct messages (denoted as `dmsgs`) to send.
|
||||
|
||||
```text
|
||||
gamma.nextSeed = random.randbytes()
|
||||
dmsgs = empty
|
||||
for each ID in recipients:
|
||||
(gamma, msg) = encrypt-to(gamma, ID, gamma.nextSeed)
|
||||
dmsgs = dmsgs + (ID, msg)
|
||||
return (gamma, dmsgs)
|
||||
|
||||
```
|
||||
|
||||
### Creation of a group
|
||||
|
||||
A group is generated in a 3 steps procedure:
|
||||
|
||||
1. A user calls the `create` function and broadcasts a control message of type
|
||||
*create*.
|
||||
2. Each receiver of the message processes the message and broadcasts an *ack*
|
||||
control message.
|
||||
3. Each member processes the *ack* message received.
|
||||
|
||||
#### create
|
||||
|
||||
This function generates a *create* control message and calls `generate-seed` to
|
||||
define the set of direct messages that need to be sent.
|
||||
Then it calls `process-create` to process the control message for this user.
|
||||
The function `process-create` returns a tuple including an updated state gamma
|
||||
and an update secret `I`.
|
||||
|
||||
```text
|
||||
control = ("create", gamma.mySeq, IDs)
|
||||
(gamma, dmsgs) = generate-seed(gamma, IDs)
|
||||
(gamma, _, _, I, _) = process-create(gamma, gamma.myId, gamma.mySeq, IDs, empty_string)
|
||||
return (gamma, control, dmsgs, I)
|
||||
|
||||
```
|
||||
|
||||
#### process-seed
|
||||
|
||||
This function initially employs `member-view` to identify the users who were
|
||||
part of the group when the control message was dispatched.
|
||||
Then, it attempts to acquire the seed secret through the following steps:
|
||||
|
||||
1. If the control message was dispatched by the local user, it uses the seed
|
||||
secret stored in `gamma.nextSeed` by the most recent invocation of
|
||||
`generate-seed`.
|
||||
2. If the `control` message was dispatched by another user, and the local user
|
||||
is among its recipients, the function utilizes `decrypt-from` to decrypt the
|
||||
direct message that includes the seed secret.
|
||||
3. Otherwise, it returns an `ack` message without deriving an update secret.
|
||||
|
||||
Afterwards, `process-seed` generates separate member secrets for each group
|
||||
member from the seed secret by combining the seed secret and
|
||||
each user ID using HKDF.
|
||||
The secret for the sender of the message is stored in `senderSecret`, while
|
||||
those for the other group members are stored in `gamma.memberSecret`.
|
||||
The sender's member secret is immediately utilized to update their KDF ratchet
|
||||
and compute their update secret `I_sender` using `update-ratchet`.
|
||||
If the local user is the sender of the control message, the process is
|
||||
completed, and the update secret is returned.
|
||||
However, if the seed secret is received from another user, an `ack` control
|
||||
message is constructed for broadcast, including the sender ID and sequence
|
||||
number of the message being acknowledged.
|
||||
|
||||
The final step computes an update secret `I_me` for the local user invoking the
|
||||
`process-ack` function.
|
||||
|
||||
```text
|
||||
recipients = member-view(gamma, sender) - {sender}
|
||||
if sender = gamma.myId then seed = gamma.nextSeed; gamma.nextSeed =
|
||||
empty_string
|
||||
else if gamma.myId in recipients then (gamma, seed) = decrypt-from(gamma,
|
||||
sender, dmsg)
|
||||
else
|
||||
return (gamma, (ack, ++gamma.mySeq, (sender, seq)), empty_string ,
|
||||
empty_string , empty_string)
|
||||
|
||||
for ID in recipients do gamma.memberSecret[sender, seq, ID] = HKDF(seed, ID)
|
||||
senderSecret = HKDF(seed, sender)
|
||||
(gamma, I_sender) = update-ratchet(gamma, sender, senderSecret)
|
||||
if sender = gamma.myId then return (gamma, empty_string , empty_string ,
|
||||
I_sender, empty_string)
|
||||
control = (ack, ++gamma.mySeq, (sender, seq))
|
||||
members = member-view(gamma, gamma.myId)
|
||||
forward = empty
|
||||
for ID in {members - (recipients + {sender})}
|
||||
s = gamma.memberSecret[sender, seq, gamma.myId]
|
||||
(gamma, msg) = encrypt-to(gamma, ID, s)
|
||||
forward = forward + {(ID, msg)}
|
||||
(gamma, _, _, I_me, _) = process-ack(gamma, gamma.myId, gamma.mySeq,
|
||||
(sender, seq), empty_string)
|
||||
return (gamma, control, forward, I_sender, I_me)
|
||||
|
||||
```
|
||||
|
||||
#### process-create
|
||||
|
||||
This function is called by the sender and each of the receivers of the `create`
|
||||
control message.
|
||||
First, it records the information from the create message in the
|
||||
`gamma.history+ {op}`, which is used to track group membership changes. Then,
|
||||
it proceeds to call `process-seed`.
|
||||
|
||||
```text
|
||||
op = ("create", sender, seq, IDs)
|
||||
gamma.history = gamma.history + {op}
|
||||
return (process-seed(gamma, sender, seq, dmsg))
|
||||
|
||||
```
|
||||
|
||||
#### process-ack
|
||||
|
||||
This function is called by those group members once they receive an ack
|
||||
message.
|
||||
In `process-ack`, `ackID` and `ackSeq` are the sender and sequence number of
|
||||
the acknowledged message.
|
||||
Firstly, if the acknowledged message is a group membership operation, it
|
||||
records the acknowledgement in `gamma.history`.
|
||||
|
||||
Following this, the function retrieves the relevant member secret from
|
||||
`gamma.memberSecret`, which was previously obtained from the seed secret
|
||||
contained in the acknowledged message.
|
||||
|
||||
Finally, it updates the ratchet for the sender of the `ack` and returns the
|
||||
resulting update secret.
|
||||
|
||||
```text
|
||||
if (ackID, ackSeq) was a create / add / remove then
|
||||
op = ("ack", sender, seq, ackID, ackSeq)
|
||||
gamma.history = gamma.history + {op}
|
||||
s = gamma.memberSecret[ackID, ackSeq, sender]
|
||||
gamma.memberSecret[ackID, ackSeq, sender] = empty_string
|
||||
if (s = empty_string) & (dmsg = empty_string) then return (gamma, empty_string,
|
||||
empty_string, empty_string, empty_string)
|
||||
if (s = empty_string) then (gamma, s) = decrypt-from(gamma, sender, dmsg)
|
||||
(gamma, I) = update-ratchet(gamma, sender, s)
|
||||
return (gamma, empty_string, empty_string, I, empty_string)
|
||||
|
||||
```
|
||||
|
||||
The HKDF function MUST follow RFC 5869 using the hash function SHA256.
|
||||
|
||||
### Post-compromise security updates and group member removal
|
||||
|
||||
The functions `update` and `remove` share similarities with `create`:
|
||||
they both call the function `generate-seed` to encrypt a new seed secret for
|
||||
each group member.
|
||||
The distinction lies in the determination of the group members using
|
||||
`member-view`.
|
||||
In the case of `remove`, the user being removed is excluded from the recipients
|
||||
of the seed secret.
|
||||
Additionally, the control message they construct is designated with type
|
||||
`update` or `remove` respectively.
|
||||
|
||||
Likewise, `process-update` and `process-remove` are akin to `process-create`.
|
||||
The function `process-update` skips the update of `gamma.history`,
|
||||
whereas `process-remove` includes a removal operation in the history.
|
||||
|
||||
#### update
|
||||
|
||||
```text
|
||||
control = ("update", ++gamma.mySeq, empty_string)
|
||||
recipients = member-view(gamma, gamma.myId) - {gamma.myId}
|
||||
(gamma, dmsgs) = generate-seed(gamma, recipients)
|
||||
(gamma, _, _, I , _) = process-update(gamma, gamma.myId, gamma.mySeq,
|
||||
empty_string, empty_string)
|
||||
return (gamma, control, dmsgs, I)
|
||||
|
||||
```
|
||||
|
||||
#### remove
|
||||
|
||||
```text
|
||||
control = ("remove", ++gamma.mySeq, empty)
|
||||
recipients = member-view(gamma, gamma.myId) - {ID, gamma.myId}
|
||||
(gamma, dmsgs) = generate-seed(gamma, recipients)
|
||||
(gamma, _, _, I , _) = process-update(gamma, gamma.myId, gamma.mySeq, ID,
|
||||
empty_string)
|
||||
return (gamma, control, dmsgs, I)
|
||||
|
||||
```
|
||||
|
||||
#### process-update
|
||||
|
||||
`return process-seed(gamma, sender, seq, dmsg)`
|
||||
|
||||
#### process-remove
|
||||
|
||||
```text
|
||||
op = ("remove", sender, seq, removed)
|
||||
gamma.history = gamma.history + {op}
|
||||
return process-seed(gamma, sender, seq, dmsg)
|
||||
|
||||
```
|
||||
|
||||
### Group member addition
|
||||
|
||||
#### add
|
||||
|
||||
When adding a new group member, an existing member initiates the process by
|
||||
invoking the `add` function and providing the ID of the user to be added.
|
||||
This function prepares a control message marked as `add` for broadcast to the
|
||||
group. Simultaneously, it creates a welcome message intended for the new member
|
||||
as a direct message.
|
||||
This `welcome` message includes the current state of the sender's KDF ratchet,
|
||||
encrypted using `2SM`, along with the history of group membership operations
|
||||
conducted so far.
|
||||
|
||||
```text
|
||||
control = ("add", ++gamma.mySeq, ID)
|
||||
(gamma, c) = encrypt-to(gamma, ID, gamma.ratchet[gamma.myId])
|
||||
op = ("add", gamma.myId, gamma.mySeq, ID)
|
||||
welcome = (gamma.history + {op}, c)
|
||||
(gamma, _, _, I, _) = process-add(gamma, gamma.myId, gamma.mySeq, ID, empty_string)
|
||||
return (gamma, control, (ID, welcome), I)
|
||||
|
||||
```
|
||||
|
||||
#### process-add
|
||||
|
||||
This function is invoked by both the sender and each recipient of an `add`
|
||||
message, which includes the new group member. If the local user is the newly
|
||||
added member, the function proceeds to call `process-welcome` and then exits.
|
||||
Otherwise, it extends `gamma.history` with the `add` operation.
|
||||
|
||||
Line 5 determines whether the local user was already a group member at the time
|
||||
the `add` message was sent; this condition is typically true but may be false
|
||||
if multiple users were added concurrently.
|
||||
|
||||
On lines 6 to 8, the ratchet for the sender of the *add* message is updated
|
||||
twice. In both calls to `update-ratchet`, a constant string is used as the
|
||||
ratchet input instead of a random seed secret.
|
||||
|
||||
The value returned by the first ratchet update is stored in
|
||||
`gamma.memberSecret` as the added user’s initial member secret. The result of
|
||||
the second ratchet update becomes `I_sender`, the update secret for the sender
|
||||
of the `add` message. On line 10, if the local user is the sender, the update
|
||||
secret is returned.
|
||||
|
||||
If the local user is not the sender, an acknowledgment for the `add` message is
|
||||
required.
|
||||
Therefore, on line 11, a control message of type `add-ack` is constructed for
|
||||
broadcast.
|
||||
Subsequently, in line 12 the current ratchet state is encrypted using `2SM` to
|
||||
generate a direct message intended for the added user, allowing them to decrypt
|
||||
subsequent messages sent by the sender.
|
||||
Finally, in lines 13 to 15, `process-add-ack` is called to calculate the local
|
||||
user’s update secret (`I_me`), which is then returned along with `I_sender`.
|
||||
|
||||
```text
|
||||
if added = gamma.myId then return process-welcome(gamma, sender, seq, dmsg)
|
||||
op = ("add", sender, seq, added)
|
||||
gamma.history = gamma.history + {op}
|
||||
if gamma.myId in member-view(gamma, sender) then
|
||||
(gamma, s) = update-ratchet(gamma, sender, "welcome")
|
||||
gamma.memberSecret[sender, seq, added] = s
|
||||
(gamma, I_sender) = update-ratchet(gamma, sender, "add")
|
||||
else I_sender = empty_string
|
||||
if sender = gamma.myId then return (gamma, empty_string, empty_string,
|
||||
I_sender, empty_string)
|
||||
control = ("add-ack", ++gamma.mySeq, (sender, seq))
|
||||
(gamma, c) = encrypt-to(gamma, added, gamma.ratchet[gamma.myId])
|
||||
(gamma, _, _, I_me, _) = process-add-ack(gamma, gamma.myId, gamma.mySeq,
|
||||
(sender, seq), empty_string)
|
||||
return (gamma, control, {(added, c)}, I_sender, I_me)
|
||||
|
||||
```
|
||||
|
||||
#### process-add-ack
|
||||
|
||||
This function is invoked by both the sender and each recipient of an `add-ack`
|
||||
message, including the new group member. On lines 1–2, the acknowledgment is
|
||||
added to `gamma.history`, mirroring the process in `process-ack`.
|
||||
If the current user is the new group member, the `add-ack` message includes the
|
||||
direct message constructed in `process-add`; this direct message contains the
|
||||
encrypted ratchet state of the sender of the `add-ack`, then it is decrypted on
|
||||
lines 3–5.
|
||||
|
||||
On line 6, a check is performed to determine whether the local user was already a
|
||||
group member at the time the `add-ack` was sent. If affirmative, a new update
|
||||
secret `I` for the sender of the `add-ack` is computed on line 7 by invoking
|
||||
`update-ratchet` with the constant string `add`.
|
||||
|
||||
In the scenario involving the new member, the ratchet state was recently
|
||||
initialized on line 5. This ratchet update facilitates all group members,
|
||||
including the new addition, to derive each member’s update by obtaining any
|
||||
update secret from before their inclusion.
|
||||
|
||||
```text
|
||||
op = ("ack", sender, seq, ackID, ackSeq)
|
||||
gamma.history = gamma.history + {op}
|
||||
if dmsg != empty_string then
|
||||
(gamma, s) = decrypt-from(gamma, sender, dmsg)
|
||||
gamma.ratchet[sender] = s
|
||||
if gamma.myId in member-view(gamma, sender) then
|
||||
(gamma, I) = update-ratchet(gamma, sender, "add")
|
||||
return (gamma, empty_string, empty_string, I, empty_string)
|
||||
else return (gamma, empty_string, empty_string, empty_string, empty_string)
|
||||
|
||||
```
|
||||
|
||||
#### process-welcome
|
||||
|
||||
This function serves as the second step called by a newly added group member.
|
||||
In this context, `adderHistory` represents the adding user’s copy of
|
||||
`gamma.history` sent in their welcome message, which is utilized to initialize
|
||||
the added user’s history.
|
||||
Here, `c` denotes the ciphertext of the adding user’s ratchet state, which is
|
||||
decrypted on line 2 using `decrypt-from`.
|
||||
|
||||
Once `gamma.ratchet[sender]` is initialized, `update-ratchet` is invoked twice
|
||||
on lines 3 to 5 with the constant strings `welcome` and `add` respectively.
|
||||
These operations mirror the ratchet operations performed by every other group
|
||||
member in `process-add`.
|
||||
The outcome of the first `update-ratchet` call becomes the first member secret
|
||||
for the added user,
|
||||
while the second call returns `I_sender`, the update secret for the sender of
|
||||
the add operation.
|
||||
|
||||
Subsequently, the new group member constructs an *ack* control message to
|
||||
broadcast on line 6 and calls `process-ack` to compute their initial update
|
||||
secret I_me. The function `process-ack` reads from `gamma.memberSecret` and
|
||||
passes it to `update-ratchet`. The previous ratchet state for the new member is
|
||||
the empty string `empty`, as established by `init`, thereby initializing the
|
||||
new member’s ratchet.
|
||||
Upon receiving the new member’s `ack`, every other group member initializes
|
||||
their copy of the new member’s ratchet in a similar manner.
|
||||
|
||||
By the conclusion of `process-welcome`, the new group member has acquired
|
||||
update secrets for themselves and the user who added them.
|
||||
The ratchets for other group members are initialized by `process-add-ack`.
|
||||
|
||||
```text
|
||||
gamma.history = adderHistory
|
||||
(gamma, gamma.ratchet[sender]) = decrypt-from(gamma, sender, c)
|
||||
(gamma, s) = update-ratchet(gamma, sender, "welcome")
|
||||
gamma.memberSecret[sender, seq, gamma.myId] = s
|
||||
(gamma, I_sender) = update-ratchet(gamma, sender, "add")
|
||||
control = ("ack", ++gamma.mySeq, (sender, seq))
|
||||
(gamma, _, _, I_me, _) = process-ack(gamma, gamma.myId, gamma.mySeq, (sender,
|
||||
seq), empty_string)
|
||||
return (gamma, control, empty_string , I_sender, I_me)
|
||||
|
||||
```
|
||||
|
||||
## Privacy Considerations
|
||||
|
||||
### Dependency on PKI
|
||||
|
||||
The [DCGKA](https://eprint.iacr.org/2020/1281) proposal presents some
|
||||
limitations highlighted by the authors.
|
||||
Among these limitations one finds the requirement of a PKI (or a key server)
|
||||
mapping IDs to public keys.
|
||||
|
||||
One method to overcome this limitation is adapting the protocol SIWE (Sign in
|
||||
with Ethereum) so a user `u_1` who wants to start a communication with a user
|
||||
`u_2` can interact with the latter’s wallet to request a public key using an
|
||||
Ethereum address as `ID`.
|
||||
|
||||
#### SIWE
|
||||
|
||||
The [SIWE](https://docs.login.xyz/general-information/siwe-overview) (Sign In
|
||||
With Ethereum) proposal was a suggested standard for leveraging Ethereum to
|
||||
authenticate and authorize users on web3 applications.
|
||||
Its goal is to establish a standardized method for users to sign in to web3
|
||||
applications using their Ethereum address and private key,
|
||||
mirroring the process by which users currently sign in to web2 applications
|
||||
using their email and password.
|
||||
Below follows the required steps:
|
||||
|
||||
1. A server generates a unique Nonce for each user intending to sign in.
|
||||
2. A user initiates a request to connect to a website using their wallet.
|
||||
3. The user is presented with a distinctive message that includes the Nonce and
|
||||
details about the website.
|
||||
4. The user authenticates their identity by signing in with their wallet.
|
||||
5. Upon successful authentication, the user's identity is confirmed or
|
||||
approved.
|
||||
6. The website grants access to data specific to the authenticated user.
|
||||
|
||||
#### Our approach
|
||||
|
||||
The idea in the [DCGKA](https://eprint.iacr.org/2020/1281) setting closely
|
||||
resembles the procedure outlined in SIWE. Here:
|
||||
|
||||
1. The server corresponds to user D1, who initiates a request (instead of
|
||||
generating a nonce) to obtain the public key of user D2.
|
||||
2. Upon receiving the request, the wallet of D2 sends the request to the user.
|
||||
3. User D2 receives the request from the wallet, and decides whether to accept
|
||||
or reject it.
|
||||
4. The wallet then responds with a message containing the requested public key
|
||||
in case of acceptance by D2.
|
||||
|
||||
This message may be signed, allowing D1 to verify that the owner of the
|
||||
received public key is indeed D2.
|
||||
|
||||
### Multi-device setting
|
||||
|
||||
One may see the set of devices as a group and create a group key for internal
|
||||
communications.
|
||||
One may use treeKEM for instance, since it provides interesting properties like
|
||||
forward secrecy and post-compromise security.
|
||||
All devices share the same `ID`, which is held by one of them, and from other
|
||||
user’s point of view, they would look as a single user.
|
||||
|
||||
Using servers, like in the paper
|
||||
[Multi-Device for Signal](https://eprint.iacr.org/2019/1363), should be
|
||||
avoided; but this would imply using a particular device as receiver and
|
||||
broadcaster within the group.
|
||||
There is an obvious drawback which is having a single device working as a
|
||||
“server”. Should this device be attacked or without connection, there should be
|
||||
a mechanism for its revocation and replacement.
|
||||
|
||||
Another approach for communications between devices could be using the keypair
|
||||
of each device. This could open the door to use UPKE, since keypairs should be
|
||||
regenerated frequently.
|
||||
|
||||
Each time a device sends a message, either an internal message or an external
|
||||
message, it needs to replicate and broadcast it to all devices in the group.
|
||||
|
||||
The mechanism for the substitution of misbehaving leader devices follows:
|
||||
|
||||
1. Each device within a group knows the details of other leader devices. This
|
||||
information may come from metadata in received messages, and is replicated by
|
||||
the leader device.
|
||||
2. To replace a leader, the user should select any other device within its
|
||||
group and use it to send a signed message to all other users.
|
||||
3. To get the ability to sign messages, this new leader should request the
|
||||
keypair associated to the ID to the wallet.
|
||||
4. Once the leader has been changed, it revocates access from DCGKA to the
|
||||
former leader using the DCGKA protocol.
|
||||
5. The new leader starts a key update in DCGKA.
|
||||
|
||||
Not all devices in a group should be able to send messages to other users. Only
|
||||
the leader device should be in charge of sending and receiving messages.
|
||||
To prevent other devices from sending messages outside their group, a
|
||||
requirement should be signing each message. The keys associated to the `ID`
|
||||
should only be in control of the leader device.
|
||||
|
||||
The leader device is in charge of setting the keys involved in the DCGKA. This
|
||||
information must be replicated within the group to make sure it is updated.
|
||||
|
||||
To detect missing messages or potential misbehavior, messages must include a
|
||||
counter.
|
||||
|
||||
### Using UPKE
|
||||
|
||||
Managing the group of devices of a user can be done either using a group key
|
||||
protocol such as treeKEM or using the keypair of each device.
|
||||
Setting a common key for a group of devices under the control of the same actor
|
||||
might be excessive, furthermore it may imply some of the problems one can find
|
||||
in the usual setting of a group of different users;
|
||||
for example: one of the devices may not participate in the required updating
|
||||
processes, representing a threat for the group.
|
||||
|
||||
The other approach to managing the group of devices is using each device’s
|
||||
keypair, but it would require each device updating this material frequently,
|
||||
something that may not happen.
|
||||
|
||||
[UPKE](https://eprint.iacr.org/2022/068) is a form of asymmetric cryptography
|
||||
where any user can update any other user’s key pair by running an update
|
||||
algorithm with (high-entropy) private coins. Any sender can initiate a *key
|
||||
update* by sending a special update ciphertext.
|
||||
This ciphertext updates the receiver’s public key and also, once processed by
|
||||
the receiver, will update their secret key.
|
||||
|
||||
To the best of our knowledge, there exist several efficient constructions, both
|
||||
[UPKE from ElGamal](https://eprint.iacr.org/2019/1189) (based in the DH
|
||||
assumption) and [UPKE from Lattices](https://eprint.iacr.org/2023/1400)
|
||||
(based in lattices).
|
||||
None of them have been implemented in a secure messaging protocol, and this
|
||||
opens the door to some novel research.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via
|
||||
[CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
- [DCGKA](https://eprint.iacr.org/2020/1281)
|
||||
- [MLS](https://messaginglayersecurity.rocks)
|
||||
- [CoCoa](https://eprint.iacr.org/2022/251)
|
||||
- [Causal TreeKEM](https://mattweidner.com/assets/pdf/acs-dissertation.pdf)
|
||||
- [SIWE](https://docs.login.xyz/general-information/siwe-overview)
|
||||
- [Multi-device for Signal](https://eprint.iacr.org/2019/1363)
|
||||
- [UPKE](https://eprint.iacr.org/2022/068)
|
||||
- [UPKE from ElGamal](https://eprint.iacr.org/2019/1189)
|
||||
- [UPKE from Lattices](https://eprint.iacr.org/2023/1400)
|
||||
@@ -1,590 +0,0 @@
|
||||
# DECENTRALIZED-MLS-OFFCHAIN-CONSENSUS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Secure channel setup using decentralized MLS |
|
||||
| Slug | 104 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Ugur Sen [ugur@status.im](mailto:ugur@status.im) |
|
||||
| Contributors | seemenkina [ekaterina@status.im](mailto:ekaterina@status.im) |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-04-02** — [`155c310`](https://github.com/logos-co/logos-lips/blob/155c310d7bfad6ea3cd9f68e45c68dad731ff629/docs/ift-ts/raw/decentralized-mls-offchain-consensus.md) — de-MLS RFC name change (#303)
|
||||
- **2026-03-29** — [`ff05dbd`](https://github.com/logos-co/logos-lips/blob/ff05dbd51176443b3e548e9575c3610685c32d63/docs/ift-ts/raw/eth-mls-offchain.md) — ETH-MLS-OFFCHAIN RFC multi-steward follow up (#298)
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/eth-mls-offchain.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/eth-mls-offchain.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/eth-mls-offchain.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/eth-mls-offchain.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/eth-mls-offchain.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-11-26** — [`e39d288`](https://github.com/logos-co/logos-lips/blob/e39d2884fee1b8a0b1b20a430d7004945ce919f6/vac/raw/eth-mls-offchain.md) — VAC/RAW/ ETH-MLS-OFFCHAIN RFC multi-steward support (#193)
|
||||
- **2025-08-21** — [`3b968cc`](https://github.com/logos-co/logos-lips/blob/3b968ccce3848da67cddb0295a9cdcb37d63d18c/vac/raw/eth-mls-offchain.md) — VAC/RAW/ ETH-MLS-OFFCHAIN RFC (#166)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
The following document specifies scalable
|
||||
and decentralized secure group messaging application by
|
||||
integrating Message Layer Security (MLS) backend.
|
||||
Decentralization means that each user is a node in a P2P network and
|
||||
each user has voice for any changes in group.
|
||||
This is achieved by integrating a consensus mechanism.
|
||||
Lastly, this RFC can also be referred to as de-MLS,
|
||||
decentralized MLS, to emphasize its deviation
|
||||
from the centralized trust assumptions of traditional MLS deployments.
|
||||
|
||||
## Motivation
|
||||
|
||||
Group messaging is a fundamental part of digital communication,
|
||||
yet most existing systems depend on centralized servers,
|
||||
which introduce risks around privacy, censorship, and unilateral control.
|
||||
In restrictive settings, servers can be blocked or surveilled;
|
||||
in more open environments, users still face opaque moderation policies,
|
||||
data collection, and exclusion from decision-making processes.
|
||||
To address this, a decentralized, scalable peer-to-peer
|
||||
group messaging system is proposed, where each participant runs a node, contributes
|
||||
to message propagation, and takes part in governance autonomously.
|
||||
Group membership changes are decided collectively through a lightweight
|
||||
partially synchronous, fault-tolerant consensus protocol without a centralized identity.
|
||||
This design enables truly democratic group communication and is well-suited
|
||||
for use cases like activist collectives, research collaborations, DAOs, support groups,
|
||||
and decentralized social platforms.
|
||||
|
||||
## Format Specification
|
||||
|
||||
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”,
|
||||
“SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document
|
||||
are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Assumptions
|
||||
|
||||
- The nodes in the P2P network can discover other nodes or will connect to other nodes when subscribing to same topic in a gossipsub.
|
||||
- The presence of non-reliable (silent) nodes MAY be assumed.
|
||||
- A lightweight, scalable consensus mechanism with deterministic finality within a specific time MUST be employed.
|
||||
- The network MUST enforce a rate-limiting mechanism for all entities in order to mitigate spam.
|
||||
- $\Delta$ (Delta) is a protocol parameter denoting a bounded time interval (in seconds)
|
||||
that defines the maximum synchronization window of the system.
|
||||
- At least $2n/3$ of the members MUST become synchronized within $\Delta$ time, where $n$ is the group size.
|
||||
|
||||
## Roles
|
||||
|
||||
The three roles used in de-MLS are as follows:
|
||||
|
||||
- `node`: Nodes are participants in the network that are not currently members
|
||||
of any secure group messaging session but remain available as potential candidates for group membership.
|
||||
- `member`: Members are special nodes in the secure group messaging who
|
||||
obtain the current group key of the secure group messaging session.
|
||||
Each node is assigned a unique identity represented as a 20-byte value named `member id`.
|
||||
- `steward`: Stewards are special and transparent members in the secure group
|
||||
messaging who organize the changes by releasing commit messages upon the voted proposals.
|
||||
There are two special subsets of steward as epoch and backup steward,
|
||||
which are defined in the section de-MLS Objects.
|
||||
|
||||
## MLS Background
|
||||
|
||||
The de-MLS consists of MLS backend, so the MLS services and other MLS components
|
||||
are taken from the original [MLS specification](https://datatracker.ietf.org/doc/rfc9420/), with or without modifications.
|
||||
|
||||
### MLS Services
|
||||
|
||||
MLS operates with two services: the authentication service (AS) and the delivery service (DS).
|
||||
Authentication service enables group members to authenticate the credentials presented by other group members.
|
||||
The delivery service routes MLS messages among the nodes or
|
||||
members in the protocol in the correct order and
|
||||
manage the `keyPackage` of the users where the `keyPackage` is the objects
|
||||
that provide some public information about a user as specified in [MLS specification](https://datatracker.ietf.org/doc/rfc9420/).
|
||||
|
||||
### MLS Objects
|
||||
|
||||
The following section presents the MLS objects and components that are used in this RFC:
|
||||
|
||||
`Epoch`: Time intervals that changes the state that is defined by members,
|
||||
section 3.4 in [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/).
|
||||
An epoch is represented as a monotonically increasing integer.
|
||||
It does not correspond to a fixed wall-clock time interval.
|
||||
Instead, the epoch is incremented upon each valid `commit message` that results in a state transition.
|
||||
|
||||
`MLS proposal message:` Members MUST receive the proposal message prior to the
|
||||
corresponding commit message that initiates a new epoch with key changes,
|
||||
in order to ensure the intended security properties, section 12.1 in [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/).
|
||||
Here, the add and remove proposals are used.
|
||||
|
||||
`Application message`: This message type is used for arbitrary encrypted communication between group members.
|
||||
This is restricted by [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/) as if there is pending proposal,
|
||||
the application message should be cut.
|
||||
Note that: Since MLS is server-based, the delay between proposal and commit messages is very small.
|
||||
|
||||
`Commit message:` After members receive the proposals regarding group changes,
|
||||
the committer, who may be any member of the group, as specified in [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/),
|
||||
generates the necessary key material for the next epoch, including the appropriate welcome messages
|
||||
for new joiners and new entropy for removed members. In this RFC, the committers only MUST be stewards.
|
||||
|
||||
### de-MLS Objects
|
||||
|
||||
This section presents the de-MLS objects:
|
||||
|
||||
`Voting proposal`: Similar to MLS proposals, but processed only if approved through a voting process.
|
||||
They function as application messages in the MLS group,
|
||||
allowing the steward to collect them without halting the protocol.
|
||||
There are three types of `voting proposal` according to the type of consensus, as shown in the [Consensus Types section](#consensus-types),
|
||||
these are, `commit proposal`, `steward election proposal` and `emergency criteria proposal`.
|
||||
|
||||
`Epoch steward`: The steward assigned to commit in `epoch E` according to the steward list.
|
||||
Holds the primary responsibility for creating commit in that epoch.
|
||||
|
||||
`Backup steward`: The steward next in line after the `epoch steward` on the `steward list` in `epoch E`.
|
||||
Only becomes active if the `epoch steward` is malicious or fails,
|
||||
in which case it completes the commitment phase.
|
||||
If unused in `epoch E`, it automatically becomes the `epoch steward` in `epoch E+1`.
|
||||
|
||||
`Steward list`: It is an ordered list that contains the `member id`s of authorized stewards.
|
||||
Each steward in the list becomes main responsible for creating the commit message when its turn arrives,
|
||||
according to this order for each epoch.
|
||||
For example, suppose there are two stewards in the list `steward A` first and `steward B` last in the list.
|
||||
`steward A` is responsible for creating the commit message for first epoch.
|
||||
Similarly, `steward B` is for the last epoch.
|
||||
Since the `epoch steward` is the primary committer for an epoch,
|
||||
it holds the main responsibility for producing the commit.
|
||||
However, other stewards MAY also generate a commit within the same epoch to preserve liveness
|
||||
in case the epoch steward is inactive or slow.
|
||||
Duplicate commits are not re-applied and only the single valid commit for the epoch is accepted by the group,
|
||||
as described in the [commit validation service](#commit-validation-service), to guard against multiple committing.
|
||||
|
||||
Therefore, if a malicious steward occurred, the `backup steward` will be charged with committing.
|
||||
Lastly, the size of the list named as `sn`, which also shows the epoch interval for steward list determination.
|
||||
|
||||
## Flow
|
||||
|
||||
General flow is as follows:
|
||||
|
||||
- Each `node` creates and sends their `credential` includes `keyPackage`.
|
||||
- Each `member` creates `voting proposals` and sends them to the MLS group during `epoch E`.
|
||||
- Proposals are voted on during the $\Delta$ time window.
|
||||
During this period, the system enters a freezing phase (no new proposals are accepted) to ensure
|
||||
that at least 2n/3 members become synchronized, thereby preserving the
|
||||
health and correctness of the [commit validation service](#commit-validation-service).
|
||||
- Meanwhile, the `steward` collects finalized `voting proposals` from MLS group and converts them into
|
||||
`MLS proposals` then sends them with corresponding `commit messages`
|
||||
- Eventually, upon receiving `commit messages`, each member applies the
|
||||
[commit validation service](#commit-validation-service) locally.
|
||||
After successful validation, the member transitions to the next `epoch E+1`.
|
||||
|
||||
## Creating Voting Proposal
|
||||
|
||||
A `member` MAY initialize the voting with the proposal payload
|
||||
which is implemented using [protocol buffers v3](https://protobuf.dev/) as follows:
|
||||
|
||||
```protobuf
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
message Proposal {
|
||||
string name = 10; // Proposal name
|
||||
string payload = 11; // Describes what the vote is for
|
||||
int32 proposal_id = 12; // Unique identifier of the proposal
|
||||
bytes proposal_owner = 13; // Public key of the creator
|
||||
repeated Vote votes = 14; // Vote list in the proposal
|
||||
int32 expected_voters_count = 15; // Maximum number of distinct voters
|
||||
int32 round = 16; // Number of Votes
|
||||
int64 timestamp = 17; // Creation time of proposal
|
||||
int64 expiration_time = 18; // Time interval that the proposal is active
|
||||
bool liveness_criteria_yes = 19; // Shows how managing the silent peers vote
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message Vote {
|
||||
int32 vote_id = 20; // Unique identifier of the vote
|
||||
bytes vote_owner = 21; // Voter's public key
|
||||
int64 timestamp = 22; // Time when the vote was cast
|
||||
bool vote = 23; // Vote bool value (true/false)
|
||||
bytes parent_hash = 24; // Hash of previous owner's Vote
|
||||
bytes received_hash = 25; // Hash of previous received Vote
|
||||
bytes vote_hash = 26; // Hash of all previously defined fields in Vote
|
||||
bytes signature = 27; // Signature of vote_hash
|
||||
}
|
||||
```
|
||||
|
||||
The voting proposal MAY include adding a `node` or removing a `member`.
|
||||
After the `member` creates the voting proposal,
|
||||
it is emitted to the network via the MLS `Application message` with a lightweight,
|
||||
epoch-based voting such as [hashgraph-like consensus](https://github.com/logos-co/logos-lips/blob/consensus-hashgraph-like/vac/raw/consensus-hashgraphlike.md).
|
||||
This consensus result MUST be finalized within the epoch as YES or NO.
|
||||
|
||||
If the voting result is YES, this points out the voting proposal will be converted into
|
||||
the MLS proposal by the `steward` and following commit message that starts the new epoch.
|
||||
|
||||
All `members` including `stewards` MUST maintain a local store of finalized voting proposals
|
||||
for at least the duration `threshold_duration` mentioned in [Steward Violation List](#steward-violation-list),
|
||||
required to validate incoming commits and perform [Commit validation service](#commit-validation-service).
|
||||
|
||||
## Creating welcome message
|
||||
|
||||
When a MLS `MLS proposal message` is created by the `steward`,
|
||||
a `commit message` SHOULD follow,
|
||||
as in section 12.4 of [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/), to the members.
|
||||
In order for the new `member` joining the group to synchronize with the current members
|
||||
who received the `commit message`,
|
||||
the `steward` sends a welcome message to the node as the new `member`,
|
||||
as in section 12.4.3.1. [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/).
|
||||
|
||||
## Single steward
|
||||
|
||||
The naive way to create decentralized secure group messaging is to have a single transparent `steward`
|
||||
who only applies the changes regarding the result of the voting.
|
||||
|
||||
This is mostly similar with the general flow and specified in voting proposal and welcome message creation sections.
|
||||
|
||||
1. Each time a single `steward` initializes a group with group parameters with parameters
|
||||
as in section 8.1. Group Context in [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/).
|
||||
2. Each `node` that wants to become a `member` needs to obtain this announcement and create a `credential`
|
||||
includes `keyPackage` that is specified in [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/) section 10.
|
||||
3. The `node` MUST send the plaintext `KeyPackage`, as defined in [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/),
|
||||
accompanied by its signature, and publish it to the Welcome topic.
|
||||
This ensures that all current group members are aware that a new participant intends to join.
|
||||
Upon receipt, the `steward` MUST initiate a voting proposal to decide on admitting the new member.
|
||||
It also provides flexibility for liveness in multi-steward settings,
|
||||
allowing more than one steward to obtain `KeyPackages` to commit.
|
||||
4. The `steward` aggregates all `KeyPackages` utilizes them to provision group additions for new members,
|
||||
based on the outcome of the voting process.
|
||||
5. Any `member` start to create `voting proposals` for adding or removing users,
|
||||
and present them to the voting in the MLS group as an application message.
|
||||
However, unlimited use of `voting proposals` within the group may be misused by
|
||||
malicious or overly active members.
|
||||
Therefore, an application-level constraint MAY be introduced to limit the number
|
||||
or frequency of proposals initiated by each member in order to prevent spam or abuse.
|
||||
6. After waiting for the $\Delta$ synchronization window, the `steward` collects
|
||||
finalized `voting proposals` within epoch `E` that have received affirmative votes
|
||||
from members via application messages.
|
||||
The `steward` includes only those proposals that have obtained a majority of "YES" votes.
|
||||
Since voting proposals are transmitted as application messages, omitting
|
||||
non-finalized proposals does not affect the protocol’s correctness or
|
||||
consistency.
|
||||
7. The `steward` converts all approved `voting proposals` into
|
||||
corresponding `MLS proposals` and `commit message`, and
|
||||
transmits both in a single operation as in [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/) section 12.4,
|
||||
including welcome messages for the new members.
|
||||
Therefore, the `commit message` ends the previous epoch and creates a new one.
|
||||
8. Upon receiving a `commit message`, the `members` first execute the [commit validation service](#commit-validation-service),
|
||||
including verification of signatures and associated `voting proposals`.
|
||||
If the commit is deemed valid, the `members` apply the commit and synchronize to the upcoming epoch.
|
||||
|
||||
## Multi stewards
|
||||
|
||||
Decentralization has already been achieved in the previous section.
|
||||
However, to improve availability and ensure censorship resistance,
|
||||
the single steward protocol is extended to a multi steward architecture.
|
||||
In this design, each epoch is coordinated by a designated steward,
|
||||
operating under the same protocol as the single steward model.
|
||||
Thus, the multi steward approach primarily defines how steward roles
|
||||
rotate across epochs while preserving the underlying structure and logic of the original protocol.
|
||||
Two variants of the multi steward design are introduced to address different system requirements.
|
||||
|
||||
In the multi steward setting, multiple stewards MAY issue `commit messages` within the same epoch.
|
||||
As a result, members may receive different numbers of commit messages with potentially differing contents.
|
||||
For all received commits, the [commit validation service](#commit-validation-service) is executed locally and
|
||||
MUST deterministically output at most one valid commit to be applied for the epoch transition.
|
||||
|
||||
### Consensus Types
|
||||
|
||||
Consensus is agnostic with its payload; therefore, it can be used for various purposes.
|
||||
Note that each message for the consensus of proposals is an `application message` in the MLS object section.
|
||||
It is used in three ways as follows:
|
||||
|
||||
1. `Commit proposal`: It is the proposal instance that is specified in Creating Voting Proposal section
|
||||
with `Proposal.payload` MUST show the commit request from `members`.
|
||||
Any member MAY create this proposal in any epoch and `epoch steward` MUST collect and commit YES voted proposals.
|
||||
This is the only proposal type common to both single steward and multi steward designs.
|
||||
2. `Steward election proposal`: This is the process that finalizes the `steward list`,
|
||||
which sets and orders stewards responsible for creating commits over a predefined number of range in (`sn_min`,`sn_max`).
|
||||
The validity of the chosen `steward list` ends
|
||||
when the last steward in the list (the one at the final index) completes its commit.
|
||||
At that point, a new `steward election proposal` MUST be initiated again by any member during the corresponding epoch.
|
||||
The `Proposal.payload` field MUST represent the ordered identities of the proposed stewards.
|
||||
Each steward election proposal MUST be verified and finalized through the consensus process
|
||||
so that members can identify which steward will be responsible in each epoch
|
||||
and detect any unauthorized steward commits.
|
||||
3. `Emergency criteria proposal`: If there is a malicious member or steward,
|
||||
this event MUST be finalized through a governance vote,
|
||||
reflecting the expectation of active participation from members in the decentralized governance process.
|
||||
If the proposal returns YES, a score penalty MUST be applied to the targeted member or steward
|
||||
by decreasing their peer score, and a score reward MUST be granted to the creator of the proposal;
|
||||
if the proposal returns NO, a score penalty MUST be applied to the creator of the proposal.
|
||||
`Proposal.payload` MUST include evidence of dishonesty as defined in the Steward Violation List,
|
||||
along with the identifier of the malicious member or steward.
|
||||
This proposal can be created by any member in any epoch.
|
||||
|
||||
The order of consensus proposal messages is important to achieving a consistent result.
|
||||
Therefore, messages MUST be prioritized by type in the following order, from highest to lowest priority:
|
||||
|
||||
- `Emergency criteria proposal`
|
||||
|
||||
- `Steward election proposal`
|
||||
|
||||
- `Commit proposal`
|
||||
|
||||
This means that if a higher-priority consensus proposal is present in the network,
|
||||
lower-priority messages MUST be withheld from transmission until the higher-priority proposals have been finalized.
|
||||
|
||||
#### Partial Freeze Semantics
|
||||
|
||||
This prioritization is realized through a partial freeze of lower-priority governance traffic.
|
||||
When an active `Emergency criteria proposal` is observed and has not yet been finalized,
|
||||
honest nodes MUST temporarily suspend the propagation and creation of lower-priority consensus proposal messages,
|
||||
including Steward election proposals and Commit proposals.
|
||||
Such messages MUST be dropped and MUST NOT be forwarded over the network until the emergency proposal is finalized.
|
||||
|
||||
This partial freeze applies only to governance-related messages,
|
||||
MLS application messages MAY continue to be transmitted normally.
|
||||
|
||||
If a malicious member attempts to generate or propagate
|
||||
lower-priority proposals during an active emergency,
|
||||
these messages will not be observed by the majority of honest nodes
|
||||
due to deterministic message filtering.
|
||||
Implementations MAY additionally penalize such behavior using peer scoring mechanisms.
|
||||
|
||||
To enforce this behavior, members MUST be able to identify the type of incoming consensus messages
|
||||
and apply priority-based filtering accordingly.
|
||||
|
||||
### Steward list creation
|
||||
|
||||
The `steward list` consists of steward nominees who will become actual stewards
|
||||
if the `steward election proposal` is finalized with YES,
|
||||
is arbitrarily chosen from `member` and OPTIONALLY adjusted depending on the needs of the implementation.
|
||||
The `steward list` size, defined by the minimum `sn_min` and maximum `sn_max` bounds,
|
||||
is determined at the time of group creation.
|
||||
The `sn_min` requirement is applied only when the total number of members exceeds `sn_min`;
|
||||
if the number of available members falls below this threshold,
|
||||
the list size automatically adjusts to include all existing members.
|
||||
|
||||
The actual size of the list MAY vary within this range as `sn`, with the minimum value being at least 1.
|
||||
|
||||
The index of the slots shows epoch info and value of index shows `member id`s.
|
||||
The next in line steward for the `epoch E` is named as `epoch steward`, which has index E.
|
||||
And the subsequent steward in the `epoch E` is named as the `backup steward`.
|
||||
For example, let's assume steward list is (S3, S2, S1) if in the previous epoch the roles were
|
||||
(`backup steward`: S2, `epoch steward`: S1), then in the next epoch they become
|
||||
(`backup steward`: S3, `epoch steward`: S2) by shifting.
|
||||
|
||||
If the `epoch steward` is honest, the `backup steward` does not involve the process in epoch,
|
||||
and the `backup steward` will be the `epoch steward` within the `epoch E+1`.
|
||||
|
||||
If the `epoch steward` is malicious, the `backup steward` is involved in the commitment phase in `epoch E`
|
||||
and the former steward becomes the `backup steward` in `epoch E`.
|
||||
|
||||
Liveness criteria:
|
||||
|
||||
Once the active `steward list` has completed its assigned epochs,
|
||||
|
||||
members MUST proceed to elect the next set of stewards
|
||||
(which MAY include some or all of the previous members).
|
||||
This election is conducted through a type 2 consensus procedure, `steward election proposal`.
|
||||
|
||||
A `Steward election proposal` is considered valid only if the resulting `steward list`
|
||||
is produced through a deterministic process that ensures an unbiased distribution of steward assignments,
|
||||
since allowing bias could enable a malicious participant to manipulate the list
|
||||
and retain control within a favored group for multiple epochs.
|
||||
|
||||
The list MUST consist of at least `sn_min` members, including retained previous stewards,
|
||||
sorted according to the ascending value of `SHA256(epoch E || member id || group id)`,
|
||||
where `epoch E` is the epoch in which the election proposal is initiated,
|
||||
and `group id` for shuffling the list across the different groups.
|
||||
Any proposal with a list that does not adhere to this generation method MUST be rejected by all members.
|
||||
|
||||
It is assumed that there are no recurring entries in `SHA256(epoch E || member id || group id)`,
|
||||
since the SHA256 outputs are unique when there is no repetition in the `member id` values,
|
||||
against the conflicts on sorting issues.
|
||||
|
||||
### Multi steward with big consensuses
|
||||
|
||||
In this model, all group modifications, such as adding or removing members,
|
||||
must be approved through consensus by all participants,
|
||||
including the steward assigned for `epoch E`.
|
||||
A configuration with multiple stewards operating under a shared consensus protocol offers
|
||||
increased decentralization and stronger protection against censorship.
|
||||
However, this benefit comes with reduced operational efficiency.
|
||||
The model is therefore best suited for small groups that value
|
||||
decentralization and censorship resistance more than performance.
|
||||
|
||||
To create a multi steward with a big consensus,
|
||||
the group is initialized with a single steward as specified as follows:
|
||||
|
||||
1. The steward initializes the group with the config file.
|
||||
This config file MUST contain (`sn_min`,`sn_max`) as the `steward list` size range.
|
||||
2. The steward adds the members as a centralized way till the number of members reaches the `sn_min`.
|
||||
Then, members propose lists by voting proposal with size `sn`
|
||||
as a consensus among all members, as mentioned in the consensus section 2, according to the checks:
|
||||
the size of the proposed list `sn` is in the interval (`sn_min`,`sn_max`).
|
||||
Note that if the total number of members is below `sn_min`,
|
||||
then the steward list size MUST be equal to the total member count.
|
||||
3. After the voting proposal ends up with a `steward list`,
|
||||
and group changes are ready to be committed as specified in single steward section
|
||||
with a difference which is members also check the committed steward is `epoch steward` or `backup steward`,
|
||||
otherwise anyone can create `emergency criteria proposal`.
|
||||
4. If the `epoch steward` violates the changing process as described in the Steward Violation List,
|
||||
one of the members MUST initialize an `emergency criteria proposal` to apply a peer score penalty to the malicious steward.
|
||||
|
||||
A large consensus group provides better decentralization, but it requires significant coordination,
|
||||
which MAY not be suitable for groups with more than 1000 members.
|
||||
|
||||
### Multi steward with small consensuses
|
||||
|
||||
The small consensus model offers improved efficiency with a trade-off in decentralization.
|
||||
In this design, group changes require consensus only among the stewards, rather than all members.
|
||||
Regular members participate by periodically selecting the stewards by `steward election proposal`
|
||||
but do not take part in commit decision by `commit proposal`.
|
||||
This structure enables faster coordination since consensus is achieved within a smaller group of stewards.
|
||||
It is particularly suitable for large user groups, where involving every member in each decision would be impractical.
|
||||
|
||||
The flow is similar to the big consensus including the `steward list` finalization with all members consensus
|
||||
only the difference here, the commit messages requires `commit proposal` only among the stewards.
|
||||
|
||||
### Commit validation service
|
||||
|
||||
Since `stewards` are allowed to produce a commit even when they are not the designated `epoch steward`,
|
||||
multiple commits may appear within the same commit context, often reflecting recurring versions of the same proposals.
|
||||
To ensure a consistent and deterministic outcome, all members MUST locally perform
|
||||
commit validation over the set of candidate commits.
|
||||
|
||||
This validation process takes as input the set of `finalized voting proposals` locally stored by the member,
|
||||
as remarked in [Creating Voting Proposal](#creating-voting-proposal), and multiple candidate `commit messages`
|
||||
with different lengths and contents, each containing `voting proposals`.
|
||||
The process deterministically selects at most a single valid commit as output.
|
||||
In cases where protocol violations are detected, the process MAY additionally trigger peer scoring penalties.
|
||||
|
||||
For all candidate commits entering validation, the `creator ID` MUST be identified
|
||||
and verified against the local epoch context to ensure that the commit is eligible for the current epoch.
|
||||
Commits originating from unauthorized or context-inconsistent creators MUST be rejected.
|
||||
The `creator ID` MAY additionally be used for peer scoring purposes, including optional slashing or rewarding mechanisms,
|
||||
depending on whether the commit is determined to be valid or invalid.
|
||||
|
||||
A commit is considered valid only if it references governance proposals
|
||||
that have been finalized through voting and are known to the member.
|
||||
Commits that reference non-finalized voting proposals MUST be rejected and
|
||||
MUST trigger a peer score penalty for the commit author,
|
||||
as this behavior constitutes a protocol violation.
|
||||
|
||||
Among the valid candidate commits, the commit derived from the longest
|
||||
deterministic proposal sequence SHOULD be selected as the single valid commit.
|
||||
Any other competing commits that do not match the selected commit MUST be
|
||||
classified as misbehaviour and penalized with a lower reputation score
|
||||
according to the misbehaviour scoring rules defined in this specification.
|
||||
The proposal sequence is ordered by the ascending value of each proposal as `SHA256(proposal)`.
|
||||
Therefore, commit messages that contain the same set of voting proposals
|
||||
are identical in content and can be easily deduplicated.
|
||||
|
||||
Since MLS derives new group secrets from the committer’s contribution,
|
||||
two `commit messages` containing the exact same ordered set of `voting proposals`
|
||||
but produced by different `stewards` will generate different group keys.
|
||||
Therefore, proposal equivalence alone does not guarantee state equivalence.
|
||||
If multiple valid commits contain the identical deterministic proposal sequence,
|
||||
the commit validation service MUST select first, if there is `Epoch steward`,
|
||||
otherwise the commit whose `committer ID` is lexicographically smallest (according to canonical ordering)
|
||||
as the single valid output, thereby avoiding different state forks.
|
||||
Competing commits that contain the same deterministic proposal sequence
|
||||
but differ only due to steward-generated MLS commit entropy MUST NOT be classified as misbehaviour
|
||||
and MAY instead be treated as honest participation for peer scoring purposes.
|
||||
|
||||
## Steward violation list
|
||||
|
||||
A steward’s activity is called a violation if the action is one or more of the following:
|
||||
|
||||
1. Broken commit: The steward releases a different commit message from the voted `commit proposal`.
|
||||
This activity is identified by the `members` since the [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/) provides the methods
|
||||
that members can use to identify the broken commit messages that are possible in a few situations,
|
||||
such as commit and proposal incompatibility. Specifically, the broken commit can arise as follows:
|
||||
1. The commit belongs to the earlier epoch.
|
||||
2. The commit message should equal the latest epoch
|
||||
3. The commit needs to be compatible with the previous epoch’s `MLS proposal`.
|
||||
2. Broken MLS proposal: The steward prepares a different `MLS proposal` for the corresponding `voting proposal`.
|
||||
This activity is identified by the `members` since both `MLS proposal` and `voting proposal` are visible
|
||||
and can be identified by checking that the hashes of `Proposal.payload` and `MLSProposal.payload` are the same, as in RFC 9420 section 12.1, Proposals.
|
||||
3. Censorship and inactivity: The situation where there is a voting proposal that is visible for every member,
|
||||
and the Steward does not provide an MLS proposal and commit within the configured `threshold_duration`,
|
||||
after which the voting process is considered finalized by the majority timer.
|
||||
This activity is again identified by the `members`, since `voting proposals` are visible to every member in the group,
|
||||
therefore each member can verify that there is no `MLS proposal` corresponding to `voting proposal`,
|
||||
or commit was produced for a voting proposal that has already been finalized due to timer expiration.
|
||||
|
||||
## Peer Scoring
|
||||
|
||||
To improve fairness in member and steward management, de-MLS SHOULD incorporate a
|
||||
lightweight peer scoring mechanism.
|
||||
Unfairness is not an intrinsic property of a member.
|
||||
Instead, it arises as a consequence of punitive actions such as removal following an observed malicious behavior.
|
||||
However, behaviors that appear malicious are not always the result of intent.
|
||||
Network faults, temporary partitions, message delays, or client-side failures may lead to unintended protocol deviations.
|
||||
A peer scoring mechanism allows de-MLS to account for such transient and non-adversarial conditions by accumulating evidence over time.
|
||||
This enables the system to distinguish persistent and intentional misbehavior from accidental faults.
|
||||
Member removal should be triggered only in cases of sustained and intentional malicious activity,
|
||||
thereby preserving fairness while maintaining security and liveness.
|
||||
|
||||
In this approach, each node maintains a local peer score table mapping `member_id` to a score,
|
||||
with new members starting from a configurable default value `default_peer_score`.
|
||||
Peer score updates MUST be performed only for stewards that are active in the current epoch context.
|
||||
Peer scores may decrease due to violations and increase due to honest behavior;
|
||||
such score adjustments are derived from observable protocol events, such as
|
||||
successful commits or emergency criteria proposals, and each peer updates its local table accordingly.
|
||||
In particular, peer score updates MAY be triggered either by direct local observation of protocol violations
|
||||
or by the finalized outcome of a governance vote.
|
||||
Regardless of the trigger, score updates are applied locally by each peer to its own peer score table.
|
||||
|
||||
Members MUST periodically evaluate peer scores against the predefined threshold `threshold_peer_score`.
|
||||
A removal operation based on the `threshold_peer_score` MUST be initiated as an `emergency criteria proposal`
|
||||
by at least one member and, only after being finalized with a YES outcome, MUST be included in the subsequent commit.
|
||||
To prevent abuse, if such a removal emergency criteria proposal is finalized with a NO outcome,
|
||||
a low score MAY be applied to the proposal owner.
|
||||
This mechanism allows accidental or transient failures to be tolerated while still enabling
|
||||
decisive action against repeated or harmful behavior.
|
||||
The exact scoring rules, recovery mechanisms, and escalation criteria are left for future discussion.
|
||||
|
||||
## Timer-Based Anti-Deadlock Mechanism
|
||||
|
||||
In de-MLS, a deadlock refers to a prolonged period during which no valid commit is produced
|
||||
despite the presence of at least one `finalized commit proposal` that requires a group state change.
|
||||
To mitigate deadlock risks in de-MLS, a timer-based anti-deadlock mechanism SHOULD be introduced.
|
||||
|
||||
Each member maintains a local timer with a configured `threshold_duration`.
|
||||
The timer MUST start when the member observes a `finalized commit proposal` that requires a corresponding commit
|
||||
(e.g., add/remove membership changes) and MUST reset only when
|
||||
the [commit validation service](#commit-validation-service) outputs a valid commit for the current commit context.
|
||||
|
||||
If the `threshold_duration` is exceeded, the member waits an additional buffer period to account for network delays
|
||||
and then triggers a high-priority `emergency proposal` indicating a potential deadlock.
|
||||
If the proposal returns YES, the protocol SHOULD temporarily allow any member to commit in order to restore liveness.
|
||||
Since timers may expire at different times in a P2P setting,
|
||||
the buffer period mitigates false positives, while commit filtering is required
|
||||
to prevent commit flooding during recovery.
|
||||
|
||||
This timer-based method is used only for anti-deadlock detection.
|
||||
Cases where a commit message includes fewer finalized voting proposals than expected are handled by [Steward Violation List](#steward-violation-list).
|
||||
Emergency proposals that return NO MUST incur a peer score penalty for the creator of the proposal to reduce abuse.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
In this section, the security considerations are shown as de-MLS assurance.
|
||||
|
||||
1. Malicious Steward: A malicious steward can act maliciously,
|
||||
as in the Steward violation list section.
|
||||
Therefore, de-MLS enforces that any steward only follows the protocol under the consensus order
|
||||
and commits without emergency criteria application.
|
||||
2. Malicious Member: A member is only marked as malicious
|
||||
when the member acts by releasing a commit message.
|
||||
3. Steward list election bias: Although SHA256 is used together with two global variables
|
||||
to shuffle stewards in a deterministic and verifiable manner,
|
||||
this approach only minimizes election bias; it does not completely eliminate it.
|
||||
This design choice is intentional, in order to preserve the efficiency advantages provided by the MLS mechanism.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
### References
|
||||
|
||||
- [MLS RFC 9420](https://datatracker.ietf.org/doc/rfc9420/)
|
||||
- [Hashgraphlike Consensus](https://github.com/logos-co/logos-lips/blob/consensus-hashgraph-like/vac/raw/consensus-hashgraphlike.md)
|
||||
- [vacp2p/de-mls](https://github.com/vacp2p/de-mls)
|
||||
@@ -1,215 +0,0 @@
|
||||
# EXTENDED-KADEMLIA-DISCOVERY
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Extended Kademlia Discovery with capability filtering |
|
||||
| Slug | 143 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Simon-Pierre Vivier <simvivier@status.im> |
|
||||
| Contributors | Hanno Cornelius <hanno@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-26** — [`8bba444`](https://github.com/logos-co/logos-lips/blob/8bba4441aa3601cef6fb75ff5d48b1cd27350a5c/docs/ift-ts/raw/extended-kad-disco.md) — chore: fix lint (#275)
|
||||
- **2026-01-23** — [`8164992`](https://github.com/logos-co/logos-lips/blob/8164992534b14b2466fc1117bbeef2ae2d14f249/docs/ift-ts/raw/extended-kad-disco.md) — chore: fix lint
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification defines a lightweight peer discovery mechanism
|
||||
built on top of the libp2p Kademlia DHT.
|
||||
It allows nodes to advertise themselves by storing a new type of peer record under
|
||||
their own peer ID and enables other nodes to discover peers in the network via
|
||||
random walks through the DHT.
|
||||
The mechanism supports capability-based filtering of `services` entries,
|
||||
making it suitable for overlay networks that
|
||||
require connectivity to peers offering specific protocols or features.
|
||||
|
||||
## Motivation
|
||||
|
||||
The standard libp2p Kademlia DHT provides
|
||||
content routing and peer routing toward specific keys or peer IDs,
|
||||
but offers limited support for general-purpose random peer discovery
|
||||
— i.e. finding *any well-connected peer* in the network.
|
||||
|
||||
Existing alternatives such as mDNS,
|
||||
Rendezvous,
|
||||
or bootstrap lists do not always satisfy the needs of
|
||||
large-scale decentralized overlay networks that require:
|
||||
|
||||
- Organic growth of connectivity without strong trust in bootstrap nodes
|
||||
- Discovery of peers offering specific capabilities (e.g. protocols, bandwidth classes, service availability)
|
||||
- Resilience against eclipse attacks and network partitioning
|
||||
- Low overhead compared to gossip-based or pubsub-based discovery
|
||||
|
||||
By leveraging the already-deployed Kademlia routing table and random-walk behavior,
|
||||
this document defines a simple, low-cost discovery primitive that reuses existing infrastructure while adding capability advertisement and filtering via a new record type.
|
||||
|
||||
## Semantic
|
||||
|
||||
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”,
|
||||
“SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document
|
||||
are to be interpreted as described in [2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
Please refer to [libp2p Kademlia DHT specification](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/kad-dht/README.md) (`Kad-DHT`)
|
||||
and [extensible peer records specification](https://github.com/logos-co/logos-lips/blob/31dfa0c8c2f3e7f7365156246c4eb7b7c390e76e/vac/raw/extensible-peer-records.md) (`XPR`) for terminology used in this document.
|
||||
|
||||
## Protocol
|
||||
|
||||
### Record Propagation
|
||||
|
||||
A node that wants to make itself discoverable,
|
||||
also known as an _advertiser_,
|
||||
MUST encode its discoverable information in an [`XPR`](https://github.com/logos-co/logos-lips/blob/31dfa0c8c2f3e7f7365156246c4eb7b7c390e76e/vac/raw/extensible-peer-records.md#extensible-peer-records).
|
||||
The encoded information MUST be sufficient for discoverers to connect to this advertiser.
|
||||
It MAY choose to encode some or all of its capabilities (and related information)
|
||||
as `services` in the `XPR`.
|
||||
This will allow future discoverers to filter discovered records based on desired capabilities.
|
||||
|
||||
In order to advertise this record,
|
||||
the advertiser SHOULD first retrieve the `k` closest peers to its own peer ID
|
||||
in its own `Kad-DHT` [routing table](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/kad-dht/README.md#kademlia-routing-table).
|
||||
This assumes that the routing table has been previously initialised
|
||||
and follows the regular [bootstrap process](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/kad-dht/README.md#bootstrap-process) as per the `Kad-DHT` specification.
|
||||
The advertiser SHOULD then send a `PUT_VALUE` message to these `k` peers
|
||||
to store the `XPR` against its own peer ID.
|
||||
This process SHOULD be repeated periodically to maintain the advertised record.
|
||||
We RECOMMEND an interval of once every `30` minutes.
|
||||
|
||||
#### Use of `XPR` in `identify`
|
||||
|
||||
Advertisers SHOULD include their `XPR`s as the `signedPeerRecord`
|
||||
in libp2p `Identify` [messages](https://github.com/libp2p/specs/blob/0762325f693afb2e620d32d4f55ba962d1293ff9/identify/README.md#the-identify-message).
|
||||
|
||||
> **Note:** For more information, see the `identify` protocol implementations,
|
||||
such as [go-libp2p](https://github.com/libp2p/go-libp2p/blob/636d44e15abc7bfbd1da09cc9fef674249625ae6/p2p/protocol/identify/pb/identify.proto#L37),
|
||||
as at the time of writing (Jan 2026)
|
||||
the `signedPeerRecord` field extension is not yet part of any official specification.
|
||||
|
||||
### Record Discovery
|
||||
|
||||
A node that wants to discover peers to connect to,
|
||||
also known as a _discoverer_,
|
||||
SHOULD perform the following random walk discovery procedure (`FIND_RANDOM`):
|
||||
|
||||
1. Choose a random value (`R_KEY`) in the `Kad-DHT` key space.
|
||||
|
||||
2. Follow the `Kad-DHT` [peer routing](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/kad-dht/README.md#peer-routing) algorithm,
|
||||
with `R_KEY` as the target.
|
||||
This procedure loops the `Kad-DHT` `FIND_NODE` procedure to the target key,
|
||||
each time receiving closer peers (`closerPeers`) to the target key in response,
|
||||
until no new closer peers can be found.
|
||||
Since the target is random,
|
||||
the discoverer SHOULD consider each _previously unseen_ peer in each response's `closerPeers` field,
|
||||
as a randomly discovered node of potential interest.
|
||||
The discoverer MUST keep track of such peers as `discoveredPeer`s.
|
||||
|
||||
3. For each `discoveredPeer`, attempt to retrieve a corresponding `XPR`.
|
||||
This can be done in one of two ways:
|
||||
|
||||
3.1 If the `discoveredPeer` in the response contains at least one multiaddress in the `addrs` field,
|
||||
attempt a connection to that peer and wait to receive the `XPR` as part of the [`identify` procedure](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/identify/README.md).
|
||||
|
||||
3.2 If the `discoveredPeer` does not include `addrs` information,
|
||||
or the connection attempt to the included `addrs` fails,
|
||||
or more service information is required before a connection can be attempted,
|
||||
the discoverer MAY perform a [value retrieval](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/kad-dht/README.md#value-retrieval) procedure using the `discoveredPeer` ID.
|
||||
|
||||
4. For each retrieved `XPR`, validate the signature against the peer ID.
|
||||
In addition, the discoverer MAY filter discovered peers
|
||||
based on the capabilities encoded within the `services` field of the `XPR`.
|
||||
The discoverer SHOULD ignore (and disconnect, if already connected) discovered peers
|
||||
with invalid `XPR`s
|
||||
or that do not advertise the `services` of interest to the discoverer.
|
||||
|
||||
### Privacy Enhancements
|
||||
|
||||
To prevent network topology mapping and eclipse attacks,
|
||||
`Kad-DHT` nodes MUST NOT disclose connection type in [response messages](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/kad-dht/README.md#rpc-messages).
|
||||
The `connection` field of every `Peer` MUST always be set to `NOT_CONNECTED`.
|
||||
|
||||
## API Specification
|
||||
|
||||
Implementers of this protocol,
|
||||
SHOULD wrap the implementation in a functional interface similar to the one defined below.
|
||||
|
||||
In Extended Kademlia Discovery, the discovery protocol is based on a random DHT walk,
|
||||
optionally filtering the randomly discovered peers by capability.
|
||||
However, it's possible to define discovery protocols with better performance in finding peers with specific capabilities.
|
||||
The aim is to define an API that is compatible with Extended Kademlia Discovery
|
||||
and more sophisticated capability discovery protocols,
|
||||
maintaining similar function signatures even if the underlying protocol differs.
|
||||
This section may be extracted into a separate API specification once new capability discovery protocols are defined.
|
||||
|
||||
The API is defined in the form of C-style bindings.
|
||||
However, this simply serves to illustrate the exposed functions
|
||||
and can be adapted into the conventions of any strongly typed language.
|
||||
Although unspecified in the API below,
|
||||
all functions SHOULD return an error result type appropriate to the implementation language.
|
||||
|
||||
### `start()`
|
||||
|
||||
Start the discovery protocol,
|
||||
including all tasks related to bootstrapping and maintaining the routing table
|
||||
and advertising this node and its capabilities.
|
||||
|
||||
In the case of Extended Kademlia Discovery,
|
||||
`start()` will kick off the periodic task of [refreshing the propagated `XPR`](#record-propagation).
|
||||
|
||||
### `stop()`
|
||||
|
||||
Stop the discovery protocol,
|
||||
including all tasks related to maintaining the routing table
|
||||
and advertising this node and its capabilities.
|
||||
|
||||
In the case of Extended Kademlia Discovery,
|
||||
`stop()` will cancel the periodic task of [refreshing the propagated `XPR`](#record-propagation).
|
||||
|
||||
### `start_advertising(const char* service_id)`
|
||||
|
||||
Start advertising this node against any capability
|
||||
encoded as an input `service_id` string.
|
||||
|
||||
In the case of Extended Kademlia Discovery,
|
||||
`start_advertising()` will include the input `service_id`
|
||||
in the [regularly propagated `XPR`](#record-propagation).
|
||||
|
||||
### `stop_advertising(const char* service_id)`
|
||||
|
||||
Stop advertising this node against the capability
|
||||
encoded in the input `service_id` string.
|
||||
|
||||
In the case of Extended Kademlia Discovery,
|
||||
`stop_advertising()` will exclude the `service_id`
|
||||
from the [regularly propagated `XPR`](#record-propagation),
|
||||
if it was previously included.
|
||||
|
||||
### `ExtensiblePeerRecords* lookup(const char* service_id, ...)`
|
||||
|
||||
Lookup and return records for peers supporting the capability encoded in the input `service_id` string,
|
||||
using the underlying discovery protocol.
|
||||
`service_id` is an OPTIONAL input argument.
|
||||
If unset, it indicates a lookup for peers supporting any (or zero) capabilities.
|
||||
|
||||
In the case of Extended Kademlia Discovery,
|
||||
`lookup()` will trigger the random walk [record discovery](#record-discovery),
|
||||
filtering discovered records based on `service_id`, if specified.
|
||||
If no `service_id` is specified,
|
||||
Extended Kademlia Discovery will just return a random selection of peer records,
|
||||
matching any capability.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
- [extensible peer records specification](https://github.com/logos-co/logos-lips/blob/31dfa0c8c2f3e7f7365156246c4eb7b7c390e76e/vac/raw/extensible-peer-records.md)
|
||||
- [libp2p Kademlia DHT specification](https://github.com/libp2p/specs/blob/e87cb1c32a666c2229d3b9bb8f9ce1d9cfdaa8a9/kad-dht/README.md)
|
||||
- [RFC002 Signed Envelope](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0002-signed-envelopes.md)
|
||||
- [RFC003 Routing Records](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0003-routing-records.md)
|
||||
- [capability discovery](https://github.com/logos-co/logos-lips/blob/31dfa0c8c2f3e7f7365156246c4eb7b7c390e76e/vac/raw/logos-capability-discovery.md)
|
||||
@@ -1,177 +0,0 @@
|
||||
# EXTENSIBLE-PEER-RECORDS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Extensible Peer Records |
|
||||
| Slug | 74 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Hanno Cornelius <hanno@status.im> |
|
||||
| Contributors | Simon-Pierre Vivier <simvivier@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/extensible-peer-records.md) — chore: add math support (#287)
|
||||
- **2026-01-24** — [`ffca40a`](https://github.com/logos-co/logos-lips/blob/ffca40abfa6b42f239439550cd2fc47fc802f22a/docs/ift-ts/raw/extensible-peer-records.md) — Mix spam and sybil protection protocol using RLN (#252)
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/extensible-peer-records.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/extensible-peer-records.md) — Chore/mdbook updates (#258)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This RFC proposes Extensible Peer Records,
|
||||
an extension of libp2p's [routing records](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0003-routing-records.md),
|
||||
that enables peers to encode an arbitrary list of supported services and essential service-related information
|
||||
in distributable records.
|
||||
This version of routing records allows peers to communicate capabilities such as protocol support,
|
||||
and essential information related to such capabilities.
|
||||
This is especially useful when (signed) records are used in peer discovery,
|
||||
allowing discoverers to filter for peers matching a desired set of capability criteria.
|
||||
Extensible Peer Records maintain backwards compatibility with standard libp2p routing records,
|
||||
while adding an extensible service information field that supports finer-grained capability communication.
|
||||
|
||||
> **_A note on terminology:_** We opt to call this structure a "_peer record_", even though the corresponding libp2p specification refers to a "_routing record_".
|
||||
> This is because the libp2p specification itself defines an internal [`PeerRecord` type](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md#address-record-format),
|
||||
> and, when serialised into a signed envelope, this is most often called a "_signed peer record_" (see, for example, [go-libp2p identify protocol](https://github.com/libp2p/go-libp2p/blob/479b24baab77b4b99d7e31462b91cc04f89f1de4/p2p/protocol/identify/pb/identify.proto#L37)).
|
||||
|
||||
The keywords “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”,
|
||||
“SHOULD NOT”, “RECOMMENDED”, “MAY”, and
|
||||
“OPTIONAL” in this document are to be interpreted as described in [2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
## Motivation
|
||||
|
||||
We propose a new peer record as an extension of libp2p's [RFC003 Routing Records](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0003-routing-records.md)
|
||||
that allows encoding an arbitrary list of services,
|
||||
and essential information pertaining to those services,
|
||||
supported by the peer.
|
||||
|
||||
There are at least two reasons why a peer might want to encode service information in its peer records:
|
||||
|
||||
1. **To augment `identify` with peer capabilities:**
|
||||
The libp2p [`identify` protocol](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/identify/README.md) allows peers to exchange critical information,
|
||||
such as supported protocols,
|
||||
on first connection.
|
||||
The peer record (in a signed envelope) can also be exchanged during `identify`.
|
||||
However, peers may want to exchange finer-grained information related to supported protocols/services,
|
||||
that would otherwise require an application-level negotiation protocol,
|
||||
or that is critical to connect to the service in the first place.
|
||||
An example would be nodes supporting libp2p [`mix` protocol](https://rfc.vac.dev/vac/raw/mix) also needing to exchange the mix key
|
||||
before the service can be used.
|
||||
2. **To advertise supported services:**
|
||||
If the peer record is used as the discoverable record for a peer
|
||||
(as we propose for various discovery methods)
|
||||
that peer may want to encode a list of supported services
|
||||
in its advertised record.
|
||||
These services may be (but are not limited to) a list of supported libp2p protocols
|
||||
and critical information pertaining to that service (such as the mix key, explained above).
|
||||
Discoverers can then filter discovered records for desired capabilities
|
||||
based on the encoded service information
|
||||
or use it to initiate the service.
|
||||
|
||||
## Wire protocol
|
||||
|
||||
### Extensible Peer Records
|
||||
|
||||
Extensible Peer Records MUST adhere to the following structure:
|
||||
|
||||
```protobuf
|
||||
syntax = "proto3";
|
||||
|
||||
package peer.pb;
|
||||
|
||||
// ExtensiblePeerRecord messages contain information that is useful to share with other peers.
|
||||
// Currently, an ExtensiblePeerRecord contains the public listen addresses for a peer
|
||||
// and an extensible list of supported services as key-value pairs.
|
||||
//
|
||||
// ExtensiblePeerRecords are designed to be serialised to bytes and placed inside of
|
||||
// SignedEnvelopes before sharing with other peers.
|
||||
message ExtensiblePeerRecord {
|
||||
|
||||
// AddressInfo is a wrapper around a binary multiaddr. It is defined as a
|
||||
// separate message to allow us to add per-address metadata in the future.
|
||||
message AddressInfo {
|
||||
bytes multiaddr = 1;
|
||||
}
|
||||
|
||||
// peer_id contains a libp2p peer id in its binary representation.
|
||||
bytes peer_id = 1;
|
||||
|
||||
// seq contains a monotonically-increasing sequence counter to order ExtensiblePeerRecords in time.
|
||||
uint64 seq = 2;
|
||||
|
||||
// addresses is a list of public listen addresses for the peer.
|
||||
repeated AddressInfo addresses = 3;
|
||||
|
||||
message ServiceInfo{
|
||||
string id = 1;
|
||||
optional bytes data = 2;
|
||||
}
|
||||
|
||||
// Extensible list of advertised services
|
||||
repeated ServiceInfo services = 4;
|
||||
}
|
||||
```
|
||||
|
||||
A peer MAY include a list of supported services in the `services` field.
|
||||
These services could be libp2p protocols,
|
||||
in which case it is RECOMMENDED that the `ServiceInfo` `id` field
|
||||
be derived from the libp2p protocol identifier.
|
||||
In any case, for each supported service,
|
||||
the `id` field MUST be populated with a string identifier for that service.
|
||||
In addition, the `data` field MAY be populated with additional information about the service.
|
||||
It is RECOMMENDED that each `data` field be no more than `33` bytes.
|
||||
(We choose `33` here to allow for the encoding of `256` bit keys with parity.
|
||||
Also see [_Size constraints_](#size-constraints) for recommendations on limiting the overall record size.)
|
||||
|
||||
The rest of the `ExtensiblePeerRecord`
|
||||
MUST be populated as per the libp2p [`PeerRecord` specification](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0003-routing-records.md).
|
||||
Due to the natural extensibility of protocol buffers,
|
||||
serialised `ExtensiblePeerRecord`s are backwards compatible with libp2p `PeerRecord`s,
|
||||
only adding the functionality related to service info exchange.
|
||||
|
||||
#### Size constraints
|
||||
|
||||
To limit the impact on resources,
|
||||
`ExtensiblePeerRecord`s SHOULD NOT be used to encode information
|
||||
that is not essential for discovery or service initiation.
|
||||
Since these records are likely to be exchanged frequently,
|
||||
they should be kept as small as possible while still providing all necessary functionality.
|
||||
Although specific applications MAY choose to enforce a smaller size,
|
||||
it is RECOMMENDED that an absolute maximum size of `1024` bytes is enforced for valid records.
|
||||
Extensible Peer Records may be included in size-constrained protocols
|
||||
that further limit the size (such as DNS).
|
||||
|
||||
### Wrapping in Signed Peer Envelopes
|
||||
|
||||
Extensible Peer Records MUST be wrapped in libp2p [signed envelope](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0002-signed-envelopes.md)s
|
||||
before distributing them to peers.
|
||||
The corresponding `ExtensiblePeerRecord` message is serialised into the signed envelope's `payload` field.
|
||||
|
||||
#### Signed Envelope Domain
|
||||
|
||||
Extensible Peer Records MUST use `libp2p-routing-state` as domain separator string
|
||||
for the envelope signature.
|
||||
This is the same as for ordinary libp2p [routing records](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0003-routing-records.md#signed-envelope-domain).
|
||||
|
||||
#### Signed Envelope Payload Type
|
||||
|
||||
Extensible Peer Records MUST use the UTF8 string `/libp2p/extensible-peer-record/`
|
||||
as the `payload_type` value.
|
||||
|
||||
> **_Note:_** this will make Extensible Peer Records a subtype of the "namespace" [multicodec](https://github.com/multiformats/multicodec/blob/0c6c7d75f1580af329847dbc9900859a445ed980/table.csv).
|
||||
> In future we may define a more compact multicodec type for Extensible Peer Records.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
- [libp2p `identify`](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/identify/README.md)
|
||||
- [libp2p mix](https://rfc.vac.dev/vac/raw/mix)
|
||||
- [multicodec](https://github.com/multiformats/multicodec/blob/0c6c7d75f1580af329847dbc9900859a445ed980/table.csv)
|
||||
- [RFC002 Signed Envelope](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0002-signed-envelopes.md)
|
||||
- [RFC003 Routing Records](https://github.com/libp2p/specs/blob/7740c076350b6636b868a9e4a411280eea34d335/RFC/0003-routing-records.md)
|
||||
@@ -1,243 +0,0 @@
|
||||
# GOSSIPSUB-TOR-PUSH
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Gossipsub Tor Push |
|
||||
| Slug | 105 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Daniel Kaiser <danielkaiser@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/gossipsub-tor-push.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/gossipsub-tor-push.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/gossipsub-tor-push.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/gossipsub-tor-push.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/gossipsub-tor-push.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/gossipsub-tor-push.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/raw/gossipsub-tor-push.md) — Fix Files for Linting (#94)
|
||||
- **2024-05-27** — [`99be3b9`](https://github.com/logos-co/logos-lips/blob/99be3b974509ea03561c7ef4b1b02a56f24e9297/vac/raw/gossipsub-tor-push.md) — Move Raw Specs (#37)
|
||||
- **2024-02-01** — [`cd8c9f4`](https://github.com/logos-co/logos-lips/blob/cd8c9f45f4d3eb0d8275fbaad378a42370c5b9a6/vac/46/gossipsub-tor-push.md) — Update and rename GOSSIPSUB-TOR-PUSH.md to gossipsub-tor-push.md
|
||||
- **2024-01-27** — [`0db60c1`](https://github.com/logos-co/logos-lips/blob/0db60c18c18cfd2373204083cf4a1f5f3b8845dd/vac/46/GOSSIPSUB-TOR-PUSH.md) — Create GOSSIPSUB-TOR-PUSH.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document extends the [libp2p gossipsub specification](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md)
|
||||
specifying gossipsub Tor Push,
|
||||
a gossipsub-internal way of pushing messages into a gossipsub network via Tor.
|
||||
Tor Push adds sender identity protection to gossipsub.
|
||||
|
||||
**Protocol identifier**: /meshsub/1.1.0
|
||||
|
||||
Note: Gossipsub Tor Push does not have a dedicated protocol identifier.
|
||||
It uses the same identifier as gossipsub and
|
||||
works with all [pubsub](https://github.com/libp2p/specs/tree/master/pubsub)
|
||||
based protocols.
|
||||
This allows nodes that are oblivious to Tor Push to process messages received via
|
||||
Tor Push.
|
||||
|
||||
## Background
|
||||
|
||||
Without extensions, [libp2p gossipsub](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md)
|
||||
does not protect sender identities.
|
||||
|
||||
A possible design of an anonymity extension to gossipsub
|
||||
is pushing messages through an anonymization network
|
||||
before they enter the gossipsub network.
|
||||
[Tor](https://www.torproject.org/) is currently the largest anonymization network.
|
||||
It is well researched and works reliably.
|
||||
Basing our solution on Tor both inherits existing security research,
|
||||
as well as allows for a quick deployment.
|
||||
|
||||
Using the anonymization network approach,
|
||||
even the first gossipsub node that relays a given message
|
||||
cannot link the message to its sender
|
||||
(within a relatively strong adversarial model).
|
||||
Taking the low bandwidth overhead and the low latency overhead into consideration,
|
||||
Tor offers very good anonymity properties.
|
||||
|
||||
## Functional Operation
|
||||
|
||||
Tor Push allows nodes to push messages over Tor into the gossipsub network.
|
||||
The approach specified in this document is fully backwards compatible.
|
||||
Gossipsub nodes that do not support Tor Push can receive and relay Tor Push messages,
|
||||
because Tor Push uses the same Protocol ID as gossipsub.
|
||||
|
||||
Messages are sent over Tor via [SOCKS5](https://www.rfc-editor.org/rfc/rfc1928).
|
||||
Tor Push uses a dedicated libp2p context to prevent information leakage.
|
||||
To significantly increase resilience and mitigate circuit failures,
|
||||
Tor Push establishes several connections,
|
||||
each to a different randomly selected gossipsub node.
|
||||
|
||||
## Specification
|
||||
|
||||
This section specifies the format of Tor Push messages,
|
||||
as well as how Tor Push messages are received and sent, respectively.
|
||||
|
||||
### Wire Format
|
||||
|
||||
The wire format of a Tor Push message corresponds verbatim to a typical
|
||||
[libp2p pubsub message](https://github.com/libp2p/specs/tree/master/pubsub#the-message).
|
||||
|
||||
```protobuf
|
||||
message Message {
|
||||
optional string from = 1;
|
||||
optional bytes data = 2;
|
||||
optional bytes seqno = 3;
|
||||
required string topic = 4;
|
||||
optional bytes signature = 5;
|
||||
optional bytes key = 6;
|
||||
}
|
||||
```
|
||||
|
||||
### Receiving Tor Push Messages
|
||||
|
||||
Any node supporting a protocol with ID `/meshsub/1.1.0` (e.g. gossipsub),
|
||||
can receive Tor Push messages.
|
||||
Receiving nodes are oblivious to Tor Push and
|
||||
will process incoming messages according to the respective `meshsub/1.1.0` specification.
|
||||
|
||||
### Sending Tor Push Messages
|
||||
|
||||
In the following, we refer to nodes sending Tor Push messages as Tp-nodes
|
||||
(Tor Push nodes).
|
||||
|
||||
Tp-nodes MUST setup a separate libp2p context, i.e. [libp2p switch](https://docs.libp2p.io/concepts/multiplex/switch/),
|
||||
which MUST NOT be used for any purpose other than Tor Push.
|
||||
We refer to this context as Tp-context.
|
||||
The Tp-context MUST NOT share any data, e.g. peer lists, with the default context.
|
||||
|
||||
Tp-peers are peers a Tp-node plans to send Tp-messages to.
|
||||
Tp-peers MUST support `/meshsub/1.1.0`.
|
||||
For retrieving Tp-peers,
|
||||
Tp-nodes SHOULD use an ambient peer discovery method
|
||||
that retrieves a random peer sample (from the set of all peers),
|
||||
e.g. [33/WAKU2-DISCV5](../../messaging/standards/core/33/discv5.md).
|
||||
|
||||
Tp-nodes MUST establish a connection as described in sub-section
|
||||
[Tor Push Connection Establishment](#connection-establishment) to at least one Tp-peer.
|
||||
To significantly increase resilience,
|
||||
Tp-nodes SHOULD establish Tp-connections to `D` peers,
|
||||
where `D` is the [desired gossipsub out-degree](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md#parameters),
|
||||
with a default value of `8`.
|
||||
|
||||
Each Tp-message MUST be sent via the Tp-context over at least one Tp-connection.
|
||||
To increase resilience,
|
||||
Tp-messages SHOULD be sent via the Tp-context over all available Tp-connections.
|
||||
|
||||
Control messages of any kind, e.g. gossipsub graft, MUST NOT be sent via Tor Push.
|
||||
|
||||
#### Connection Establishment
|
||||
|
||||
Tp-nodes establish a `/meshsub/1.1.0` connection to tp-peers via
|
||||
[SOCKS5](https://www.rfc-editor.org/rfc/rfc1928) over [Tor](https://www.torproject.org/).
|
||||
|
||||
Establishing connections, which in turn establishes the respective Tor circuits,
|
||||
can be done ahead of time.
|
||||
|
||||
#### Epochs
|
||||
|
||||
Tor Push introduces epochs.
|
||||
The default epoch duration is 10 minutes.
|
||||
(We might adjust this default value based on experiments and
|
||||
evaluation in future versions of this document.
|
||||
It seems a good trade-off between traceability and circuit-building overhead.)
|
||||
|
||||
For each epoch, the Tp-context SHOULD be refreshed, which includes
|
||||
|
||||
* libp2p peer-ID
|
||||
* Tp-peer list
|
||||
* connections to Tp-peers
|
||||
|
||||
Both Tp-peer selection for the next epoch and
|
||||
establishing connections to the newly selected peers
|
||||
SHOULD be done during the current epoch
|
||||
and be completed before the new epoch starts.
|
||||
This avoids adding latency to message transmission.
|
||||
|
||||
## Security/Privacy Considerations
|
||||
|
||||
### Fingerprinting Attacks
|
||||
|
||||
Protocols that feature distinct patterns are prone to fingerprinting attacks
|
||||
when using them over Tor Push.
|
||||
Both malicious guards and exit nodes could detect these patterns
|
||||
and link the sender and receiver, respectively, to transmitted traffic.
|
||||
As a mitigation, such protocols can introduce dummy messages and/or
|
||||
padding to hide patterns.
|
||||
|
||||
### DoS
|
||||
|
||||
#### General DoS against Tor
|
||||
|
||||
Using untargeted DoS to prevent Tor Push messages
|
||||
from entering the gossipsub network would cost vast resources,
|
||||
because Tor Push transmits messages over several circuits and
|
||||
the Tor network is well established.
|
||||
|
||||
#### Targeting the Guard
|
||||
|
||||
Denying the service of a specific guard node
|
||||
blocks Tp-nodes using the respective guard.
|
||||
Tor guard selection will replace this guard [TODO elaborate].
|
||||
Still, messages might be delayed during this window
|
||||
which might be critical to certain applications.
|
||||
|
||||
#### Targeting the Gossipsub Network
|
||||
|
||||
Without sophisticated rate limiting (for example using [17/WAKU2-RLN-RELAY](../../messaging/standards/core/17/rln-relay.md)),
|
||||
attackers can spam the gossipsub network.
|
||||
It is not enough to just block peers that send too many messages,
|
||||
because these messages might actually come from a Tor exit node
|
||||
that many honest Tp-nodes use.
|
||||
Without Tor Push,
|
||||
protocols on top of gossipsub could block peers
|
||||
if they exceed a certain message rate.
|
||||
With Tor Push, this would allow the reputation-based DoS attack described in
|
||||
[Bitcoin over Tor isn't a Good Idea](https://ieeexplore.ieee.org/abstract/document/7163022).
|
||||
|
||||
#### Peer Discovery
|
||||
|
||||
The discovery mechanism could be abused to link requesting nodes
|
||||
to their Tor connections to discovered nodes.
|
||||
An attacker that controls both the node that responds to a discovery query,
|
||||
and the node whose ENR the response contains,
|
||||
can link the requester to a Tor connection
|
||||
that is expected to be opened to the node represented by the returned ENR soon after.
|
||||
|
||||
Further, the discovery mechanism (e.g. discv5)
|
||||
could be abused to distribute disproportionately many malicious nodes.
|
||||
For instance if p% of the nodes in the network are malicious,
|
||||
an attacker could manipulate the discovery to return malicious nodes with 2p% probability.
|
||||
The discovery mechanism needs to be resilient against this attack.
|
||||
|
||||
### Roll-out Phase
|
||||
|
||||
During the roll-out phase of Tor Push, during which only a few nodes use Tor Push,
|
||||
attackers can narrow down the senders of Tor messages
|
||||
to the set of gossipsub nodes that do not originate messages.
|
||||
Nodes who want anonymity guarantees even during the roll-out phase
|
||||
can use separate network interfaces for their default context and
|
||||
Tp-context, respectively.
|
||||
For the best protection, these contexts should run on separate physical machines.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
* [libp2p gossipsub](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md)
|
||||
* [libp2p pubsub](https://github.com/libp2p/specs/tree/master/pubsub)
|
||||
* [libp2p pubsub message](https://github.com/libp2p/specs/tree/master/pubsub#the-message)
|
||||
* [libp2p switch](https://docs.libp2p.io/concepts/multiplex/switch)
|
||||
* [SOCKS5](https://www.rfc-editor.org/rfc/rfc1928)
|
||||
* [Tor](https://www.torproject.org/)
|
||||
* [33/WAKU2-DISCV5](../../messaging/standards/core/33/discv5.md)
|
||||
* [Bitcoin over Tor isn't a Good Idea](https://ieeexplore.ieee.org/abstract/document/7163022)
|
||||
* [17/WAKU2-RLN-RELAY](../../messaging/standards/core/17/rln-relay.md)
|
||||
|
Before Width: | Height: | Size: 58 KiB |
|
Before Width: | Height: | Size: 64 KiB |
|
Before Width: | Height: | Size: 13 KiB |
|
Before Width: | Height: | Size: 29 KiB |
|
Before Width: | Height: | Size: 54 KiB |
|
Before Width: | Height: | Size: 54 KiB |
|
Before Width: | Height: | Size: 35 KiB |
|
Before Width: | Height: | Size: 31 KiB |
|
Before Width: | Height: | Size: 36 KiB |
@@ -1 +0,0 @@
|
||||
|
||||
@@ -1,360 +0,0 @@
|
||||
# RLN DoS Protection for Mixnet
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | RLN DoS Protection for Mixnet |
|
||||
| Slug | 144 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Prem Prathi <prem@status.im> |
|
||||
| Contributors | |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/mix-spam-protection-rln.md) — chore: add math support (#287)
|
||||
- **2026-01-29** — [`3cd2d09`](https://github.com/logos-co/logos-lips/blob/3cd2d090a4c8aa7a762dd9357d21cd73bb57cd15/docs/ift-ts/raw/mix-spam-protection-rln.md) — fix title of doc (#282)
|
||||
- **2026-01-29** — [`0e53ebb`](https://github.com/logos-co/logos-lips/blob/0e53ebb1b0d090d1d2957a0164c85c38d81560f8/docs/ift-ts/raw/mix-spam-protection-rln.md) — change header to new format (#279)
|
||||
- **2026-01-24** — [`ffca40a`](https://github.com/logos-co/logos-lips/blob/ffca40abfa6b42f239439550cd2fc47fc802f22a/docs/ift-ts/raw/mix-spam-protection-rln.md) — Mix spam and sybil protection protocol using RLN (#252)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document defines a spam and sybil protection protocol for [libp2p mix](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md) based mixnets.
|
||||
The protocol specifies how [Rate Limiting Nullifiers (RLN)](https://vac.dev/rln) can be integrated into libp2p mix.
|
||||
RLN allows mix nodes to detect and drop spam without identifying legitimate users, addressing spam attacks.
|
||||
RLN requires membership for mix nodes to send or forward messages, addressing the sybil attack vector.
|
||||
RLN satisfies the spam protection [requirements](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md#91-spam-protection-mechanism-requirements) defined in the libp2p mix protocol.
|
||||
|
||||
## Background / Rationale / Motivation
|
||||
|
||||
Mixnets provide strong privacy guarantees by routing messages through multiple mix nodes using layered encryption and per-hop delays to obscure both routing paths and timing correlations. In order to have a production-ready mixnet using the [libp2p mix](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md), two critical vulnerabilities must be addressed:
|
||||
|
||||
1. **Spam attacks**: An attacker can generate well-formed sphinx packets targeting mix nodes and can exhaust their resources.
|
||||
In the case of mixnets, it is easy to attack a later hop in the mix path by choosing different first-hop nodes.
|
||||
An attacker with minimal resources can launch spam/DoS attacks against individual mix nodes. By targeting all mix nodes in this manner, the attacker can render the entire mixnet unusable.
|
||||
2. **Sybil attacks**: Adversaries operating multiple node identities can increase the probability of path compromise, enabling deanonymization through traffic correlation or timing analysis.
|
||||
|
||||
The [libp2p mix](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md) protocol provides an extension for integrating spam protection mechanisms.
|
||||
This specification proposes to use [Rate Limiting Nullifiers (RLN)](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/raw/rln-v2.md) as the spam prevention and sybil protection mechanism.
|
||||
This approach introduces some trade-offs such as additional per-hop latency for proof generation which are discussed in the [Tradeoffs](#tradeoffs) section.
|
||||
|
||||
## Terminology
|
||||
|
||||
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”, “SHOULD NOT”, “RECOMMENDED”,
|
||||
“NOT RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in [RFC 2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Node Roles
|
||||
|
||||
Mix protocol defines 3 roles for the nodes in the mix network - sender, exit, intermediary.
|
||||
|
||||
- A sender node is the originator node of a message, i.e. a node that wishes to originate/send messages using the mix network.
|
||||
- An exit node is responsible for delivering messages to the destination protocol.
|
||||
- An intermediary node is responsible for forwarding a mix packet to the next mix node in the path.
|
||||
|
||||
### Message
|
||||
|
||||
Message is the actual sphinx packet including headers and encrypted payload that is either originated or forwarded by a mix node.
|
||||
|
||||
### Messaging Rate
|
||||
|
||||
The messaging rate is defined as the number of messages that can be sent/forwarded per fixed unit of time, termed an `epoch`.
|
||||
Since we're using this as shorthand for the maximum allowable rate, this is also known as the _rate limit_.
|
||||
The length of each epoch is constant and defined as the `period`.
|
||||
|
||||
We define an `epoch` as $\lceil$ `unix_time` / `period` $\rceil$.
|
||||
For example, if `unix_time` is `1644810116` and we set `period` to `30`, then `epoch` is $\lceil$ `(unix_time/period)` $\rceil$ `= 54827004`.
|
||||
|
||||
> **NOTE:** The `epoch` refers to the epoch in RLN and not Unix epoch.
|
||||
> This means that no more messages than the registered rate limit can be sent per epoch, where the epoch length (`period`) is up to the application.
|
||||
|
||||
See section [System Parameters](#system-parameters) for details on the `period` parameter.
|
||||
|
||||
## Approach
|
||||
|
||||
### Overview
|
||||
|
||||
The protocol implements RLN using a [per-hop generated proof approach](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md#922-per-hop-generated-proofs), where each node in the mix path generates and verifies proofs.
|
||||
This enables network-wide spam protection while preserving user privacy.
|
||||
|
||||
Each mix node MUST have an RLN group membership in order to send or forward messages in the mixnet.
|
||||
Each mix node in the path (except the sender) verifies the incoming RLN proof before processing the message.
|
||||
After verification, each node generates and attaches a new RLN proof before forwarding the message to the next hop.
|
||||
|
||||
To effectively detect spam, mix nodes SHOULD identify when a node exceeds its [messaging rate](#messaging-rate) by reusing the same nullifier across multiple messages within an epoch (known as "double signalling").
|
||||
Since a message does not traverse all the mix nodes in the network, a spammer could exploit different paths to avoid detection by any single mix node.
|
||||
To address this, intermediary and exit nodes SHOULD participate in a [coordination layer](#coordination-layer) that indicates already seen [messaging metadata](#messaging-metadata) across the mix nodes.
|
||||
This enables all participating mix nodes to detect double signalling across different paths, derive the spammer's private key, and initiate slashing.
|
||||
|
||||
### Rationale
|
||||
|
||||
RLN is well-suited for spam and sybil protection in libp2p mix based mixnets due to the following properties:
|
||||
|
||||
- Sybil Resistance:
|
||||
- Requiring membership for each mix node creates friction to participate in the mixnet to send or forward messages
|
||||
- Operating multiple identities becomes costly, mitigating sybil attacks that could compromise mix path selection
|
||||
|
||||
- Privacy-Preserving Spam Protection:
|
||||
- Uses zero-knowledge proofs to enforce rate limits without revealing sender identities
|
||||
- Ties spam protection proof to the message content, making proofs non-reusable across messages
|
||||
- Enables economic deterrence through slashing without compromising anonymity
|
||||
|
||||
- Network-Level Benefits:
|
||||
- RLN enables setting a deterministic [messaging rate](#messaging-rate) for the mixnet, which translates to predictable bandwidth requirements (messages per epoch × sphinx packet size).
|
||||
- This makes it easier to provision and estimate resource usage for nodes participating in the mixnet.
|
||||
- The rate limit creates a baseline traffic level that, when combined with cover traffic, helps maintain k-anonymity even during periods of low organic traffic.
|
||||
|
||||
### Setup
|
||||
|
||||
Each mix node has an RLN key pair consisting of a secret key `sk` and public key `pk` as defined in [RLN](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md).
|
||||
The secret key `sk` MUST be persisted securely by the mix node.
|
||||
|
||||
A mixnet that is spam-protected requires all mix nodes in it to form an [RLN group](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md#flow).
|
||||
|
||||
- Mix nodes MUST be registered to the RLN group to be able to send or forward messages.
|
||||
- Registration MAY be moderated through a smart contract deployed on a blockchain.
|
||||
|
||||
Note: The criteria for membership are out of scope of this spec and should be implementation-specific (e.g. requiring stake).
|
||||
|
||||
The group membership data MUST be synchronized initially so that the mix node has the latest Merkle root in order to generate or verify RLN proofs.
|
||||
See [Group Synchronization](#group-synchronization) for details on maintaining synchronization.
|
||||
|
||||
Intermediary and exit mix nodes SHOULD subscribe to the coordination layer (defined [below](#coordination-layer)) in order to detect rate limit violations collaboratively.
|
||||
This ensures that mix nodes can detect spam and trigger slashing.
|
||||
|
||||
### Sending and forwarding messages
|
||||
|
||||
In order to send/forward messages via mixnet, a mix node MUST include the [RateLimitProof](#ratelimitproof) in the sphinx packet as [$\sigma$](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md#922-per-hop-generated-proofs).
|
||||
|
||||
#### Proof Generation
|
||||
|
||||
When generating an RLN proof, the node MUST:
|
||||
|
||||
1. Use its secret key `sk` and the current `epoch`
|
||||
2. Obtain the current Merkle root and [`path_elements`](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md#obtaining-merkle-proof) from the synchronized membership tree
|
||||
3. Generate a keccak256 hash of all components of the **outgoing** sphinx packet [(α', β', γ', δ')](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md#81-packet-structure-overview) and set it as the proof signal. This prevents proof reuse across different messages.
|
||||
|
||||
**Sender nodes**:
|
||||
|
||||
- generate an RLN proof for the initial sphinx packet
|
||||
- attach the proof to the packet before sending to the next hop
|
||||
|
||||
**Intermediary and Exit nodes**:
|
||||
|
||||
MUST do the following for every incoming mix packet:
|
||||
|
||||
- verify the incoming packet's RLN proof (see [Message validation](#message-validation))
|
||||
- process the sphinx packet according to the mix protocol
|
||||
- generate a NEW RLN proof for the outgoing packet
|
||||
- attach the new proof before forwarding to the next hop
|
||||
|
||||
### Group Synchronization
|
||||
|
||||
Proof generation relies on the knowledge of Merkle tree root `merkle_root` and `path_elements` (the authentication path in the Merkle proof as defined in [RLN](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md#obtaining-merkle-proof)) which both require access to the membership Merkle tree.
|
||||
Proof verification also requires knowledge of the `merkle_root` to validate that the proof was generated against a valid membership tree state.
|
||||
The RLN membership group MUST be synchronized across all mix nodes to ensure the latest Merkle root is used for RLN proof generation and verification.
|
||||
Stale roots may cause legitimate proofs to be rejected.
|
||||
Using an old root can allow inference about the index of the user's `pk` in the membership tree hence compromising user privacy and breaking message unlinkability.
|
||||
|
||||
In order to accommodate network delays, nodes MUST maintain a window of recent valid roots (see `acceptable_root_window_size` in [System Parameters](#system-parameters)).
|
||||
We recommend `5` for `acceptable_root_window_size`.
|
||||
|
||||
### Coordination Layer
|
||||
|
||||
The coordination layer enables network-wide spam detection by preventing rate limit violations through nullifier reuse detection.
|
||||
The coordination layer SHOULD be used to broadcast [messaging metadata](#messaging-metadata).
|
||||
When a node detects spam, it can reconstruct the spammer's secret key using the shared key shares and initiate [slashing](#spam-detection-and-slashing).
|
||||
|
||||
Intermediary and exit nodes that participate in the coordination layer MUST both subscribe to receive metadata and broadcast metadata from messages they process.
|
||||
Sender-only nodes need not participate in this coordination layer as they only originate messages and do not forward or validate messages from others.
|
||||
|
||||
The coordination layer MUST have its own spam and sybil protection mechanism in order to prevent these attacks.
|
||||
We recommend using [WAKU-RLN-RELAY](https://github.com/logos-co/logos-lips/blob/72196d89c1084d625c22b1d5cb775ad7729ad577/waku/standards/core/17/rln-relay.md).
|
||||
In this case, the Messaging Metadata MUST be encoded as the Waku Message payload.
|
||||
We recommend using the [public Waku Network](https://github.com/logos-co/logos-lips/blob/72196d89c1084d625c22b1d5cb775ad7729ad577/waku/standards/core/64/network.md) with a content topic agreed by all mix nodes.
|
||||
|
||||
### Message validation
|
||||
|
||||
A mix node MUST validate a received message using the checks below, discarding the message and stopping further checks or processing if any check fails.
|
||||
|
||||
1. If the `epoch` in the received message differs from the mix node's current `epoch` by more than `max_epoch_gap`.
|
||||
2. If the `merkle_root` is NOT in the `acceptable_root_window_size` past roots of the mix node.
|
||||
3. If the zero-knowledge proof `proof` is NOT valid. Validity is determined by running the zk verification algorithm as explained in [RLN](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md#verification-and-slashing).
|
||||
|
||||
If all checks pass, the node proceeds to [spam detection](#spam-detection-and-slashing) before processing the message.
|
||||
|
||||
#### Spam detection and Slashing
|
||||
|
||||
To enable local spam detection and slashing, mix nodes MUST store the [messaging metadata](#messaging-metadata) in a local cache. This includes metadata from:
|
||||
|
||||
- messages processed locally by the mix layer
|
||||
- messages received via the coordination layer
|
||||
|
||||
The cache SHOULD be cleared for epoch data older than `max_epoch_gap`.
|
||||
To identify spam messages, the node checks whether a message with an identical `nullifier` is present in the epoch's cache.
|
||||
|
||||
1. If no entry exists for this `nullifier`, the node stores the [messaging metadata](#messaging-metadata) in the cache and proceeds to process the message normally.
|
||||
2. If an entry exists and its `share_x` and `share_y` components are different from the incoming message, then proceed with slashing.
|
||||
The mix node uses the `share_x` and `share_y` of the new message and the shares from the local cache to reconstruct the `sk` of the message owner.
|
||||
The `sk` then MUST be used to delete the spammer from the group and withdraw its staked funds.
|
||||
The node MUST discard the message and MUST NOT forward it.
|
||||
3. If the `share_x` and `share_y` fields in the local cache are identical to the incoming message, then the message is a duplicate and MUST be discarded.
|
||||
|
||||
After successfully validating a message, intermediary and exit nodes SHOULD broadcast the [message's metadata](#messaging-metadata) using the coordination layer to enable network-wide spam detection.
|
||||
The broadcast on the coordination layer MAY be batched at least once per epoch to reduce constant traffic on the coordination layer.
|
||||
|
||||
## Wire Format Specification / Syntax
|
||||
|
||||
### Spam protection proof
|
||||
|
||||
The following `RateLimitProof` MUST be added to the sphinx packet as $\sigma$ as explained in [sending](#sending-and-forwarding-messages).
|
||||
|
||||
```protobuf
|
||||
syntax = "proto3";
|
||||
|
||||
message RateLimitProof {
|
||||
bytes proof = 1;
|
||||
bytes merkle_root = 2;
|
||||
bytes epoch = 3;
|
||||
bytes share_x = 4;
|
||||
bytes share_y = 5;
|
||||
bytes nullifier = 6;
|
||||
}
|
||||
```
|
||||
|
||||
#### RateLimitProof
|
||||
|
||||
Below is the description of the fields of `RateLimitProof` and their types.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| ----------------------: | ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `proof` | array of 128 bytes compressed | the zkSNARK proof as explained in the [Sending process](#sending-and-forwarding-messages) |
|
||||
| `merkle_root` | array of 32 bytes in little-endian order | the root of membership group Merkle tree at the time of sending the message |
|
||||
| `epoch` | array of 32 bytes | the current epoch at time of sending the message |
|
||||
| `share_x` and `share_y` | array of 32 bytes each | Shamir secret shares of the user's secret identity key `sk` . `share_x` is the hash of the message. `share_y` is calculated using [Shamir secret sharing scheme](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md) |
|
||||
| `nullifier` | array of 32 bytes | internal nullifier derived from `epoch` and node's `sk` as explained in [RLN construct](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md) |
|
||||
|
||||
### Messaging Metadata
|
||||
|
||||
[Messaging metadata](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md#notes-from-implementation) is metadata which is broadcasted via coordination layer and cached by mix nodes locally.
|
||||
This helps identify duplicate signalling in order to detect spam.
|
||||
|
||||
```protobuf
|
||||
syntax = "proto3";
|
||||
|
||||
message ExternalNullifier {
|
||||
bytes internal_nullifier = 1;
|
||||
repeated bytes x_shares = 2;
|
||||
repeated bytes y_shares = 3;
|
||||
}
|
||||
|
||||
message MessagingMetadata {
|
||||
repeated ExternalNullifier nullifiers = 1;
|
||||
}
|
||||
```
|
||||
|
||||
### System Parameters
|
||||
|
||||
The system parameters are summarized in the following table.
|
||||
|
||||
| Parameter | Description |
|
||||
| ----------------------------: | ---------------------------------------------------------------------------------- |
|
||||
| `period` | the length of `epoch` in seconds |
|
||||
| `staked_fund` | the amount of funds to be staked by mix nodes at the registration |
|
||||
| `max_epoch_gap` | the maximum allowed gap between the `epoch` of a mix node and the incoming message |
|
||||
| `acceptable_root_window_size` | the maximum number of past Merkle roots to store |
|
||||
|
||||
## Security/Privacy Considerations
|
||||
|
||||
### Known Attack Vectors and Mitigations
|
||||
|
||||
#### Sybil Attacks
|
||||
|
||||
- **Attack**: Adversary operates multiple node identities to increase path compromise probability
|
||||
- **Limitation**: Well-funded adversary can still acquire multiple memberships
|
||||
- **Mitigation**: Membership registration can consider other criteria along with stake to reduce chance of sybil identities.
|
||||
|
||||
#### Coordination Layer Attacks
|
||||
|
||||
- **Attack**: Flood coordination layer with spam metadata to create DoS
|
||||
- **Mitigation**: Coordination layer MUST implement its own spam protection (line 156)
|
||||
|
||||
#### Timing Attacks
|
||||
|
||||
- **Attack**: Correlate message timing across hops to deanonymize users
|
||||
- **Mitigation**: Mix protocol's per-hop delays provide timing obfuscation
|
||||
- **Note**: RLN metadata broadcast may create additional timing side-channels requiring analysis
|
||||
|
||||
### Privacy Considerations
|
||||
|
||||
#### Nullifier Linkability
|
||||
|
||||
- **Concern**: Nullifiers are broadcast via coordination layer, potentially enabling traffic analysis
|
||||
- **Analysis**: Nullifiers are derived from epoch and secret key, changing per epoch
|
||||
- **Limitation**: Within an epoch, multiple messages from same node share nullifier metadata structure
|
||||
|
||||
### Out of Scope
|
||||
|
||||
The following are explicitly out of scope for this specification and left to implementations:
|
||||
|
||||
- Specific membership criteria and stake amounts
|
||||
- Coordination layer protocol selection and configuration
|
||||
- Blockchain selection for RLN group management
|
||||
|
||||
## Tradeoffs
|
||||
|
||||
### Additional Latency due to proof generation in every hop
|
||||
|
||||
Per-hop RLN proof generation introduces additional latency at each mix node in the path:
|
||||
|
||||
- **Proof generation time**: Typically `100-500ms` per hop depending on hardware capabilities
|
||||
- **End-to-end impact**: For a `3-hop` path, this adds `300-1500ms` to total message delivery time
|
||||
- **Comparison**: This is significant compared to the mix protocol's per-hop delay
|
||||
- **Mitigation**: See [Future Work](#future-work) for potential optimizations using pre-computed proofs
|
||||
|
||||
This latency needs to be considered while deciding the approach to be used.
|
||||
|
||||
### Membership registration friction
|
||||
|
||||
Requiring RLN group membership for all mix nodes creates barriers to network participation:
|
||||
|
||||
- **Stake requirement**: Nodes MUST stake funds to join, limiting casual participation
|
||||
- **Registration overhead**: On-chain registration adds complexity and potential costs (gas fees)
|
||||
- **Benefit**: This friction is intentional and necessary for sybil resistance
|
||||
|
||||
The appropriate stake amount MUST balance accessibility against attack economics (see [System Parameters](#system-parameters)).
|
||||
|
||||
### Cost of ZK Proof Generation
|
||||
|
||||
Zero-knowledge proof generation imposes computational costs on mix nodes. Proof generation is CPU-intensive, requiring modern processors. May be prohibitive for mobile or embedded devices.
|
||||
|
||||
**Mitigation**: See [Future Work](#future-work) for potential research into using alternative proving systems.
|
||||
|
||||
These costs must be factored into operational expenses and node requirements.
|
||||
|
||||
## Future Work
|
||||
|
||||
In order to reduce latency introduced at each hop:
|
||||
|
||||
- RLN can be used with pre-computed proofs as explained [here](https://forum.vac.dev/t/rln-with-pre-computed-proofs/606). This approach can be explored further and could potentially replace the current proposed RLN implementation.
|
||||
- Research other proving systems that would generate faster ZK proofs.
|
||||
|
||||
Additional sybil resistance mechanisms could augment RLN by incorporating reputation-based lists similar to Tor's "directory authorities".
|
||||
|
||||
These help clients build circuits that are less likely to be entirely controlled by sybils through a range of techniques that limit nodes' possible influence based on trustworthiness metrics.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
- [libp2p mix protocol](https://github.com/logos-co/logos-lips/blob/cfc08e9f0e51de20fc5f24b77ad01163c113706e/vac/raw/mix.md/)
|
||||
- [Rate Limiting Nullifiers (RLN)](https://vac.dev/rln)
|
||||
- [Rate Limiting Nullifiers v2](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/raw/rln-v2.md)
|
||||
- [RLN v1](https://github.com/logos-co/logos-lips/blob/dabc31786b4a4ca704ebcd1105239faff7ac2b47/vac/32/rln-v1.md)
|
||||
- [Waku-Relay](https://rfc.vac.dev/spec/11/)
|
||||
- [RLN with precomputed proofs](https://forum.vac.dev/t/rln-with-pre-computed-proofs/606)
|
||||
- [Poseidon hash implementation](https://eprint.iacr.org/2019/458.pdf)
|
||||
@@ -1,238 +0,0 @@
|
||||
# Multi-message_id Burn RLN
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Multi-message_id burn feature RLN |
|
||||
| Slug | 141 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Ugur Sen <ugur@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-21** — [`70f3cfb`](https://github.com/logos-co/logos-lips/blob/70f3cfb4df4e9a94e56b1284e98ee1dc9df50ac7/docs/ift-ts/raw/multi-message_id-burn-rln.md) — chore: mdbook font fix (#266)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document specifies multi-message_id burn RLN, with which users
|
||||
can use their multiple message rights at once unlike previous versions of RLN
|
||||
that require a separate execution per `message_id`.
|
||||
|
||||
## Motivation
|
||||
|
||||
RLN is a decentralized rate-limiting mechanism designed for anonymous networks.
|
||||
In [RLNv2](rln-v2.md), the latest version of the protocol, users can apply arbitrary rate limits
|
||||
by defining a specific limit over the `message_id`.
|
||||
However, this version does not support the simultaneous exercise
|
||||
of multiple messaging rights under a single `message_id`.
|
||||
In other words, if a user needs to consume multiple `message_id` units,
|
||||
they must compute separate proofs for each one.
|
||||
|
||||
This lack of flexibility creates an imbalance: users sending signals
|
||||
of significantly different sizes still consume only one `message_id` per proof.
|
||||
While computing multiple proofs is a trivial workaround,
|
||||
it is neither computationally efficient nor manageable for high-throughput applications.
|
||||
|
||||
Multiple burning refers to the mechanism where a fixed number of `message_id` units are processed
|
||||
within the circuit to generate multiple corresponding nullifiers inside a single cryptographic proof.
|
||||
This multiple burning feature may unlock the usage of RLN for big signals
|
||||
such as large messages or complex transactions, by validating their resource consumption in a single proof.
|
||||
|
||||
Alternatively, multiple burning could be realized by defining a separate circuit
|
||||
for each possible number of `message_id` units to be consumed.
|
||||
While such an approach would allow precise specialization, it would significantly increase
|
||||
operational complexity by requiring the management, deployment, and verification
|
||||
of multiple circuit variants.
|
||||
|
||||
To avoid this complexity, this document adopts a single, fixed-size but flexible
|
||||
circuit design, where a bounded number of `message_id` units can be selectively
|
||||
burned using selector bits.
|
||||
This approach preserves the simplicity of a single
|
||||
circuit while enabling efficient multi-burn proofs within a single execution.
|
||||
|
||||
This document specifies the mechanism that allows users to burn multiple `message_id` units
|
||||
at once by slightly modifying the existing [RLNv2](rln-v2.md) circuit.
|
||||
|
||||
## Format Specification
|
||||
|
||||
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”,
|
||||
“SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document
|
||||
are to be interpreted as described in [2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
## Recap of RLNv2
|
||||
|
||||
Since the multi-message_id RLN is achieved by modifying the existing RLNv2 protocol,
|
||||
it is helpful to first recap RLNv2.
|
||||
Note that this modification only affects the signaling section;
|
||||
the remaining sections—registration, verification, and slashing—remain identical to RLNv2.
|
||||
|
||||
### RLNv2 Registration
|
||||
|
||||
RLN-Diff introduces per-user rate limits. Therefore, **id_commitment** must depend on
|
||||
`user_message_limit`, where
|
||||
0 ≤ `user_message_limit` ≤ `message_limit`.
|
||||
|
||||
The user submits the same `identity_secret_hash` as in
|
||||
[32/RLN-V1](32/rln-v1.md), i.e.
|
||||
`poseidonHash(identity_secret)`, together with `user_message_limit` to a server or
|
||||
smart contract.
|
||||
|
||||
The verifier computes
|
||||
`rate_commitment = poseidonHash(identity_secret_hash, user_message_limit)`,
|
||||
which is inserted as a leaf in the membership Merkle tree.
|
||||
|
||||
### RLNv2 Signalling
|
||||
|
||||
For proof generation, the user needs to submit the following fields to the circuit:
|
||||
|
||||
```js
|
||||
{
|
||||
identity_secret: identity_secret_hash,
|
||||
path_elements: Merkle_proof.path_elements,
|
||||
identity_path_index: Merkle_proof.indices,
|
||||
x: signal_hash,
|
||||
message_id: message_id,
|
||||
external_nullifier: external_nullifier,
|
||||
user_message_limit: message_limit
|
||||
}
|
||||
```
|
||||
|
||||
Calculating output
|
||||
|
||||
The output `[y, internal_nullifier]` is calculated in the following way:
|
||||
|
||||
```js
|
||||
|
||||
a_0 = identity_secret_hash;
|
||||
a_1 = poseidonHash([a_0, external_nullifier, message_id]);
|
||||
|
||||
y = a_0 + x * a_1;
|
||||
|
||||
internal_nullifier = poseidonHash([a_1]);
|
||||
|
||||
```
|
||||
|
||||
### RLNv2 Verification/slashing
|
||||
|
||||
Verification and slashing in both subprotocols remain the same as in [32/RLN-V1](32/rln-v1.md).
|
||||
The only difference that may arise is the `message_limit` check in RLN-Same,
|
||||
since it is now a public input of the Circuit.
|
||||
|
||||
## Multi-message_id Burn RLN (Multi-burn RLN)
|
||||
|
||||
The multi-burn protocol follows previous versions by comprising
|
||||
registration, signaling, and verification/slashing sections.
|
||||
|
||||
Since the registration and verification/slashing mechanisms remain unchanged,
|
||||
this section focuses exclusively on the modifications to the signaling process.
|
||||
|
||||
### Multi-burn RLN Signalling
|
||||
|
||||
The multi-burn RLN signalling section consists of the proving of the circuit as follows:
|
||||
|
||||
Circuit parameters
|
||||
|
||||
Public Inputs
|
||||
|
||||
* `x`
|
||||
* `external_nullifier`
|
||||
* `selector_used []`
|
||||
|
||||
Private Inputs
|
||||
|
||||
* `identity_secret_hash`
|
||||
* `path_elements`
|
||||
* `identity_path_index`
|
||||
* `message_id []`
|
||||
* `user_message_limit`
|
||||
|
||||
Outputs
|
||||
|
||||
* `y []`
|
||||
* `root`
|
||||
* `internal_nullifiers []`
|
||||
|
||||
The output `(root, y [], internal_nullifiers [])` is calculated in the following way:
|
||||
|
||||
```js
|
||||
|
||||
a_0 = identity_secret_hash;
|
||||
a_1i = poseidonHash([a_0, external_nullifier, message_id [i]]);
|
||||
|
||||
y_i = a_0 + x * a_1i;
|
||||
|
||||
internal_nullifiers_i = poseidonHash([a_1i]);
|
||||
|
||||
```
|
||||
|
||||
where 0 < `i` ≤ `max_out`, `max_out` is a new parameter that is fixed for an application.
|
||||
`max_out` is arranged according to the requirements of the application.
|
||||
Defining this fixed number keeps the design flexible while using a single circuit that is maintainable.
|
||||
The user is free to burn an arbitrary number of `message_id` units at once, up to `max_out`.
|
||||
|
||||
Note that within a given epoch, the `external_nullifier` MUST be identical for all messages
|
||||
as shown in NULL (unused) output [section](#null-unused-outputs),
|
||||
as it is computed deterministically from the epoch value and the `rln_identifier` as follows:
|
||||
|
||||
```js
|
||||
external_nullifier = poseidonHash([epoch, rln_identifier]);
|
||||
|
||||
```
|
||||
|
||||
#### NULL (unused) outputs
|
||||
|
||||
Since the number of used `message_id` values MAY be less than `max_out`, the difference
|
||||
`j = max_out - i`, where `0 ≤ j ≤ max_out − 1`, denotes the number of unused output slots.
|
||||
|
||||
These `j` outputs are referred to as **NULL outputs**. NULL outputs carry no semantic meaning and
|
||||
**MUST** be identical to one another in order to unambiguously indicate that they correspond to
|
||||
unused `message_id` slots and do not represent valid proofs.
|
||||
|
||||
To compute NULL outputs, the circuit makes use of a selector bit array `selector_used []`,
|
||||
where `selector_used[i] = 1` denotes a used `message_id` slot and `selector_used[i] = 0` denotes
|
||||
an unused slot.
|
||||
|
||||
The `message_id` values MUST NOT be checked in the circuit incrementally (e.g., `1, 2, 3, ...`),
|
||||
independently of whether a slot is used or unused.
|
||||
For the best practice the application MAY pass the `message_id` values incrementally and
|
||||
tracks unused `message_id` values across executions to ensure that subsequent executions
|
||||
continue from the last assigned `message_id` without reuse or skipping.
|
||||
The circuit computes the corresponding intermediate values for all slots according to the RLNv2 equations.
|
||||
|
||||
For each slot `k`, the final outputs are masked using the selector bits as
|
||||
follows:
|
||||
|
||||
```js
|
||||
|
||||
a_0 = identity_secret_hash;
|
||||
a_1i = poseidonHash([a_0, external_nullifier, message_id [i]]);
|
||||
|
||||
y_i = selector_used[i] * (a_0 + x * a_1i);
|
||||
|
||||
internal_nullifiers_i = selector_used[i] * poseidonHash([a_1i]);
|
||||
|
||||
```
|
||||
|
||||
Since multiplication by zero yields the additive identity in the field,
|
||||
all unused slots (`selector_used[k] = 0`) result in
|
||||
`y[k] = 0` and `internal_nullifiers[k] = 0`, which are interpreted as
|
||||
NULL outputs and carry no semantic meaning.
|
||||
|
||||
As a consequence, the presence of valid-looking `message_id` values in unused
|
||||
slots does not result in additional burns, as their corresponding outputs are
|
||||
fully masked and ignored during verification.
|
||||
Moreover, `message_id` values that are provided to the circuit but correspond to unused slots (`selector_used[k] = 0`)
|
||||
are not considered consumed and MAY be reused in subsequent proofs in which the corresponding selector bit is set to `1`.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/)
|
||||
|
||||
### References
|
||||
|
||||
* [RLNv1](32/rln-v1.md)
|
||||
* [RLNv2](rln-v2.md)
|
||||
@@ -1,359 +0,0 @@
|
||||
# NOISE-X3DH-DOUBLE-RATCHET
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Secure 1-to-1 channel setup using X3DH and the double ratchet |
|
||||
| Slug | 108 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Ramses Fernandez <ramses@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/noise-x3dh-double-ratchet.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/noise-x3dh-double-ratchet.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/noise-x3dh-double-ratchet.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/noise-x3dh-double-ratchet.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/noise-x3dh-double-ratchet.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-04-04** — [`517b639`](https://github.com/logos-co/logos-lips/blob/517b63984c875670e437d50359f2f67331104974/vac/raw/noise-x3dh-double-ratchet.md) — Update the RFCs: Vac Raw RFC (#143)
|
||||
- **2024-10-03** — [`c655980`](https://github.com/logos-co/logos-lips/blob/c655980494a5943634c372009bbea71c13196a8f/vac/raw/eth-secure-channel.md) — Eth secpm splitted (#91)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Motivation
|
||||
|
||||
The need for secure communications has become paramount.
|
||||
This specification outlines a protocol describing a
|
||||
secure 1-to-1 communication channel between 2 users. The
|
||||
main components are the X3DH key establishment mechanism,
|
||||
combined with the double ratchet. The aim of this
|
||||
combination of schemes is providing a protocol with both
|
||||
forward secrecy and post-compromise security.
|
||||
|
||||
## Theory
|
||||
|
||||
The specification is based on the noise protocol framework.
|
||||
It corresponds to the double ratchet scheme combined with
|
||||
the X3DH algorithm, which will be used to initialize the former.
|
||||
We chose to express the protocol in noise to be able to use
|
||||
the noise streamlined implementation and proving features.
|
||||
The X3DH algorithm provides both authentication and forward
|
||||
secrecy, as stated in the
|
||||
[X3DH specification](https://signal.org/docs/specifications/x3dh/).
|
||||
|
||||
This protocol will consist of several stages:
|
||||
|
||||
1. Key setting for X3DH: this step will produce
|
||||
prekey bundles for Bob which will be fed into X3DH.
|
||||
It will also allow Alice to generate the keys required
|
||||
to run the X3DH algorithm correctly.
|
||||
2. Execution of X3DH: This step will output
|
||||
a common secret key `SK` together with an additional
|
||||
data vector `AD`. Both will be used in the double
|
||||
ratchet algorithm initialization.
|
||||
3. Execution of the double ratchet algorithm
|
||||
for forward secure, authenticated communications,
|
||||
using the common secret key `SK`, obtained from X3DH, as a root key.
|
||||
|
||||
The protocol assumes the following requirements:
|
||||
|
||||
- Alice knows Bob’s Ethereum address.
|
||||
- Bob is willing to participate in the protocol,
|
||||
and publishes his public key.
|
||||
- Bob’s ownership of his public key is verifiable.
|
||||
- Alice wants to send message M to Bob.
|
||||
- An eavesdropper cannot read M’s content
|
||||
even if she is storing it or relaying it.
|
||||
|
||||
## Syntax
|
||||
|
||||
### Cryptographic suite
|
||||
|
||||
The following cryptographic functions MUST be used:
|
||||
|
||||
- `X448` as Diffie-Hellman function `DH`.
|
||||
- `SHA256` as KDF.
|
||||
- `AES256-GCM` as AEAD algorithm.
|
||||
- `SHA512` as hash function.
|
||||
- `XEd448` for digital signatures.
|
||||
|
||||
### X3DH initialization
|
||||
|
||||
This scheme MUST work on the curve curve448.
|
||||
The X3DH algorithm corresponds to the IX pattern in Noise.
|
||||
|
||||
Bob and Alice MUST define personal key pairs
|
||||
`(ik_B, IK_B)` and `(ik_A, IK_A)` respectively where:
|
||||
|
||||
- The key `ik` must be kept secret,
|
||||
- and the key `IK` is public.
|
||||
|
||||
Bob MUST generate new keys using
|
||||
`(ik_B, IK_B) = GENERATE_KEYPAIR(curve = curve448)`.
|
||||
|
||||
Bob MUST also generate a public key pair
|
||||
`(spk_B, SPK_B) = GENERATE_KEYPAIR(curve = curve448)`.
|
||||
|
||||
`SPK` is a public key generated and stored at medium-term.
|
||||
Both signed prekey and the certificate MUST
|
||||
undergo periodic replacement.
|
||||
After replacing the key,
|
||||
Bob keeps the old private key of `SPK`
|
||||
for some interval, dependent on the implementation.
|
||||
This allows Bob to decrypt delayed messages.
|
||||
|
||||
Bob MUST sign `SPK` for authentication:
|
||||
`SigSPK = XEd448(ik, Encode(SPK))`
|
||||
|
||||
A final step requires the definition of
|
||||
`prekey_bundle = (IK, SPK, SigSPK, OPK_i)`
|
||||
|
||||
One-time keys `OPK` MUST be generated as
|
||||
`(opk_B, OPK_B) = GENERATE_KEYPAIR(curve = curve448)`.
|
||||
|
||||
Before sending an initial message to Bob,
|
||||
Alice MUST generate an AD: `AD = Encode(IK_A) || Encode(IK_B)`.
|
||||
|
||||
Alice MUST generate ephemeral key pairs
|
||||
`(ek, EK) = GENERATE_KEYPAIR(curve = curve448)`.
|
||||
|
||||
The function `Encode()` transforms a
|
||||
curve448 public key into a byte sequence.
|
||||
This is specified in the [RFC 7748](http://www.ietf.org/rfc/rfc7748.txt)
|
||||
on elliptic curves for security.
|
||||
|
||||
One MUST consider `q = 2^446 - 13818066809895115352007386748515426880336692474882178609894547503885`
|
||||
for digital signatures with `(XEd448_sign, XEd448_verify)`:
|
||||
|
||||
```text
|
||||
XEd448_sign((ik, IK), message):
|
||||
Z = randbytes(64)
|
||||
r = SHA512(2^456 - 2 || ik || message || Z )
|
||||
R = (r * convert_mont(5)) % q
|
||||
h = SHA512(R || IK || message)
|
||||
s = (r + h * ik) % q
|
||||
return (R || s)
|
||||
```
|
||||
|
||||
```text
|
||||
XEd448_verify(u, message, (R || s)):
|
||||
if (R.y >= 2^448) or (s >= 2^446): return FALSE
|
||||
h = (SHA512(R || 156326 || message)) % q
|
||||
R_check = s * convert_mont(5) - h * 156326
|
||||
if R == R_check: return TRUE
|
||||
return FALSE
|
||||
```
|
||||
|
||||
```text
|
||||
convert_mont(u):
|
||||
u_masked = u % 2^448
|
||||
inv = ((1 - u_masked)^(2^448 - 2^224 - 3)) % (2^448 - 2^224 - 1)
|
||||
P.y = ((1 + u_masked) * inv) % (2^448 - 2^224 - 1)
|
||||
P.s = 0
|
||||
return P
|
||||
```
|
||||
|
||||
### Use of X3DH
|
||||
|
||||
This specification combines the double ratchet
|
||||
with X3DH using the following data as initialization for the former:
|
||||
|
||||
- The `SK` output from X3DH becomes the `SK`
|
||||
input of the double ratchet. See section 3.3 of
|
||||
[Signal Specification](https://signal.org/docs/specifications/doubleratchet/)
|
||||
for a detailed description.
|
||||
- The `AD` output from X3DH becomes the `AD`
|
||||
input of the double ratchet. See sections 3.4 and 3.5 of
|
||||
[Signal Specification](https://signal.org/docs/specifications/doubleratchet/)
|
||||
for a detailed description.
|
||||
- Bob’s signed prekey `SigSPKB` from X3DH is used as Bob’s
|
||||
initial ratchet public key of the double ratchet.
|
||||
|
||||
X3DH has three phases:
|
||||
|
||||
1. Bob publishes his identity key and prekeys to a server,
|
||||
a network, or dedicated smart contract.
|
||||
2. Alice fetches a prekey bundle from the server,
|
||||
and uses it to send an initial message to Bob.
|
||||
3. Bob receives and processes Alice's initial message.
|
||||
|
||||
Alice MUST perform the following computations:
|
||||
|
||||
```text
|
||||
dh1 = DH(IK_A, SPK_B, curve = curve448)
|
||||
dh2 = DH(EK_A, IK_B, curve = curve448)
|
||||
dh3 = DH(EK_A, SPK_B)
|
||||
SK = KDF(dh1 || dh2 || dh3)
|
||||
```
|
||||
|
||||
Alice MUST send to Bob a message containing:
|
||||
|
||||
- `IK_A, EK_A`.
|
||||
- An identifier to Bob's prekeys used.
|
||||
- A message encrypted with AES256-GCM using `AD` and `SK`.
|
||||
|
||||
Upon reception of the initial message, Bob MUST:
|
||||
|
||||
1. Perform the same computations above with the `DH()` function.
|
||||
2. Derive `SK` and construct `AD`.
|
||||
3. Decrypt the initial message encrypted with `AES256-GCM`.
|
||||
4. If decryption fails, abort the protocol.
|
||||
|
||||
### Initialization of the double ratchet
|
||||
|
||||
In this stage Bob and Alice have generated key pairs
|
||||
and agreed a shared secret `SK` using X3DH.
|
||||
|
||||
Alice calls `RatchetInitAlice()` defined below:
|
||||
|
||||
```text
|
||||
RatchetInitAlice(SK, IK_B):
|
||||
state.DHs = GENERATE_KEYPAIR(curve = curve448)
|
||||
state.DHr = IK_B
|
||||
state.RK, state.CKs = HKDF(SK, DH(state.DHs, state.DHr))
|
||||
state.CKr = None
|
||||
state.Ns, state.Nr, state.PN = 0
|
||||
state.MKSKIPPED = {}
|
||||
```
|
||||
|
||||
The HKDF function MUST be the proposal by
|
||||
[Krawczyk and Eronen](http://www.ietf.org/rfc/rfc5869.txt).
|
||||
In this proposal `chaining_key` and `input_key_material`
|
||||
MUST be replaced with `SK` and the output of `DH` respectively.
|
||||
|
||||
Similarly, Bob calls the function `RatchetInitBob()` defined below:
|
||||
|
||||
```text
|
||||
RatchetInitBob(SK, (ik_B,IK_B)):
|
||||
state.DHs = (ik_B, IK_B)
|
||||
state.DHr = None
|
||||
state.RK = SK
|
||||
state.CKs, state.CKr = None
|
||||
state.Ns, state.Nr, state.PN = 0
|
||||
state.MKSKIPPED = {}
|
||||
```
|
||||
|
||||
### Encryption
|
||||
|
||||
This function performs the symmetric key ratchet.
|
||||
|
||||
```text
|
||||
RatchetEncrypt(state, plaintext, AD):
|
||||
state.CKs, mk = HMAC-SHA256(state.CKs)
|
||||
header = HEADER(state.DHs, state.PN, state.Ns)
|
||||
state.Ns = state.Ns + 1
|
||||
return header, AES256-GCM_Enc(mk, plaintext, AD || header)
|
||||
```
|
||||
|
||||
The `HEADER` function creates a new message header
|
||||
containing the public key from the key pair output of the `DH`function.
|
||||
It outputs the previous chain length `pn`,
|
||||
and the message number `n`.
|
||||
The returned header object contains ratchet public key
|
||||
`dh` and integers `pn` and `n`.
|
||||
|
||||
### Decryption
|
||||
|
||||
The function `RatchetDecrypt()` decrypts incoming messages:
|
||||
|
||||
```text
|
||||
RatchetDecrypt(state, header, ciphertext, AD):
|
||||
plaintext = TrySkippedMessageKeys(state, header, ciphertext, AD)
|
||||
if plaintext != None:
|
||||
return plaintext
|
||||
if header.dh != state.DHr:
|
||||
SkipMessageKeys(state, header.pn)
|
||||
DHRatchet(state, header)
|
||||
SkipMessageKeys(state, header.n)
|
||||
state.CKr, mk = HMAC-SHA256(state.CKr)
|
||||
state.Nr = state.Nr + 1
|
||||
return AES256-GCM_Dec(mk, ciphertext, AD || header)
|
||||
```
|
||||
|
||||
Auxiliary functions follow:
|
||||
|
||||
```text
|
||||
DHRatchet(state, header):
|
||||
state.PN = state.Ns
|
||||
state.Ns = state.Nr = 0
|
||||
state.DHr = header.dh
|
||||
state.RK, state.CKr = HKDF(state.RK, DH(state.DHs, state.DHr))
|
||||
state.DHs = GENERATE_KEYPAIR(curve = curve448)
|
||||
state.RK, state.CKs = HKDF(state.RK, DH(state.DHs, state.DHr))
|
||||
```
|
||||
|
||||
```text
|
||||
SkipMessageKeys(state, until):
|
||||
if state.Nr + MAX_SKIP < until:
|
||||
raise Error
|
||||
if state.CKr != None:
|
||||
while state.Nr < until:
|
||||
state.CKr, mk = HMAC-SHA256(state.CKr)
|
||||
state.MKSKIPPED[state.DHr, state.Nr] = mk
|
||||
state.Nr = state.Nr + 1
|
||||
```
|
||||
|
||||
```text
|
||||
TrySkippedMessageKeys(state, header, ciphertext, AD):
|
||||
if (header.dh, header.n) in state.MKSKIPPED:
|
||||
mk = state.MKSKIPPED[header.dh, header.n]
|
||||
delete state.MKSKIPPED[header.dh, header.n]
|
||||
return AES256-GCM_Dec(mk, ciphertext, AD || header)
|
||||
else: return None
|
||||
```
|
||||
|
||||
## Information retrieval
|
||||
|
||||
### Static data
|
||||
|
||||
Some data, such as the key pairs `(ik, IK)` for Alice and Bob,
|
||||
MAY NOT be regenerated after a period of time.
|
||||
Therefore the prekey bundle MAY be stored in long-term
|
||||
storage solutions, such as a dedicated smart contract
|
||||
which outputs such a key pair when receiving an Ethereum wallet
|
||||
address.
|
||||
|
||||
Storing static data is done using a dedicated
|
||||
smart contract `PublicKeyStorage` which associates
|
||||
the Ethereum wallet address of a user with his public key.
|
||||
This mapping is done by `PublicKeyStorage`
|
||||
using a `publicKeys` function, or a `setPublicKey` function.
|
||||
This mapping is done if the user passed an authorization process.
|
||||
A user who wants to retrieve a public key associated
|
||||
with a specific wallet address calls a function `getPublicKey`.
|
||||
The user provides the wallet address as the only
|
||||
input parameter for `getPublicKey`.
|
||||
The function outputs the associated public key
|
||||
from the smart contract.
|
||||
|
||||
### Ephemeral data
|
||||
|
||||
Storing ephemeral data on Ethereum MAY be done using
|
||||
a combination of on-chain and off-chain solutions.
|
||||
This approach provides an efficient solution to
|
||||
the problem of storing updatable data in Ethereum.
|
||||
|
||||
1. Ethereum stores a reference or a hash
|
||||
that points to the off-chain data.
|
||||
2. Off-chain solutions can include systems like IPFS,
|
||||
traditional cloud storage solutions, or
|
||||
decentralized storage networks such as a
|
||||
[Swarm](https://www.ethswarm.org).
|
||||
|
||||
In any case, the user stores the associated
|
||||
IPFS hash, URL or reference in Ethereum.
|
||||
|
||||
The fact of a user not updating the ephemeral information
|
||||
can be understood as Bob not willing to participate in any
|
||||
communication.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
- [The Double Ratchet Algorithm](https://signal.org/docs/specifications/doubleratchet/)
|
||||
- [The X3DH Key Agreement Protocol](https://signal.org/docs/specifications/x3dh/)
|
||||
@@ -1,732 +0,0 @@
|
||||
# PAYMENT-STREAMS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Payment Streams Protocol for Logos Services |
|
||||
| Slug | 155 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Sergei Tikhomirov <sergei@status.im> |
|
||||
| Contributors | Akhil Peddireddy <akhil@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-03-18** — [`e07c655`](https://github.com/logos-co/logos-lips/blob/e07c655a1fb86b46c99c3dd164a29438ab093b49/docs/ift-ts/raw/payment-streams.md) — Chore: move and fix header for payment streams spec (#295)
|
||||
- **2026-02-24** — [`14fd5c0`](https://github.com/logos-co/logos-lips/blob/14fd5c09ccb76cb36ebb6a4b6c8082850172d330/vac/raw/payment-streams.md) — docs: add payment streams raw spec (#224)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document provides a functional specification
|
||||
for a payment streams protocol for Logos services.
|
||||
|
||||
A payment stream is an off-chain protocol
|
||||
where a payer's deposit releases gradually to a payee.
|
||||
The blockchain determines fund accrual based on elapsed time.
|
||||
|
||||
This specification defines stream-backed eligibility proof types
|
||||
for the incentivization framework
|
||||
defined in the incentivization specification
|
||||
(see [References](#references)).
|
||||
The incentivization specification is defined
|
||||
in the context of Logos Messaging request-response protocols.
|
||||
This specification can be extended to non-Messaging services.
|
||||
|
||||
The protocol targets Logos blockchain,
|
||||
which includes the Logos Execution Zone (LEZ).
|
||||
This document clarifies MVP requirements
|
||||
and facilitates discussion with Logos blockchain and LEZ developers
|
||||
on implementation feasibility and challenges.
|
||||
|
||||
## Language
|
||||
|
||||
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
|
||||
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL"
|
||||
in this document are to be interpreted as described in
|
||||
[RFC 2119](http://tools.ietf.org/html/rfc2119).
|
||||
|
||||
## Change Process
|
||||
|
||||
This document is governed by the [1/COSS](./1/coss.md) (COSS).
|
||||
|
||||
## Motivation
|
||||
|
||||
Logos is a privacy-focused tech stack that includes
|
||||
Logos Messaging, Logos Blockchain, and Logos Storage.
|
||||
|
||||
Logos Messaging comprises a suite of communication protocols
|
||||
with both P2P and request-response structures.
|
||||
The backbone P2P protocols use tit-for-tat mechanisms.
|
||||
Incentivization is introduced
|
||||
for auxiliary request-response protocols
|
||||
with well-defined user and provider roles.
|
||||
One such protocol is Store,
|
||||
which allows users to query historical messages
|
||||
from Logos Messaging relay nodes.
|
||||
|
||||
This specification introduces a payment streams protocol
|
||||
for Store and other request-response protocols.
|
||||
The protocol targets the following requirements:
|
||||
|
||||
- Performance: Efficient payments with low latency and fees.
|
||||
- Security: Limited loss exposure through spending controls.
|
||||
- Privacy: On-chain deposit identity unlinkable to off-chain service requests.
|
||||
- Extendability: Simple initial design with room for enhancements.
|
||||
|
||||
After reviewing prior work on payment channels, streams,
|
||||
e-cash, and tickets,
|
||||
payment streams were selected as the most suitable mechanism.
|
||||
|
||||
Payment streams enable unidirectional time-based fund flows
|
||||
from payer to payee.
|
||||
Streams are simpler than alternatives
|
||||
and map well to use cases with distinct roles.
|
||||
Parties need not store old states or initiate disputes
|
||||
as required in payment channel protocols.
|
||||
Streams avoid relying on a centralized mint entity,
|
||||
typical for e-cash and ticket protocols,
|
||||
improving resilience and privacy.
|
||||
|
||||
Different service patterns suit different payment mechanisms.
|
||||
Ongoing services align well with streams
|
||||
that provide time-based automatic fund accrual.
|
||||
One-time or on-demand services suit
|
||||
payment channels with one-off payments.
|
||||
|
||||
This specification targets streams
|
||||
for services with steady usage patterns.
|
||||
Addressing burst services with one-off payments
|
||||
remains future work.
|
||||
|
||||
Logos blockchain uses the Logos Execution Zone (LEZ),
|
||||
which enables both transparent and shielded execution.
|
||||
LEZ is a natural fit
|
||||
for the on-chain component of the payment protocol.
|
||||
|
||||
This document facilitates discussion with Logos developers on
|
||||
whether the required functionality can be implemented,
|
||||
which parts are most challenging and how to simplify them,
|
||||
and other implementation considerations.
|
||||
|
||||
## Theory and Semantics
|
||||
|
||||
### Architecture Overview
|
||||
|
||||
The protocol has two roles:
|
||||
|
||||
- User: the party paying for services (payer).
|
||||
- Provider: the party delivering services and receiving payment (payee).
|
||||
|
||||
The protocol uses a two-level architecture
|
||||
of vaults and streams.
|
||||
|
||||
A vault holds a user's deposit and backs multiple streams.
|
||||
A user MAY have multiple vaults.
|
||||
One vault MAY back streams to different providers.
|
||||
To start using the protocol,
|
||||
the user MUST deposit funds into a vault.
|
||||
The user MAY withdraw unallocated funds from the vault at any time.
|
||||
Vault withdrawals send funds to addresses,
|
||||
which MAY be external addresses or other vaults.
|
||||
Allocating funds from a vault to a stream
|
||||
is not considered a withdrawal,
|
||||
as the funds remain within the protocol.
|
||||
|
||||
A stream is an individual payment flow from a vault to one provider.
|
||||
When creating a stream,
|
||||
the user MUST allocate a portion of vault funds to that stream.
|
||||
Each stream MUST belong to exactly one vault.
|
||||
Each stream MUST specify an accrual rate (tokens per time unit).
|
||||
An allocation is the portion of vault funds committed to a stream.
|
||||
The sum of all stream allocations MUST NOT exceed the vault balance.
|
||||
|
||||
A claim is the operation
|
||||
where the provider retrieves accrued funds from a stream.
|
||||
The provider MAY claim accrued funds from a stream in any state.
|
||||
A claim MUST transfer the full accrued balance to the provider.
|
||||
|
||||
### Stream Lifecycle
|
||||
|
||||
Stream states:
|
||||
|
||||
- ACTIVE: Funds accrue to the provider at the agreed rate.
|
||||
- PAUSED: Accrual is stopped.
|
||||
The stream transitions to PAUSED by user action
|
||||
or automatically when allocated funds are fully accrued.
|
||||
The user MAY resume the stream.
|
||||
- CLOSED: Stream is permanently terminated.
|
||||
The stream MUST NOT transition to any other state.
|
||||
|
||||
Stream state transitions:
|
||||
|
||||
- Create: User creates a stream in ACTIVE state
|
||||
by allocating funds from the vault.
|
||||
- Pause: User pauses an ACTIVE stream, stopping accrual.
|
||||
The stream also transitions automatically from ACTIVE to PAUSED
|
||||
when allocated funds are fully accrued.
|
||||
- Resume: User resumes a PAUSED stream, restarting accrual.
|
||||
Resume MUST fail if remaining allocation is zero.
|
||||
- Top-Up: User MAY add funds to stream allocation.
|
||||
Top-up MUST transition the stream to ACTIVE state.
|
||||
If the user wants to add funds without resuming,
|
||||
the user MUST pause the stream after top-up.
|
||||
- Close: Either user or provider MAY close the stream
|
||||
from any non-CLOSED state.
|
||||
When a stream is closed,
|
||||
unaccrued funds MUST automatically return to the user's vault.
|
||||
Accrued funds remain available for the provider to claim.
|
||||
- Claim: Provider MAY claim accrued funds from a stream in any state.
|
||||
A claim MUST transfer the full accrued balance;
|
||||
partial claims are not supported.
|
||||
A claim operation does not change stream state.
|
||||
|
||||
### Stream State Transition Diagram
|
||||
|
||||
```mermaid
|
||||
graph LR;
|
||||
ACTIVE -->|pause / deplete| PAUSED;
|
||||
PAUSED -->|resume / top-up| ACTIVE;
|
||||
ACTIVE -->|close| CLOSED;
|
||||
PAUSED -->|close| CLOSED;
|
||||
```
|
||||
|
||||
### Assumptions
|
||||
|
||||
Parties MUST agree on stream parameters before creation.
|
||||
A separate discovery protocol SHOULD enable
|
||||
providers to advertise services and accepted payment terms.
|
||||
|
||||
The provider SHOULD announce
|
||||
accepted eligibility proof types and service parameters
|
||||
via the discovery protocol.
|
||||
|
||||
The following is an informal list of discoverable parameters
|
||||
(to be formally defined in the context of the discovery specification):
|
||||
|
||||
- accepted eligibility proof types
|
||||
- accepted tokens
|
||||
- required rate (tokens per time unit)
|
||||
- minimum allocation
|
||||
- required vault buffer percentage (RECOMMENDED default: 5%)
|
||||
- load cap (cumulative resource limit per stream per time window)
|
||||
- `VaultProof` response cap
|
||||
(maximum response size for `VaultProof`-backed requests)
|
||||
- `max_open_stream_window` (RECOMMENDED default: 300 seconds)
|
||||
(maximum acceptable duration
|
||||
between receiving a `StreamProposal` and stream establishment)
|
||||
|
||||
Users SHOULD monitor service delivery
|
||||
and take action when providers stop delivering service.
|
||||
Since users are typically online to receive service,
|
||||
monitoring quality and pausing or closing streams
|
||||
is a reasonable expectation.
|
||||
|
||||
Providers SHOULD monitor the stream on-chain
|
||||
and SHOULD stop providing service when a stream is not `ACTIVE`.
|
||||
|
||||
## Off-Chain Protocol
|
||||
|
||||
This section describes off-chain communication
|
||||
for stream establishment, service delivery, and termination.
|
||||
|
||||
### Design Rationale
|
||||
|
||||
On-chain state is the source of truth for fund allocation and accrual.
|
||||
Off-chain communication coordinates lifecycle events
|
||||
and enables service delivery.
|
||||
|
||||
This specification does not redefine the service provision protocol.
|
||||
The incentivization specification (see [References](#references))
|
||||
defines the generic request-response framework
|
||||
with `EligibilityProof` and `EligibilityStatus`.
|
||||
This specification extends `EligibilityProof`
|
||||
with two new types for stream-backed service provision,
|
||||
defined in the following subsection.
|
||||
|
||||
### Eligibility Proof Types
|
||||
|
||||
The incentivization specification's `EligibilityProof`
|
||||
is extended with two new optional fields:
|
||||
`stream_proposal` and `stream_proof`.
|
||||
These fields are mutually exclusive.
|
||||
The first `ServiceRequest` MUST use `stream_proposal`;
|
||||
its semantics: "I want to open a stream to you
|
||||
with these parameters;
|
||||
here is proof I have a vault to back it;
|
||||
here is my first request."
|
||||
All subsequent requests MUST use `stream_proof`.
|
||||
|
||||
```protobuf
|
||||
message EligibilityProof {
|
||||
// existing, from incentivization specification
|
||||
optional bytes proof_of_payment = 1;
|
||||
// new, for stream-backed service provision
|
||||
optional bytes stream_proposal = 2;
|
||||
optional bytes stream_proof = 3;
|
||||
}
|
||||
```
|
||||
|
||||
#### StreamProposal
|
||||
|
||||
```protobuf
|
||||
message StreamProposal {
|
||||
VaultProof vault_proof = 1;
|
||||
StreamParams stream_params = 2;
|
||||
bytes public_key = 3; // key for signing subsequent service requests
|
||||
}
|
||||
```
|
||||
|
||||
#### VaultProof
|
||||
|
||||
A `VaultProof` proves that the user controls a vault
|
||||
with sufficient unallocated funds
|
||||
to back the proposed stream.
|
||||
|
||||
```protobuf
|
||||
message VaultProof {
|
||||
bytes vault_id = 1; // on-chain identifier of the vault
|
||||
bytes provider_id = 2; // target provider (prevents replay)
|
||||
uint64 balance_commitment = 3; // asserted unallocated balance
|
||||
bytes owner_signature = 4; // signature covering all fields above
|
||||
}
|
||||
```
|
||||
|
||||
The provider SHOULD verify on-chain
|
||||
that the vault's unallocated balance is at least
|
||||
`stream_allocation * (1 + buffer)`,
|
||||
where `stream_allocation` is from the accompanying `StreamParams`
|
||||
and `buffer` is the provider's required vault buffer percentage
|
||||
(RECOMMENDED default: 5%).
|
||||
|
||||
The user MAY issue `VaultProof`s to multiple providers.
|
||||
The user MUST ensure that issuing a new `VaultProof`
|
||||
does not cause the total of all promised `VaultProof` allocations
|
||||
from this vault
|
||||
to exceed the vault's unallocated balance.
|
||||
|
||||
#### StreamParams
|
||||
|
||||
`StreamParams` contains proposed stream parameters.
|
||||
|
||||
```protobuf
|
||||
message StreamParams {
|
||||
bytes service_id = 1; // identifier of the requested service
|
||||
uint64 stream_rate = 2; // proposed accrual rate (tokens per time unit)
|
||||
uint64 stream_allocation = 3; // proposed initial allocation
|
||||
uint64 open_stream_by = 4; // stream establishment deadline (absolute timestamp)
|
||||
}
|
||||
```
|
||||
|
||||
The `open_stream_by` field is an absolute timestamp
|
||||
by which the user commits to establishing the stream on-chain.
|
||||
The user MUST set `open_stream_by` to a future timestamp
|
||||
no later than the current time plus `max_open_stream_window`.
|
||||
|
||||
#### StreamProof
|
||||
|
||||
A `StreamProof` links a request to an active on-chain stream.
|
||||
|
||||
```protobuf
|
||||
message StreamProof {
|
||||
bytes stream_id = 1; // on-chain identifier of the stream
|
||||
bytes signature = 2; // signature over request_data using committed public_key
|
||||
}
|
||||
```
|
||||
|
||||
The provider SHOULD verify on-chain
|
||||
that the stream is `ACTIVE`,
|
||||
that the signature matches the committed `public_key`,
|
||||
and that the stream parameters match
|
||||
those originally proposed.
|
||||
|
||||
### Message Types
|
||||
|
||||
The off-chain protocol uses three message types:
|
||||
`ServiceRequest`, `ServiceResponse`, and `ServiceTermination`.
|
||||
|
||||
#### ServiceRequest
|
||||
|
||||
A `ServiceRequest` has two top-level fields,
|
||||
consistent with the incentivization specification pattern:
|
||||
|
||||
- `request_data`: service-specific payload
|
||||
- `eligibility_proof`: an `EligibilityProof`
|
||||
containing either a `stream_proposal` or a `stream_proof`
|
||||
(see [Eligibility Proof Types](#eligibility-proof-types))
|
||||
|
||||
#### ServiceResponse
|
||||
|
||||
A `ServiceResponse` MUST include:
|
||||
|
||||
- `eligibility_status`: an `EligibilityStatus`
|
||||
(from the incentivization specification) with:
|
||||
- `status_code`: indicating acceptance,
|
||||
parameter rejection, proof invalidity, etc.
|
||||
- `status_desc`: human-readable description
|
||||
(RECOMMENDED to include actionable guidance
|
||||
on parameter rejection)
|
||||
- `response_data`: service-specific payload
|
||||
(included if and only if the request is served)
|
||||
|
||||
Status codes specific to this specification:
|
||||
|
||||
- `OK`: request served
|
||||
- `PARAMS_REJECTED`: stream parameters unacceptable;
|
||||
`VaultProof` NOT marked as spent;
|
||||
user MAY retry with adjusted parameters
|
||||
- `PROOF_INVALID`: `VaultProof` or `StreamProof` verification failed
|
||||
- `STREAM_NOT_ACTIVE`: referenced stream
|
||||
is no longer active on-chain
|
||||
|
||||
The provider SHOULD limit parameter-rejection retries
|
||||
to a RECOMMENDED maximum of 5 per vault
|
||||
within a RECOMMENDED time window of 600 seconds.
|
||||
|
||||
#### ServiceTermination
|
||||
|
||||
The provider SHOULD send a `ServiceTermination` message
|
||||
before stopping service.
|
||||
A `ServiceTermination` message MAY be sent regardless of whether
|
||||
a stream has been established on-chain.
|
||||
|
||||
This message MUST include:
|
||||
|
||||
- `termination_type`: `TEMPORARY` or `PERMANENT`
|
||||
- `resume_after`: timestamp after which service MAY resume
|
||||
(REQUIRED for `TEMPORARY`, empty for `PERMANENT`)
|
||||
|
||||
For temporary termination,
|
||||
the user MAY pause the stream until the `resume_after` time.
|
||||
For permanent termination,
|
||||
the user SHOULD close the stream to recover unaccrued funds.
|
||||
|
||||
### Protocol Flow
|
||||
|
||||
1. The user discovers a provider via the discovery protocol.
|
||||
The provider's advertisement includes
|
||||
accepted eligibility types and service parameters.
|
||||
|
||||
2. The user sends the first `ServiceRequest`
|
||||
with `eligibility_proof` containing a `StreamProposal`
|
||||
(`VaultProof` + `StreamParams` + `public_key`)
|
||||
and `request_data`.
|
||||
|
||||
3. The provider verifies `VaultProof` on-chain
|
||||
and evaluates `StreamParams`:
|
||||
|
||||
- If parameters are unacceptable:
|
||||
the provider responds with `PARAMS_REJECTED`.
|
||||
The `VaultProof` is not marked as spent.
|
||||
The user MAY retry with adjusted `StreamParams`.
|
||||
- If the proof is invalid:
|
||||
the provider responds with `PROOF_INVALID`.
|
||||
- If accepted:
|
||||
the provider serves the request immediately,
|
||||
responding with `OK` and `response_data`.
|
||||
The provider notes the pending session.
|
||||
The provider SHOULD limit this response
|
||||
to the advertised `VaultProof` response cap.
|
||||
|
||||
4. The user creates the stream on-chain
|
||||
before `open_stream_by`.
|
||||
|
||||
5. The user sends subsequent `ServiceRequest`s
|
||||
with `eligibility_proof` containing a `StreamProof`.
|
||||
|
||||
6. The provider monitors the chain for a matching stream.
|
||||
If no matching stream appears by `open_stream_by`,
|
||||
the provider SHOULD discard the session
|
||||
and release planned capacity.
|
||||
If the stream is established before `open_stream_by`,
|
||||
the first `StreamProof`-backed request
|
||||
MAY arrive after `open_stream_by`.
|
||||
|
||||
A user MUST NOT have more than one pending
|
||||
`StreamProposal`-backed session per vault-provider pair at a time.
|
||||
To open multiple streams to the same provider,
|
||||
the user MUST complete each stream establishment
|
||||
before initiating the next.
|
||||
If the vault is drained between `VaultProof` verification
|
||||
and stream creation, this constitutes a protocol violation;
|
||||
the provider SHOULD send a `ServiceTermination`
|
||||
with `termination_type` `PERMANENT`.
|
||||
|
||||
## Protocol Extensions
|
||||
|
||||
This section describes optional modifications
|
||||
that MAY be applied to the base protocol.
|
||||
Each extension is independent.
|
||||
|
||||
### Auto-Pause
|
||||
|
||||
The user MAY specify an auto-pause duration when creating a stream.
|
||||
When the specified duration elapses since stream creation or last resume,
|
||||
the stream MUST automatically transition to PAUSED state.
|
||||
The user MAY resume the stream, resetting the auto-pause timer.
|
||||
|
||||
Auto-pause limits loss if service stops and the user is offline.
|
||||
Per-stream allocation already bounds total risk;
|
||||
auto-pause adds periodic check-ins for long-running streams.
|
||||
|
||||
### Delivery Receipts
|
||||
|
||||
The claim operation MAY require delivery receipts as proof of service.
|
||||
A delivery receipt is a user-signed message that MUST include
|
||||
stream identifier, service delivery details, and signature.
|
||||
If a stream has delivery receipts enabled,
|
||||
the protocol MUST only allow claims with valid receipts.
|
||||
|
||||
Receipt granularity presents a trade-off.
|
||||
Per-message receipts allow the user to approve each message individually
|
||||
but require signing each receipt, increasing interaction overhead.
|
||||
Batched receipts reduce signing overhead
|
||||
but require the user to approve multiple messages at once.
|
||||
|
||||
### Automatic Claim on Closure
|
||||
|
||||
This extension adds an optional auto-claim flag.
|
||||
When auto-claim is enabled,
|
||||
closing the stream MUST automatically claim accrued funds for the provider.
|
||||
|
||||
Auto-claim simplifies the protocol
|
||||
by ensuring closed streams hold no funds,
|
||||
eliminating the need to track balances in closed streams.
|
||||
|
||||
However, auto-claim has potential issues:
|
||||
|
||||
- Prevents provider from batching claims.
|
||||
- May create timing correlations that leak privacy.
|
||||
- Requires user to pay for provider's claim operation.
|
||||
- May cause the entire close operation to fail if claim fails.
|
||||
|
||||
Assessing these trade-offs requires clarity on LEZ,
|
||||
particularly gas model, batching techniques, and timing privacy.
|
||||
|
||||
### Activation Fee
|
||||
|
||||
A user can exploit the pause/resume mechanism
|
||||
by keeping a stream paused
|
||||
and resuming briefly only when querying a service.
|
||||
This results in minimal payment for actual service usage.
|
||||
|
||||
The activation fee addresses this attack.
|
||||
When the activation fee is enabled,
|
||||
a fixed amount MUST accrue to the provider
|
||||
immediately upon the stream becoming `ACTIVE`.
|
||||
The activation fee SHOULD reflect
|
||||
the minimum acceptable payment for a service session.
|
||||
The activation fee applies to stream creation, resume, and top-up operations,
|
||||
as only user actions transition a stream to `ACTIVE` state.
|
||||
If the stream allocation is lower than the activation fee,
|
||||
stream activation MUST fail.
|
||||
|
||||
Providers MAY alternatively address this attack via off-chain policy
|
||||
by refusing service to users who pause and resume excessively.
|
||||
|
||||
### Load Cap
|
||||
|
||||
A load cap represents
|
||||
cumulative resource consumption per stream per time window
|
||||
(e.g. total bytes or requests per minute).
|
||||
It applies to the entire stream session,
|
||||
not to individual responses.
|
||||
The provider SHOULD advertise the load cap
|
||||
via the discovery protocol.
|
||||
|
||||
For `VaultProof`-backed requests (the first `ServiceRequest`),
|
||||
the provider SHOULD advertise a separate `VaultProof` response cap:
|
||||
the maximum response size for a single `VaultProof`-backed response.
|
||||
This limits provider exposure
|
||||
to requests not yet backed by an on-chain stream.
|
||||
|
||||
The user MUST NOT exceed the applicable cap.
|
||||
If the user exceeds it,
|
||||
the provider SHOULD terminate service.
|
||||
|
||||
A user who requires a higher load cap
|
||||
SHOULD open multiple streams to the same provider.
|
||||
|
||||
### Multi-round Stream Parameter Negotiation
|
||||
|
||||
A future extension MAY allow the provider
|
||||
to include counter-proposed parameters
|
||||
in a `PARAMS_REJECTED` response,
|
||||
enabling iterative negotiation
|
||||
before the first request is served.
|
||||
|
||||
## Implementation Considerations
|
||||
|
||||
This section outlines how the protocol maps onto LEZ.
|
||||
|
||||
The stream protocol MAY be deployed as an LEZ program
|
||||
with three account types:
|
||||
|
||||
- StreamDefinition: stream parameters and status.
|
||||
- VaultDefinition: list of streams backed by a vault, controlled by payer.
|
||||
- VaultHolding: token account funded by payer, used to pay providers.
|
||||
|
||||
Stream lifecycle rules and balance constraints
|
||||
are encoded and enforced through program logic.
|
||||
|
||||
Stream state is evaluated lazily.
|
||||
On-chain storage holds stream parameters,
|
||||
but the effective state depends on the block timestamp at execution time.
|
||||
State transitions (such as auto-pause) are reflected on-chain
|
||||
only when an on-chain operation is executed.
|
||||
|
||||
Whether shielded execution can access block timestamps
|
||||
for time-based accrual calculation is an open question.
|
||||
Given a mechanism for elapsed time in shielded execution,
|
||||
all protocol operations MAY be performed within shielded execution.
|
||||
|
||||
## Security and Privacy Considerations
|
||||
|
||||
An initial privacy goal is unlinkability
|
||||
between off-chain requests and on-chain funding.
|
||||
Vault deposits MUST NOT reveal the depositor's identity.
|
||||
Stream creation SHOULD NOT reveal which vault funded the stream.
|
||||
|
||||
Each account MAY be public or private, configured per-account.
|
||||
The payer decides whether stream operations use
|
||||
transparent or shielded execution.
|
||||
The protocol design SHOULD NOT fix this decision.
|
||||
A provider MAY reject stream requests
|
||||
that do not match their privacy preferences.
|
||||
|
||||
On-chain state of a stream MUST be verifiable by both parties.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [Incentivization for Waku Light Protocols](https://github.com/logos-messaging/specs/blob/master/standards/core/incentivization.md)
|
||||
|
||||
### Informative
|
||||
|
||||
#### Related Work
|
||||
|
||||
- [Off-Chain Payment Protocols: Classification and Architectural Choice](https://forum.vac.dev/t/off-chain-payment-protocols-classification-and-architectural-choice/596)
|
||||
- [Logos Execution Zone](https://github.com/logos-blockchain/logos-execution-zone)
|
||||
|
||||
#### Payment Streaming Protocols
|
||||
|
||||
Existing payment streaming protocols
|
||||
(Sablier Flow, Sablier Lockup, LlamaPay V2, Superfluid)
|
||||
target EVM-like state architectures.
|
||||
They use time-based accrual with ERC-20 tokens.
|
||||
Protocols differ in stream duration.
|
||||
Some support fixed-duration streams (Sablier Lockup),
|
||||
while others allow open-ended streams (Sablier Flow).
|
||||
Deposit architecture also varies.
|
||||
Singleton managers (Sablier Flow, Sablier Lockup)
|
||||
require separate deposits per stream.
|
||||
Per-payer vaults (LlamaPay V2)
|
||||
allow one deposit to back multiple streams.
|
||||
|
||||
- [Sablier Flow](https://github.com/sablier-labs/flow)
|
||||
- [Sablier Lockup](https://github.com/sablier-labs/lockup)
|
||||
- [LlamaPay V2](https://github.com/LlamaPay/llamapay-v2)
|
||||
- [Superfluid Protocol](https://github.com/superfluid-org/protocol-monorepo)
|
||||
|
||||
## Appendix A: Illustrative EVM Implementation
|
||||
|
||||
This appendix provides an illustrative EVM-based implementation outline.
|
||||
The actual implementation will target LEZ.
|
||||
|
||||
### A.1 Contract Structure
|
||||
|
||||
```solidity
|
||||
contract PaymentVault {
|
||||
enum StreamState { ACTIVE, PAUSED, CLOSED }
|
||||
|
||||
struct Stream {
|
||||
address token;
|
||||
address provider;
|
||||
uint128 ratePerSecond;
|
||||
uint128 allocation;
|
||||
uint64 lastUpdatedAt;
|
||||
uint128 accruedBalance;
|
||||
StreamState state;
|
||||
}
|
||||
|
||||
address public user;
|
||||
mapping(address token => uint256) public vaultBalance;
|
||||
uint256 public nextStreamId;
|
||||
mapping(uint256 => Stream) public streams;
|
||||
}
|
||||
```
|
||||
|
||||
### A.2 Vault Operations
|
||||
|
||||
```solidity
|
||||
event Deposited(address indexed token, uint256 amount);
|
||||
event Withdrawn(address indexed token, uint256 amount, address indexed to);
|
||||
|
||||
function deposit(address token, uint256 amount) external;
|
||||
function withdraw(address token, uint256 amount, address to) external;
|
||||
```
|
||||
|
||||
### A.3 Stream Lifecycle
|
||||
|
||||
```solidity
|
||||
event StreamCreated(
|
||||
uint256 indexed streamId,
|
||||
address indexed provider,
|
||||
address indexed token,
|
||||
uint128 ratePerSecond,
|
||||
uint128 allocation
|
||||
);
|
||||
event StreamPaused(uint256 indexed streamId);
|
||||
event StreamResumed(uint256 indexed streamId);
|
||||
event StreamToppedUp(uint256 indexed streamId, uint128 additionalAllocation);
|
||||
event StreamClosed(uint256 indexed streamId, uint128 refundedToVault);
|
||||
event Claimed(uint256 indexed streamId, address indexed provider, uint128 amount);
|
||||
|
||||
/// @notice Create a new stream in ACTIVE state (user only)
|
||||
/// @dev MUST revert if allocation exceeds available vault balance
|
||||
function createStream(
|
||||
address provider,
|
||||
address token,
|
||||
uint128 ratePerSecond,
|
||||
uint128 allocation
|
||||
) external returns (uint256 streamId);
|
||||
|
||||
/// @notice Pause an ACTIVE stream (user only)
|
||||
function pauseStream(uint256 streamId) external;
|
||||
|
||||
/// @notice Resume a PAUSED stream (user only)
|
||||
/// @dev MUST revert if remaining allocation (allocation - accruedBalance) is zero
|
||||
function resumeStream(uint256 streamId) external;
|
||||
|
||||
/// @notice Add funds to stream allocation; transitions to ACTIVE (user only)
|
||||
/// @dev MUST revert if additionalAllocation exceeds available vault balance
|
||||
function topUpStream(uint256 streamId, uint128 additionalAllocation) external;
|
||||
|
||||
/// @notice Close stream permanently
|
||||
/// @dev Callable by user or provider. Unaccrued funds (allocation - accruedBalance)
|
||||
/// MUST be returned to vaultBalance. Accrued funds remain claimable by provider.
|
||||
function closeStream(uint256 streamId) external;
|
||||
|
||||
/// @notice Provider claims accrued funds from a stream
|
||||
/// @dev Callable in any state (ACTIVE, PAUSED, or CLOSED).
|
||||
/// Transfers full accruedBalance to provider and resets it to zero.
|
||||
function claim(uint256 streamId) external;
|
||||
```
|
||||
|
||||
### A.4 Internal Accrual
|
||||
|
||||
```solidity
|
||||
/// @notice Update accruedBalance based on elapsed time since lastUpdatedAt
|
||||
/// @dev Called by pauseStream, resumeStream, topUpStream, closeStream, and claim
|
||||
/// before modifying stream state. Caps accrual at allocation and
|
||||
/// transitions to PAUSED when fully accrued (lazy evaluation:
|
||||
/// state updates on next interaction, not at exact depletion time).
|
||||
function _accrue(uint256 streamId) internal;
|
||||
```
|
||||
@@ -1,160 +0,0 @@
|
||||
# RLN-INTEREP-SPEC
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Interep as group management for RLN |
|
||||
| Slug | 100 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Aaryamann Challani <p1ge0nh8er@proton.me> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/rln-interep-spec.md) — chore: add math support (#287)
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/rln-interep-spec.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/rln-interep-spec.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/rln-interep-spec.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/rln-interep-spec.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/rln-interep-spec.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/rln-interep-spec.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/raw/rln-interep-spec.md) — Fix Files for Linting (#94)
|
||||
- **2024-08-05** — [`eb25cd0`](https://github.com/logos-co/logos-lips/blob/eb25cd06d679e94409072a96841de16a6b3910d5/vac/raw/rln-interep-spec.md) — chore: replace email addresses (#86)
|
||||
- **2024-05-27** — [`99be3b9`](https://github.com/logos-co/logos-lips/blob/99be3b974509ea03561c7ef4b1b02a56f24e9297/vac/raw/rln-interep-spec.md) — Move Raw Specs (#37)
|
||||
- **2024-02-01** — [`860bae2`](https://github.com/logos-co/logos-lips/blob/860bae20d9eb9f17ac6c3839f939d545bf796835/vac/48/rln-interep-spec.md) — Update rln-interep-spec.md
|
||||
- **2024-02-01** — [`3f722d9`](https://github.com/logos-co/logos-lips/blob/3f722d945c53b8356be6282f9c20646e099a2122/vac/48/rln-interep-spec.md) — Update and rename README.md to rln-interep-spec.md
|
||||
- **2024-01-30** — [`ea62398`](https://github.com/logos-co/logos-lips/blob/ea623980770922d0dfe2f861db885ca1ae9dd84e/vac/48/README.md) — Create README.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This spec integrates [Interep](https://interep.link)
|
||||
into the [RLN](32/rln-v1.md) spec.
|
||||
Interep is a group management protocol
|
||||
that allows for the creation of groups of users and
|
||||
the management of their membership.
|
||||
It is used to manage the membership of the RLN group.
|
||||
|
||||
Interep ties in web2 identities with reputation, and
|
||||
sorts the users into groups based on their reputation score.
|
||||
For example, a GitHub user with over 100 followers is considered to have "gold" reputation.
|
||||
|
||||
Interep uses [Semaphore](https://semaphore.appliedzkp.org/)
|
||||
under the hood to allow anonymous signaling of membership in a group.
|
||||
Therefore, a user with a "gold" reputation can prove the existence
|
||||
of their membership without revealing their identity.
|
||||
|
||||
RLN is used for spam prevention, and Interep is used for group management.
|
||||
|
||||
By using Interep with RLN,
|
||||
we allow users to join RLN membership groups
|
||||
without the need for on-chain financial stake.
|
||||
|
||||
## Motivation
|
||||
|
||||
To have Sybil-Resistant group management,
|
||||
there are [implementations](https://github.com/vacp2p/rln-contract)
|
||||
of RLN which make use of financial stake on-chain.
|
||||
However, this is not ideal because it raises the barrier of entry for honest participants.
|
||||
|
||||
In this case,
|
||||
honest participants will most likely have a web2 identity accessible to them,
|
||||
which can be used for joining an Interep reputation group.
|
||||
By modifying the RLN spec to use Interep,
|
||||
we can have Sybil-Resistant group management
|
||||
without the need for on-chain financial stake.
|
||||
|
||||
Since RLN and Interep both use Semaphore-style credentials,
|
||||
it is possible to use the same set of credentials for both.
|
||||
|
||||
## Functional Operation
|
||||
|
||||
Using Interep with RLN involves the following steps -
|
||||
|
||||
1. Generate Semaphore credentials
|
||||
2. Verify reputation and join Interep group
|
||||
3. Join RLN membership group via interaction with Smart Contract,
|
||||
by passing a proof of membership to the Interep group
|
||||
|
||||
### 1. Generate Semaphore credentials
|
||||
|
||||
Semaphore credentials are generated in a standard way,
|
||||
depicted in the [Semaphore documentation](https://semaphore.appliedzkp.org/docs/guides/identities#create-deterministic-identities).
|
||||
|
||||
### 2. Verify reputation and join Interep group
|
||||
|
||||
Using the Interep app deployed on [Goerli](https://goerli.interep.link/),
|
||||
the user can check their reputation tier and join the corresponding group.
|
||||
This results in a transaction to the Interep contract, which adds them to the group.
|
||||
|
||||
### 3. Join RLN membership group
|
||||
|
||||
Instead of sending funds to the RLN contract to join the membership group,
|
||||
the user can send a proof of membership to the Interep group.
|
||||
This proof is generated by the user, and
|
||||
is verified by the contract.
|
||||
The contract ensures that the user is a member of the Interep group, and
|
||||
then adds them to the RLN membership group.
|
||||
|
||||
Following is the modified signature of the register function
|
||||
in the RLN contract -
|
||||
|
||||
```solidity
|
||||
/// @param groupId: Id of the group.
|
||||
/// @param signal: Semaphore signal.
|
||||
/// @param nullifierHash: Nullifier hash.
|
||||
/// @param externalNullifier: External nullifier.
|
||||
/// @param proof: Zero-knowledge proof.
|
||||
/// @param idCommitment: ID Commitment of the member.
|
||||
function register(
|
||||
uint256 groupId,
|
||||
bytes32 signal,
|
||||
uint256 nullifierHash,
|
||||
uint256 externalNullifier,
|
||||
uint256[8] calldata proof,
|
||||
uint256 idCommitment
|
||||
)
|
||||
```
|
||||
|
||||
## Verification of messages
|
||||
|
||||
Messages are verified the same way as in the [RLN spec](32/rln-v1.md#verification).
|
||||
|
||||
## Slashing
|
||||
|
||||
The slashing mechanism is the same as in the [RLN spec](32/rln-v1.md#slashing).
|
||||
It is important to note that the slashing
|
||||
may not have the intended effect on the user,
|
||||
since the only consequence is that they cannot send messages.
|
||||
This is due to the fact that the user
|
||||
can send an identity commitment in the registration to the RLN contract,
|
||||
which is different from the one used in the Interep group.
|
||||
|
||||
## Proof of Concept
|
||||
|
||||
A proof of concept is available at
|
||||
[vacp2p/rln-interp-contract](https://github.com/vacp2p/rln-interep-contract)
|
||||
which integrates Interep with RLN.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. As mentioned in [Slashing](#slashing),
|
||||
the slashing mechanism may not have the intended effect on the user.
|
||||
2. This spec inherits the security considerations of the [RLN spec](32/rln-v1.md#security-considerations).
|
||||
3. This spec inherits the security considerations of [Interep](https://docs.interep.link/).
|
||||
4. A user may make multiple registrations using the same Interep proofs but
|
||||
different identity commitments.
|
||||
The way to mitigate this is to check if the nullifier hash has been detected
|
||||
previously in proof verification.
|
||||
|
||||
## References
|
||||
|
||||
1. [RLN spec](32/rln-v1.md)
|
||||
2. [Interep](https://interep.link)
|
||||
3. [Semaphore](https://semaphore.appliedzkp.org/)
|
||||
4. [Decentralized cloudflare using Interep](https://ethresear.ch/t/decentralised-cloudflare-using-rln-and-rich-user-identities/10774)
|
||||
5. [Interep contracts](https://github.com/interep-project/contracts)
|
||||
6. [RLN contract](https://github.com/vacp2p/rln-contract)
|
||||
7. [RLNP2P](https://rlnp2p.vac.dev/)
|
||||
@@ -1,144 +0,0 @@
|
||||
# RLN-STEALTH-COMMITMENTS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | RLN Stealth Commitment Usage |
|
||||
| Slug | 102 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Aaryamann Challani <p1ge0nh8er@proton.me> |
|
||||
| Contributors | Jimmy Debe <jimmy@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/rln-stealth-commitments.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/rln-stealth-commitments.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/rln-stealth-commitments.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/rln-stealth-commitments.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/rln-stealth-commitments.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/rln-stealth-commitments.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/raw/rln-stealth-commitments.md) — Fix Files for Linting (#94)
|
||||
- **2024-08-05** — [`eb25cd0`](https://github.com/logos-co/logos-lips/blob/eb25cd06d679e94409072a96841de16a6b3910d5/vac/raw/rln-stealth-commitments.md) — chore: replace email addresses (#86)
|
||||
- **2024-04-15** — [`0b0e00f`](https://github.com/logos-co/logos-lips/blob/0b0e00f510f5995b612b4ac8c50c51f9d938dfc8/vac/raw/rln-stealth-commitments.md) — feat(rln-stealth-commitments): add initial tech writeup (#23)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification describes the usage of stealth commitments
|
||||
to add prospective users to a network-governed
|
||||
[32/RLN-V1](32/rln-v1.md) membership set.
|
||||
|
||||
## Motivation
|
||||
|
||||
When [32/RLN-V1](32/rln-v1.md) is enforced in [10/Waku2](../../messaging/standards/core/10/waku2.md),
|
||||
all users are required to register to a membership set.
|
||||
The membership set will store user identities
|
||||
allowing the secure interaction within an application.
|
||||
Forcing a user to do an on-chain transaction
|
||||
to join a membership set is an onboarding friction,
|
||||
and some projects may be opposed to this method.
|
||||
To improve the user experience,
|
||||
stealth commitments can be used by a counterparty
|
||||
to register identities on the user's behalf,
|
||||
while maintaining the user's anonymity.
|
||||
|
||||
This document specifies a privacy-preserving mechanism,
|
||||
allowing a counterparty to utilize [32/RLN-V1](32/rln-v1.md)
|
||||
to register an `identityCommitment` on-chain.
|
||||
Counterparties will be able to register members
|
||||
to a RLN membership set without exposing the user's private keys.
|
||||
|
||||
## Background
|
||||
|
||||
The [32/RLN-V1](32/rln-v1.md) protocol,
|
||||
consists of a smart contract that stores an `identityCommitment`
|
||||
in a membership set.
|
||||
In order for a user to join the membership set,
|
||||
the user is required to make a transaction on the blockchain.
|
||||
A set of public keys is used to compute a stealth commitment for a user,
|
||||
as described in [ERC-5564](https://eips.ethereum.org/EIPS/eip-5564).
|
||||
This specification is an implementation of the
|
||||
[ERC-5564](https://eips.ethereum.org/EIPS/eip-5564) scheme,
|
||||
tailored to the curve that is used in the [32/RLN-V1](32/rln-v1.md) protocol.
|
||||
|
||||
This can be used in a couple of ways in applications:
|
||||
|
||||
1. Applications can add users
|
||||
to the [32/RLN-V1](32/rln-v1.md) membership set in a batch.
|
||||
2. Users of the application
|
||||
can register other users to the [32/RLN-V1](32/rln-v1.md) membership set.
|
||||
|
||||
This is useful when the prospective user does not have access to funds
|
||||
on the network that [32/RLN-V1](32/rln-v1.md) is deployed on.
|
||||
|
||||
## Wire Format Specification
|
||||
|
||||
The two parties, the requester and the receiver,
|
||||
MUST exchange the following information:
|
||||
|
||||
```protobuf
|
||||
|
||||
message Request {
|
||||
// The spending public key of the requester
|
||||
bytes spending_public_key = 1;
|
||||
|
||||
// The viewing public key of the requester
|
||||
bytes viewing_public_key = 2;
|
||||
}
|
||||
```
|
||||
|
||||
### Generate Stealth Commitment
|
||||
|
||||
The application or user SHOULD generate a `stealth_commitment`
|
||||
after a request to do so is received.
|
||||
This commitment MAY be inserted into the corresponding application membership set.
|
||||
|
||||
Once the membership set is updated,
|
||||
the receiver SHOULD exchange the following as a response to the request:
|
||||
|
||||
```protobuf
|
||||
|
||||
message Response {
|
||||
|
||||
  // The view tag used to check if the stealth_commitment belongs to the requester
|
||||
bytes view_tag = 2;
|
||||
|
||||
// The stealth commitment for the requester
|
||||
bytes stealth_commitment = 3;
|
||||
|
||||
// The ephemeral public key used to generate the commitment
|
||||
bytes ephemeral_public_key = 4;
|
||||
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The receiver MUST generate an `ephemeral_public_key`,
|
||||
`view_tag` and `stealth_commitment`.
|
||||
This will be used to check the stealth commitment
|
||||
used to register to the membership set,
|
||||
and the user MUST be able to check ownership with their `viewing_public_key`.
|
||||
|
||||
## Implementation Suggestions
|
||||
|
||||
An implementation of the Stealth Address scheme is available in the
|
||||
[erc-5564-bn254](https://github.com/rymnc/erc-5564-bn254) repository,
|
||||
which also includes a test to generate a stealth commitment for a given user.
|
||||
|
||||
## Security/Privacy Considerations
|
||||
|
||||
This specification inherits the security and privacy considerations of the
|
||||
[Stealth Address](https://eips.ethereum.org/EIPS/eip-5564) scheme.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
- [10/Waku2](../../messaging/standards/core/10/waku2.md)
|
||||
- [32/RLN-V1](32/rln-v1.md)
|
||||
- [ERC-5564](https://eips.ethereum.org/EIPS/eip-5564)
|
||||
@@ -1,253 +0,0 @@
|
||||
# RLN-V2
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Rate Limit Nullifier V2 |
|
||||
| Slug | 106 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Rasul Ibragimov <curryrasul@gmail.com> |
|
||||
| Contributors | Lev Soukhanov <0xdeadfae@gmail.com> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-02-09** — [`afd94c8`](https://github.com/logos-co/logos-lips/blob/afd94c8bc1420376ae9af7e14a4feb246f2ed621/docs/ift-ts/raw/rln-v2.md) — chore: add math support (#287)
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/rln-v2.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`f01d5b9`](https://github.com/logos-co/logos-lips/blob/f01d5b9d9f2ef977b8c089d616991b24f2ee4efe/docs/ift-ts/raw/rln-v2.md) — chore: fix links (#260)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/rln-v2.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/rln-v2.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/rln-v2.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/rln-v2.md) — ci: add mdBook configuration (#233)
|
||||
- **2024-09-13** — [`3ab314d`](https://github.com/logos-co/logos-lips/blob/3ab314d87d4525ff1296bf3d9ec634d570777b91/vac/raw/rln-v2.md) — Fix Files for Linting (#94)
|
||||
- **2024-05-27** — [`99be3b9`](https://github.com/logos-co/logos-lips/blob/99be3b974509ea03561c7ef4b1b02a56f24e9297/vac/raw/rln-v2.md) — Move Raw Specs (#37)
|
||||
- **2024-02-01** — [`8342636`](https://github.com/logos-co/logos-lips/blob/83426365f7b85619052172be4e55a5f2b9b052a0/vac/58/rln-v2.md) — Update and rename RLN-V2.md to rln-v2.md
|
||||
- **2024-01-27** — [`d7e84b4`](https://github.com/logos-co/logos-lips/blob/d7e84b4762946426e7317ec43e27dfd0738ec39b/vac/58/RLN-V2.md) — Create RLN-V2.md
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
The protocol specified in this document is an improvement of [32/RLN-V1](32/rln-v1.md),
|
||||
being a more general construct that allows setting various limits for an epoch
|
||||
(it's 1 message per epoch in [32/RLN-V1](32/rln-v1.md))
|
||||
while remaining almost as simple as its predecessor.
|
||||
Moreover, it allows to set different rate-limits
|
||||
for different RLN app users based on some public data,
|
||||
e.g. stake or reputation.
|
||||
|
||||
## Motivation
|
||||
|
||||
The main goal of this RFC is to generalize [32/RLN-V1](32/rln-v1.md) and
|
||||
expand its applications.
|
||||
There are two different subprotocols based on this protocol:
|
||||
|
||||
* RLN-Same - RLN with the same rate-limit for all users;
|
||||
* RLN-Diff - RLN that allows to set different rate-limits for different users.
|
||||
|
||||
It is important to note that by using a large epoch limit value,
|
||||
users will be able to remain anonymous,
|
||||
because their `internal_nullifiers` will not be repeated until they exceed the limit.
|
||||
|
||||
## Flow
|
||||
|
||||
As in [32/RLN-V1](32/rln-v1.md), the general flow can be described by three steps:
|
||||
|
||||
1. Registration
|
||||
2. Signaling
|
||||
3. Verification and slashing
|
||||
|
||||
The two sub-protocols have different flows, and
|
||||
hence are defined separately.
|
||||
|
||||
### Important note
|
||||
|
||||
All terms and parameters used remain the same as in [32/RLN-V1](32/rln-v1.md),
|
||||
more details [here](32/rln-v1.md#technical-overview)
|
||||
|
||||
## RLN-Same flow
|
||||
|
||||
### Registration
|
||||
|
||||
The registration process in the RLN-Same subprotocol does not differ from [32/RLN-V1](32/rln-v1.md).
|
||||
|
||||
### Signalling
|
||||
|
||||
For proof generation, the user needs to submit the following fields to the circuit:
|
||||
|
||||
```js
|
||||
{
|
||||
identity_secret: identity_secret_hash,
|
||||
path_elements: Merkle_proof.path_elements,
|
||||
identity_path_index: Merkle_proof.indices,
|
||||
x: signal_hash,
|
||||
message_id: message_id,
|
||||
external_nullifier: external_nullifier,
|
||||
message_limit: message_limit
|
||||
}
|
||||
```
|
||||
|
||||
#### Calculating output
|
||||
|
||||
The following fields are needed for proof output calculation:
|
||||
|
||||
```js
|
||||
{
|
||||
identity_secret_hash: bigint,
|
||||
external_nullifier: bigint,
|
||||
message_id: bigint,
|
||||
x: bigint,
|
||||
}
|
||||
```
|
||||
|
||||
The output `[y, internal_nullifier]` is calculated in the following way:
|
||||
|
||||
```js
|
||||
a_0 = identity_secret_hash
|
||||
a_1 = poseidonHash([a_0, external_nullifier, message_id])
|
||||
|
||||
y = a_0 + x * a_1
|
||||
|
||||
internal_nullifier = poseidonHash([a_1])
|
||||
```
|
||||
|
||||
## RLN-Diff flow
|
||||
|
||||
### Registration
|
||||
|
||||
**id_commitment** in [32/RLN-V1](32/rln-v1.md) is equal to `poseidonHash(identity_secret)`.
|
||||
The goal of RLN-Diff is to set different rate-limits for different users.
|
||||
It follows that **id_commitment** must somehow depend
|
||||
on the `user_message_limit` parameter,
|
||||
where 0 <= `user_message_limit` <= `message_limit`.
|
||||
There are few ways to do that:
|
||||
|
||||
1. Sending `identity_secret_hash` = `poseidonHash(identity_secret, userMessageLimit)`
|
||||
and zk proof that `user_message_limit` is valid (is in the right range).
|
||||
This approach requires zkSNARK verification,
|
||||
which is an expensive operation on the blockchain.
|
||||
2. Sending the same `identity_secret_hash` as in [32/RLN-V1](32/rln-v1.md)
|
||||
(`poseidonHash(identity_secret)`) and a user_message_limit publicly to a server
|
||||
or smart-contract where
|
||||
`rate_commitment` = `poseidonHash(identity_secret_hash, userMessageLimit)` is calculated.
|
||||
The leaves in the membership Merkle tree would be the rate_commitments of the users.
|
||||
This approach requires additional hashing in the Circuit, but
|
||||
it eliminates the need for zk proof verification for the registration.
|
||||
|
||||
Both methods are correct, and the choice of the method is left to the implementer.
|
||||
It is recommended to use the second method for the reasons already described.
|
||||
The following flow description will also be based on the second method.
|
||||
|
||||
### Signalling
|
||||
|
||||
For proof generation, the user needs to submit the following fields to the circuit:
|
||||
|
||||
```js
|
||||
{
|
||||
identity_secret: identity_secret_hash,
|
||||
path_elements: Merkle_proof.path_elements,
|
||||
identity_path_index: Merkle_proof.indices,
|
||||
x: signal_hash,
|
||||
message_id: message_id,
|
||||
external_nullifier: external_nullifier,
|
||||
user_message_limit: message_limit
|
||||
}
|
||||
```
|
||||
|
||||
#### Calculating output
|
||||
|
||||
The Output is calculated in the same way as the RLN-Same sub-protocol.
|
||||
|
||||
### Verification and slashing
|
||||
|
||||
Verification and slashing in both subprotocols remain the same as in [32/RLN-V1](32/rln-v1.md).
|
||||
The only difference that may arise is the `message_limit` check in RLN-Same,
|
||||
since it is now a public input of the Circuit.
|
||||
|
||||
### ZK Circuits specification
|
||||
|
||||
The design of the [32/RLN-V1](32/rln-v1.md) circuits
|
||||
is different from the circuits of this protocol.
|
||||
RLN-v2 requires additional algebraic constraints.
|
||||
The membership proof and Shamir's Secret Sharing constraints remain unchanged.
|
||||
|
||||
The ZK Circuit is implemented using a [Groth-16 ZK-SNARK](https://eprint.iacr.org/2016/260.pdf),
|
||||
using the [circomlib](https://docs.circom.io/) library.
|
||||
Both schemes contain compile-time constants/system parameters:
|
||||
|
||||
* DEPTH - depth of membership Merkle tree
|
||||
* LIMIT_BIT_SIZE - bit size of `limit` numbers,
|
||||
e.g. for a bit size of 16, the maximum `limit` number is 65535.
|
||||
|
||||
The main difference of the protocol is that instead of a new polynomial
|
||||
(a new value `a_1`) for a new epoch, a new polynomial is generated for each message.
|
||||
The user assigns an identifier to each message;
|
||||
the main requirement is that this identifier be in the range from 1 to `limit`.
|
||||
This is proven using range constraints.
|
||||
|
||||
### RLN-Same circuit
|
||||
|
||||
#### Circuit parameters
|
||||
|
||||
Public Inputs
|
||||
|
||||
* `x`
|
||||
* `external_nullifier`
|
||||
* `message_limit` - limit per epoch
|
||||
|
||||
Private Inputs
|
||||
|
||||
* `identity_secret_hash`
|
||||
* `path_elements`
|
||||
* `identity_path_index`
|
||||
* `message_id`
|
||||
|
||||
Outputs
|
||||
|
||||
* `y`
|
||||
* `root`
|
||||
* `internal_nullifier`
|
||||
|
||||
### RLN-Diff circuit
|
||||
|
||||
In the RLN-Diff scheme, instead of the public parameter `message_limit`,
|
||||
a parameter is used that is set for each user during registration (`user_message_limit`);
|
||||
the `message_id` value is compared to it in the same way
|
||||
as it is compared to `message_limit` in the case of RLN-Same.
|
||||
|
||||
Circuit parameters
|
||||
|
||||
Public Inputs
|
||||
|
||||
* `x`
|
||||
* `external_nullifier`
|
||||
|
||||
Private Inputs
|
||||
|
||||
* `identity_secret_hash`
|
||||
* `path_elements`
|
||||
* `identity_path_index`
|
||||
* `message_id`
|
||||
* `user_message_limit`
|
||||
|
||||
Outputs
|
||||
|
||||
* `y`
|
||||
* `root`
|
||||
* `internal_nullifier`
|
||||
|
||||
## Appendix A: Security considerations
|
||||
|
||||
Although there are changes in the circuits,
|
||||
this spec inherits all the security considerations of [32/RLN-V1](32/rln-v1.md).
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
|
||||
## References
|
||||
|
||||
* [1](https://zkresear.ch/t/rate-limit-nullifier-v2-circuits/102)
|
||||
* [2](https://github.com/Rate-Limiting-Nullifier/rln-circuits-v2)
|
||||
* [3](32/rln-v1.md#technical-overview)
|
||||
@@ -1,521 +0,0 @@
|
||||
# SDS
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Scalable Data Sync protocol for distributed logs |
|
||||
| Slug | 109 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Hanno Cornelius <hanno@status.im>|
|
||||
| Contributors | Akhil Peddireddy <akhil@status.im>|
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-19** — [`f24e567`](https://github.com/logos-co/logos-lips/blob/f24e567d0b1e10c178bfa0c133495fe83b969b76/docs/ift-ts/raw/sds.md) — Chore/updates mdbook (#262)
|
||||
- **2026-01-16** — [`89f2ea8`](https://github.com/logos-co/logos-lips/blob/89f2ea89fc1d69ab238b63c7e6fb9e4203fd8529/docs/ift-ts/raw/sds.md) — Chore/mdbook updates (#258)
|
||||
- **2025-12-22** — [`0f1855e`](https://github.com/logos-co/logos-lips/blob/0f1855edcf68ef982c4ce478b67d660809aa9830/docs/vac/raw/sds.md) — Chore/fix headers (#239)
|
||||
- **2025-12-22** — [`b1a5783`](https://github.com/logos-co/logos-lips/blob/b1a578393edf8487ccc97a5f25b25af9bf41efb3/docs/vac/raw/sds.md) — Chore/mdbook updates (#237)
|
||||
- **2025-12-18** — [`d03e699`](https://github.com/logos-co/logos-lips/blob/d03e699084774ebecef9c6d4662498907c5e2080/docs/vac/raw/sds.md) — ci: add mdBook configuration (#233)
|
||||
- **2025-10-24** — [`6980237`](https://github.com/logos-co/logos-lips/blob/69802377a8c1df53659ac05c7aa93543be4b3e4a/vac/raw/sds.md) — Fix Linting Errors (#204)
|
||||
- **2025-10-13** — [`171e934`](https://github.com/logos-co/logos-lips/blob/171e934d6186a5952f0458bbe42c966859fe2a31/vac/raw/sds.md) — docs: add SDS-Repair extension (#176)
|
||||
- **2025-10-02** — [`6672c5b`](https://github.com/logos-co/logos-lips/blob/6672c5bedf5a08d3045d3b7d23d2b7a2e5d3aa2f/vac/raw/sds.md) — docs: update lamport timestamps to uint64, pegged to current time (#196)
|
||||
- **2025-09-15** — [`b1da703`](https://github.com/logos-co/logos-lips/blob/b1da70386edb15303fb8aa587b8a5da784a2d644/vac/raw/sds.md) — fix: use milliseconds for Lamport timestamp initialization (#179)
|
||||
- **2025-08-22** — [`3505da6`](https://github.com/logos-co/logos-lips/blob/3505da6bd66d2830e5711deb0b5c2b4de9212a4d/vac/raw/sds.md) — sds lint fix (#177)
|
||||
- **2025-08-19** — [`536d31b`](https://github.com/logos-co/logos-lips/blob/536d31b5b7641bd451cf35b94e8de1aa8a6c9f64/vac/raw/sds.md) — docs: re-add sender ID to messages (#170)
|
||||
- **2025-03-07** — [`8ee2a6d`](https://github.com/logos-co/logos-lips/blob/8ee2a6d6b232838d83374c35e2413f84436ecf64/vac/raw/sds.md) — docs: add optional retrieval hint to causal history in sds (#130)
|
||||
- **2025-02-20** — [`235c1d5`](https://github.com/logos-co/logos-lips/blob/235c1d5aa676d8278036003d4493c7c32afc033b/vac/raw/sds.md) — docs: clarify receiving sync messages (#131)
|
||||
- **2025-02-18** — [`7182459`](https://github.com/logos-co/logos-lips/blob/718245979fd1c67d04d1eab7ea31a4aad6dbc1d2/vac/raw/sds.md) — docs: update sds sync message requirements (#129)
|
||||
- **2025-01-28** — [`7a01711`](https://github.com/logos-co/logos-lips/blob/7a01711ffc5b50186e111bc2f4fa2e4b02b26bf3/vac/raw/sds.md) — fix(sds): remove optional from causal history field in Message protobuf (#123)
|
||||
- **2024-12-17** — [`08b363d`](https://github.com/logos-co/logos-lips/blob/08b363d67e34277e7881a20e36f788978cb0f93c/vac/raw/sds.md) — Update SDS.md: Remove Errors (#115)
|
||||
- **2024-11-28** — [`bee78c4`](https://github.com/logos-co/logos-lips/blob/bee78c40b9a94ed4c40fe6ba2505b6b0654206b4/vac/raw/sds.md) — docs: add SDS protocol for scalable e2e reliability (#108)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This specification introduces the Scalable Data Sync (SDS) protocol
|
||||
to achieve end-to-end reliability
|
||||
when consolidating distributed logs in a decentralized manner.
|
||||
The protocol is designed for a peer-to-peer (p2p) topology
|
||||
where an append-only log is maintained by each member of a group of nodes
|
||||
who may individually append new entries to their local log at any time and
|
||||
is interested in merging new entries from other nodes in real-time or close to real-time
|
||||
while maintaining a consistent order.
|
||||
The outcome of the log consolidation procedure is
|
||||
that all nodes in the group eventually reflect in their own logs
|
||||
the same entries in the same order.
|
||||
The protocol aims to scale to very large groups.
|
||||
|
||||
## Motivation
|
||||
|
||||
A common application that fits this model is a p2p group chat (or group communication),
|
||||
where the participants act as log nodes
|
||||
and the group conversation is modelled as the consolidated logs
|
||||
maintained on each node.
|
||||
The problem of end-to-end reliability can then be stated as
|
||||
ensuring that all participants eventually see the same sequence of messages
|
||||
in the same causal order,
|
||||
despite the challenges of network latency, message loss,
|
||||
and scalability present in any communications transport layer.
|
||||
The rest of this document will assume the terminology of a group communication:
|
||||
log nodes being the _participants_ in the group chat
|
||||
and the logged entries being the _messages_ exchanged between participants.
|
||||
|
||||
## Design Assumptions
|
||||
|
||||
We make the following simplifying assumptions for a proposed reliability protocol:
|
||||
|
||||
* **Broadcast routing:**
|
||||
Messages are broadcast disseminated by the underlying transport.
|
||||
The selected transport takes care of routing messages
|
||||
to all participants of the communication.
|
||||
* **Store nodes:**
|
||||
There are high-availability caches (a.k.a. Store nodes)
|
||||
from which missed messages can be retrieved.
|
||||
These caches maintain the full history of all messages that have been broadcast.
|
||||
This is an optional element in the protocol design,
|
||||
but improves scalability by reducing direct interactions between participants.
|
||||
* **Message ID:**
|
||||
Each message has a globally unique, immutable ID (or hash).
|
||||
Messages can be requested from the high-availability caches or
|
||||
other participants using the corresponding message ID.
|
||||
* **Participant ID:**
|
||||
Each participant has a globally unique, immutable ID
|
||||
visible to other participants in the communication.
|
||||
* **Sender ID:**
|
||||
The **Participant ID** of the original sender of a message,
|
||||
often coupled with a **Message ID**.
|
||||
|
||||
## Wire protocol
|
||||
|
||||
The keywords “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”,
|
||||
“SHOULD NOT”, “RECOMMENDED”, “MAY”, and
|
||||
“OPTIONAL” in this document are to be interpreted as described in [2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Message
|
||||
|
||||
Messages MUST adhere to the following meta structure:
|
||||
|
||||
```protobuf
|
||||
syntax = "proto3";
|
||||
|
||||
message HistoryEntry {
|
||||
string message_id = 1; // Unique identifier of the SDS message, as defined in `Message`
|
||||
optional bytes retrieval_hint = 2; // Optional information to help remote parties retrieve this SDS message; For example, A Waku deterministic message hash or routing payload hash
|
||||
|
||||
optional string sender_id = 3; // Participant ID of original message sender. Only populated if using optional SDS Repair extension
|
||||
}
|
||||
|
||||
message Message {
|
||||
string sender_id = 1; // Participant ID of the message sender
|
||||
string message_id = 2; // Unique identifier of the message
|
||||
string channel_id = 3; // Identifier of the channel to which the message belongs
|
||||
optional uint64 lamport_timestamp = 10; // Logical timestamp for causal ordering in channel
|
||||
repeated HistoryEntry causal_history = 11; // List of preceding message IDs that this message causally depends on. Generally 2 or 3 message IDs are included.
|
||||
optional bytes bloom_filter = 12; // Bloom filter representing received message IDs in channel
|
||||
|
||||
repeated HistoryEntry repair_request = 13; // Capped list of history entries missing from sender's causal history. Only populated if using the optional SDS Repair extension.
|
||||
|
||||
optional bytes content = 20; // Actual content of the message
|
||||
}
|
||||
```
|
||||
|
||||
The sending participant MUST include its own globally unique identifier in the `sender_id` field.
|
||||
In addition, it MUST include a globally unique identifier for the message in the `message_id` field,
|
||||
likely based on a message hash.
|
||||
The `channel_id` field MUST be set to the identifier of the channel of group communication
|
||||
that is being synchronized.
|
||||
For simple group communications without individual channels,
|
||||
the `channel_id` SHOULD be set to `0`.
|
||||
The `lamport_timestamp`, `causal_history` and
|
||||
`bloom_filter` fields MUST be set according to the [protocol steps](#protocol-steps)
|
||||
set out below.
|
||||
These fields MAY be left unset in the case of [ephemeral messages](#ephemeral-messages).
|
||||
The message `content` MAY be left empty for [periodic sync messages](#periodic-sync-message),
|
||||
otherwise it MUST contain the application-level content.
|
||||
|
||||
> **_Note:_** Close readers may notice that,
|
||||
outside of filtering messages originating from the sender itself,
|
||||
the `sender_id` field is not used for much.
|
||||
Its importance is expected to increase once a p2p retrieval mechanism is added to SDS,
|
||||
as is planned for the protocol.
|
||||
|
||||
### Participant state
|
||||
|
||||
Each participant MUST maintain:
|
||||
|
||||
* A Lamport timestamp for each channel of communication,
|
||||
initialized to current epoch time in millisecond resolution.
|
||||
The Lamport timestamp is increased as described in the [protocol steps](#protocol-steps)
|
||||
to maintain a logical ordering of events while staying close to the current epoch time.
|
||||
This allows the messages from new joiners to be correctly ordered with other recent messages,
|
||||
without these new participants first having to synchronize past messages to discover the current Lamport timestamp.
|
||||
* A bloom filter for received message IDs per channel.
|
||||
The bloom filter SHOULD be rolled over and
|
||||
recomputed once it reaches a predefined capacity of message IDs.
|
||||
Furthermore,
|
||||
it SHOULD be designed to minimize false positives through an optimal selection of
|
||||
size and hash functions.
|
||||
* A buffer for unacknowledged outgoing messages
|
||||
* A buffer for incoming messages with unmet causal dependencies
|
||||
* A local log (or history) for each channel,
|
||||
containing all message IDs in the communication channel,
|
||||
ordered by Lamport timestamp.
|
||||
|
||||
Messages in the unacknowledged outgoing buffer can be in one of three states:
|
||||
|
||||
1. **Unacknowledged** - there has been no acknowledgement of message receipt
|
||||
by any participant in the channel
|
||||
2. **Possibly acknowledged** - there has been ambiguous indication that the message
|
||||
has been _possibly_ received by at least one participant in the channel
|
||||
3. **Acknowledged** - there has been sufficient indication that the message
|
||||
has been received by at least some of the participants in the channel.
|
||||
This state will also remove the message from the outgoing buffer.
|
||||
|
||||
### Protocol Steps
|
||||
|
||||
For each channel of communication,
|
||||
participants MUST follow these protocol steps to populate and interpret
|
||||
the `lamport_timestamp`, `causal_history` and `bloom_filter` fields.
|
||||
|
||||
#### Send Message
|
||||
|
||||
Before broadcasting a message:
|
||||
|
||||
* the participant MUST set its local Lamport timestamp
|
||||
to the maximum between the current value + `1`
|
||||
and the current epoch time in milliseconds.
|
||||
In other words the local Lamport timestamp is set to `max(timeNowInMs, current_lamport_timestamp + 1)`.
|
||||
* the participant MUST include the increased Lamport timestamp in the message's `lamport_timestamp` field.
|
||||
* the participant MUST determine the preceding few message IDs in the local history
|
||||
and include these in an ordered list in the `causal_history` field.
|
||||
The number of message IDs to include in the `causal_history` depends on the application.
|
||||
We recommend a causal history of two message IDs.
|
||||
* the participant MAY include a `retrieval_hint` in the `HistoryEntry`
|
||||
for each message ID in the `causal_history` field.
|
||||
This is an application-specific field to facilitate retrieval of messages,
|
||||
e.g. from high-availability caches.
|
||||
* the participant MUST include the current `bloom_filter`
|
||||
state in the broadcast message.
|
||||
|
||||
After broadcasting a message,
|
||||
the message MUST be added to the participant’s buffer
|
||||
of unacknowledged outgoing messages.
|
||||
|
||||
#### Receive Message
|
||||
|
||||
Upon receiving a message,
|
||||
|
||||
* the participant SHOULD ignore the message if it has a `sender_id` matching its own.
|
||||
* the participant MAY deduplicate the message by comparing its `message_id` to previously received message IDs.
|
||||
* the participant MUST [review the ACK status](#review-ack-status) of messages
|
||||
in its unacknowledged outgoing buffer
|
||||
using the received message's causal history and bloom filter.
|
||||
* if the message has a populated `content` field,
|
||||
the participant MUST include the received message ID in its local bloom filter.
|
||||
* the participant MUST verify that all causal dependencies are met
|
||||
for the received message.
|
||||
Dependencies are met if the message IDs in the `causal_history` of the received message
|
||||
appear in the local history of the receiving participant.
|
||||
|
||||
If all dependencies are met and the message has a populated `content` field,
|
||||
the participant MUST [deliver the message](#deliver-message).
|
||||
If dependencies are unmet,
|
||||
the participant MUST add the message to the incoming buffer of messages
|
||||
with unmet causal dependencies.
|
||||
|
||||
#### Deliver Message
|
||||
|
||||
Triggered by the [Receive Message](#receive-message) procedure.
|
||||
|
||||
If the received message’s Lamport timestamp is greater than the participant's
|
||||
local Lamport timestamp,
|
||||
the participant MUST update its local Lamport timestamp to match the received message.
|
||||
The participant MUST insert the message ID into its local log,
|
||||
based on Lamport timestamp.
|
||||
If one or more message IDs with the same Lamport timestamp already exist,
|
||||
the participant MUST follow the [Resolve Conflicts](#resolve-conflicts) procedure.
|
||||
|
||||
#### Resolve Conflicts
|
||||
|
||||
Triggered by the [Deliver Message](#deliver-message) procedure.
|
||||
|
||||
The participant MUST order messages with the same Lamport timestamp
|
||||
in ascending order of message ID.
|
||||
If the message ID is implemented as a hash of the message,
|
||||
this means the message with the lowest hash would precede
|
||||
other messages with the same Lamport timestamp in the local log.
|
||||
|
||||
#### Review ACK Status
|
||||
|
||||
Triggered by the [Receive Message](#receive-message) procedure.
|
||||
|
||||
For each message in the unacknowledged outgoing buffer,
|
||||
based on the received `bloom_filter` and `causal_history`:
|
||||
|
||||
* the participant MUST mark all messages in the received `causal_history` as **acknowledged**.
|
||||
* the participant MUST mark all messages included in the `bloom_filter`
|
||||
as **possibly acknowledged**.
|
||||
If a message appears as **possibly acknowledged** in multiple received bloom filters,
|
||||
the participant MAY mark it as acknowledged based on probabilistic grounds,
|
||||
taking into account the bloom filter size and hash number.
|
||||
|
||||
#### Periodic Incoming Buffer Sweep
|
||||
|
||||
The participant MUST periodically check causal dependencies for each message
|
||||
in the incoming buffer.
|
||||
For each message in the incoming buffer:
|
||||
|
||||
* the participant MAY attempt to retrieve missing dependencies from the Store node
|
||||
(high-availability cache) or other peers.
|
||||
It MAY use the application-specific `retrieval_hint` in the `HistoryEntry` to facilitate retrieval.
|
||||
* if all dependencies of a message are met,
|
||||
the participant MUST proceed to [deliver the message](#deliver-message).
|
||||
|
||||
If a message's causal dependencies have failed to be met
|
||||
after a predetermined amount of time,
|
||||
the participant MAY mark them as **irretrievably lost**.
|
||||
|
||||
#### Periodic Outgoing Buffer Sweep
|
||||
|
||||
The participant MUST rebroadcast **unacknowledged** outgoing messages
|
||||
after a set period.
|
||||
The participant SHOULD use distinct resend periods for **unacknowledged** and
|
||||
**possibly acknowledged** messages,
|
||||
prioritizing **unacknowledged** messages.
|
||||
|
||||
#### Periodic Sync Message
|
||||
|
||||
For each channel of communication,
|
||||
participants SHOULD periodically send sync messages to maintain state.
|
||||
These sync messages:
|
||||
|
||||
* MUST be sent with empty content
|
||||
* MUST include a Lamport timestamp increased to `max(timeNowInMs, current_lamport_timestamp + 1)`,
|
||||
where `timeNowInMs` is the current epoch time in milliseconds.
|
||||
* MUST include causal history and bloom filter according to regular message rules
|
||||
* MUST NOT be added to the unacknowledged outgoing buffer
|
||||
* MUST NOT be included in causal histories of subsequent messages
|
||||
* MUST NOT be included in bloom filters
|
||||
* MUST NOT be added to the local log
|
||||
|
||||
Since sync messages are not persisted,
|
||||
they MAY have non-unique message IDs without impacting the protocol.
|
||||
To avoid network activity bursts in large groups,
|
||||
a participant MAY choose to only send periodic sync messages
|
||||
if no other messages have been broadcast in the channel after a random backoff period.
|
||||
|
||||
Participants MUST process the causal history and bloom filter of these sync messages
|
||||
following the same steps as regular messages,
|
||||
but MUST NOT persist the sync messages themselves.
|
||||
|
||||
#### Ephemeral Messages
|
||||
|
||||
Participants MAY choose to send short-lived messages for which no synchronization
|
||||
or reliability is required.
|
||||
These messages are termed _ephemeral_.
|
||||
|
||||
Ephemeral messages SHOULD be sent with `lamport_timestamp`, `causal_history`, and
|
||||
`bloom_filter` unset.
|
||||
Ephemeral messages SHOULD NOT be added to the unacknowledged outgoing buffer
|
||||
after broadcast.
|
||||
Upon reception,
|
||||
ephemeral messages SHOULD be delivered immediately without buffering for causal dependencies
|
||||
or including in the local log.
|
||||
|
||||
### SDS Repair (SDS-R)
|
||||
|
||||
SDS Repair (SDS-R) is an optional extension module for SDS,
|
||||
allowing participants in a communication to collectively repair any gaps in causal history (missing messages)
|
||||
preferably over a limited time window.
|
||||
Since SDS-R acts as coordinated rebroadcasting of missing messages,
|
||||
which involves all participants of the communication,
|
||||
it is most appropriate in a limited use case for repairing relatively recent missed dependencies.
|
||||
It is not meant to replace mechanisms for long-term consistency,
|
||||
such as peer-to-peer syncing or the use of a high-availability centralised cache (Store node).
|
||||
|
||||
#### SDS-R message fields
|
||||
|
||||
SDS-R adds the following fields to SDS messages:
|
||||
|
||||
* `sender_id` in `HistoryEntry`:
|
||||
the original message sender's participant ID.
|
||||
This is used to determine the group of participants who will respond to a repair request.
|
||||
* `repair_request` in `Message`:
|
||||
a capped list of history entries missing for the message sender
|
||||
and for which it's requesting a repair.
|
||||
|
||||
#### SDS-R participant state
|
||||
|
||||
SDS-R adds the following to each participant state:
|
||||
|
||||
* Outgoing **repair request buffer**:
|
||||
a list of locally missing `HistoryEntry`s
|
||||
each mapped to a future request timestamp, `T_req`,
|
||||
after which this participant will request a repair if at that point the missing dependency has not been repaired yet.
|
||||
`T_req` is computed as a pseudorandom backoff from the timestamp when the dependency was detected missing.
|
||||
[Determining `T_req`](#determine-t_req) is described below.
|
||||
We RECOMMEND that the outgoing repair request buffer be chronologically ordered in ascending order of `T_req`.
|
||||
|
||||
* Incoming **repair request buffer**:
|
||||
a list of locally available `HistoryEntry`s
|
||||
that were requested for repair by a remote participant
|
||||
AND for which this participant might be an eligible responder,
|
||||
each mapped to a future response timestamp, `T_resp`,
|
||||
after which this participant will rebroadcast the corresponding requested `Message` if at that point no other participant had rebroadcast the `Message`.
|
||||
`T_resp` is computed as a pseudorandom backoff from the timestamp when the repair was first requested.
|
||||
[Determining `T_resp`](#determine-t_resp) is described below.
|
||||
We describe below how a participant can [determine if they're an eligible responder](#determine-response-group) for a specific repair request.
|
||||
|
||||
* Augmented local history log:
|
||||
for each message ID kept in the local log for which the participant could be a repair responder,
|
||||
the full SDS `Message` must be cached rather than just the message ID,
|
||||
in case this participant is called upon to rebroadcast the message.
|
||||
We describe below how a participant can [determine if they're an eligible responder](#determine-response-group) for a specific message.
|
||||
|
||||
**_Note:_** The required state can likely be significantly reduced in future by simply requiring that a responding participant should _reconstruct_ the original `Message` when rebroadcasting, rather than the simpler, but heavier,
|
||||
requirement of caching the entire received `Message` content in local history.
|
||||
|
||||
#### SDS-R global state
|
||||
|
||||
For a specific channel (that is, within a specific SDS-controlled communication)
|
||||
the following SDS-R configuration state SHOULD be common for all participants in the conversation:
|
||||
|
||||
* `T_min`: the _minimum_ time period to wait before a missing causal entry can be repaired.
|
||||
We RECOMMEND a value of at least 30 seconds.
|
||||
* `T_max`: the _maximum_ time period over which missing causal entries can be repaired.
|
||||
We RECOMMEND a value of between 120 and 600 seconds.
|
||||
|
||||
Furthermore, to avoid a broadcast storm with multiple participants responding to a repair request,
|
||||
participants in a single channel MAY be divided into discrete response groups.
|
||||
Participants will only respond to a repair request if they are in the response group for that request.
|
||||
The global `num_response_groups` variable configures the number of response groups for this communication.
|
||||
Its use is described below.
|
||||
A reasonable default value for `num_response_groups` is one response group for every `128` participants.
|
||||
In other words, if the (roughly) expected number of participants is expressed as `num_participants`, then
|
||||
`num_response_groups = num_participants div 128 + 1`.
|
||||
In other words, if there are fewer than 128 participants in a communication,
|
||||
they will all belong to the same response group.
|
||||
|
||||
We RECOMMEND that the global state variables `T_min`, `T_max` and `num_response_groups`
|
||||
be set _statically_ for a specific SDS-R application,
|
||||
based on expected number of group participants and volume of traffic.
|
||||
|
||||
**_Note:_** Future versions of this protocol will recommend dynamic global SDS-R variables,
|
||||
based on the current number of participants.
|
||||
|
||||
#### SDS-R send message
|
||||
|
||||
SDS-R adds the following steps when sending a message:
|
||||
|
||||
Before broadcasting a message,
|
||||
|
||||
* the participant SHOULD populate the `repair_request` field in the message
|
||||
with _eligible_ entries from the outgoing repair request buffer.
|
||||
An entry is eligible to be included in a `repair_request`
|
||||
if its corresponding request timestamp, `T_req`, has expired (in other words,
|
||||
`T_req <= current_time`).
|
||||
The maximum number of repair request entries to include is up to the application.
|
||||
We RECOMMEND that this quota be filled by the eligible entries from the outgoing repair request buffer with the lowest `T_req`.
|
||||
We RECOMMEND a maximum of 3 entries.
|
||||
If there are no eligible entries in the buffer,
|
||||
this optional field MUST be left unset.
|
||||
|
||||
#### SDS-R receive message
|
||||
|
||||
On receiving a message,
|
||||
|
||||
* the participant MUST remove entries matching the received message ID from its _outgoing_ repair request buffer.
|
||||
This ensures that the participant does not request repairs for dependencies that have now been met.
|
||||
* the participant MUST remove entries matching the received message ID from its _incoming_ repair request buffer.
|
||||
This ensures that the participant does not respond to repair requests that another participant has already responded to.
|
||||
* the participant SHOULD add any unmet causal dependencies to its outgoing repair request buffer against a unique `T_req` timestamp for that entry.
|
||||
It MUST compute the `T_req` for each such HistoryEntry according to the steps outlined in [_Determine T_req_](#determine-t_req).
|
||||
* for each item in the `repair_request` field:
|
||||
* the participant MUST remove entries matching the repair message ID from its own outgoing repair request buffer.
|
||||
This limits the number of participants that will request a common missing dependency.
|
||||
* if the participant has the requested `Message` in its local history _and_ is an eligible responder for the repair request,
|
||||
it SHOULD add the request to its incoming repair request buffer against a unique `T_resp` timestamp for that entry.
|
||||
It MUST compute the `T_resp` for each such repair request according to the steps outlined in [_Determine T_resp_](#determine-t_resp).
|
||||
It MUST determine if it's an eligible responder for a repair request according to the steps outlined in [_Determine response group_](#determine-response-group).
|
||||
|
||||
#### Determine T_req
|
||||
|
||||
A participant determines the repair request timestamp, `T_req`,
|
||||
for a missing `HistoryEntry` as follows:
|
||||
|
||||
```text
|
||||
T_req = current_time + hash(participant_id, message_id) % (T_max - T_min) + T_min
|
||||
```
|
||||
|
||||
where `current_time` is the current timestamp,
|
||||
`participant_id` is the participant's _own_ participant ID
|
||||
(not the `sender_id` in the missing `HistoryEntry`),
|
||||
`message_id` is the missing `HistoryEntry`'s message ID,
|
||||
and `T_min` and `T_max` are as set out in [SDS-R global state](#sds-r-global-state).
|
||||
|
||||
This allows `T_req` to be pseudorandomly and linearly distributed as a backoff of between `T_min` and `T_max` from current time.
|
||||
|
||||
> **_Note:_** placing `T_req` values on an exponential backoff curve will likely be more appropriate and is left for a future improvement.
|
||||
|
||||
#### Determine T_resp
|
||||
|
||||
A participant determines the repair response timestamp, `T_resp`,
|
||||
for a `HistoryEntry` that it could repair as follows:
|
||||
|
||||
```text
|
||||
distance = hash(participant_id) XOR hash(sender_id)
|
||||
T_resp = current_time + distance*hash(message_id) % T_max
|
||||
```
|
||||
|
||||
where `current_time` is the current timestamp,
|
||||
`participant_id` is the participant's _own_ (local) participant ID,
|
||||
`sender_id` is the requested `HistoryEntry` sender ID,
|
||||
`message_id` is the requested `HistoryEntry` message ID,
|
||||
and `T_max` is as set out in [SDS-R global state](#sds-r-global-state).
|
||||
|
||||
We first calculate the logical `distance` between the local `participant_id` and
|
||||
the original `sender_id`.
|
||||
If this participant is the original sender, the `distance` will be `0`.
|
||||
It should then be clear that the original participant will have a response backoff time of `0`,
|
||||
making it the most likely responder.
|
||||
The `T_resp` values for other eligible participants will be pseudorandomly and
|
||||
linearly distributed as a backoff of up to `T_max` from current time.
|
||||
|
||||
> **_Note:_** placing `T_resp` values on an exponential backoff curve will likely be more appropriate and
|
||||
is left for a future improvement.
|
||||
|
||||
#### Determine response group
|
||||
|
||||
Given a message with `sender_id` and `message_id`,
|
||||
a participant with `participant_id` is in the response group for that message if
|
||||
|
||||
```text
|
||||
hash(participant_id, message_id) % num_response_groups == hash(sender_id, message_id) % num_response_groups
|
||||
```
|
||||
|
||||
where `num_response_groups` is as set out in [SDS-R global state](#sds-r-global-state).
|
||||
This ensures that a participant will always be in the response group for its own published messages.
|
||||
It also allows participants to determine immediately on first reception of a message or
|
||||
a history entry if they are in the associated response group.
|
||||
|
||||
#### SDS-R incoming repair request buffer sweep
|
||||
|
||||
An SDS-R participant MUST periodically check if there are any incoming requests in the **incoming** repair request buffer that are due for a response.
|
||||
For each item in the buffer,
|
||||
the participant SHOULD broadcast the corresponding `Message` from local history
|
||||
if its corresponding response timestamp, `T_resp`, has expired
|
||||
(in other words, `T_resp <= current_time`).
|
||||
|
||||
#### SDS-R Periodic Sync Message
|
||||
|
||||
If the participant is due to send a periodic sync message,
|
||||
it SHOULD send the message according to [SDS-R send message](#sds-r-send-message)
|
||||
if there are any eligible items in the outgoing repair request buffer,
|
||||
regardless of whether other participants have also recently broadcast a Periodic Sync message.
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||
@@ -1,415 +0,0 @@
|
||||
# STATUS-RLN-DEPLOYMENT
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | RLN deployment to the Status network for gasless L2 |
|
||||
| Slug | 156 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Ugur Sen [ugur@status.im](mailto:ugur@status.im) |
|
||||
| Contributors | Sylvain [sylvain@status.im](mailto:sylvain@status.im) |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-04-01** — [`b042c3e`](https://github.com/logos-co/logos-lips/blob/b042c3eb64faa0202b3c80d8726d57aacf8ec82c/docs/ift-ts/raw/status-rln-deployment.md) — docs: Initial PR for SN RLN deployment RFC (#286)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document specifies the Status L2 RLN deployment
|
||||
architecture for enabling gasless transactions with
|
||||
built-in spam resistance based on the RLN-V2 protocol.
|
||||
The specification defines system roles, on-chain and off-chain components,
|
||||
and the end-to-end transaction flow across users, smart contracts, Layer2 services,
|
||||
prover and verifier modules, and decentralized slashers.
|
||||
It describes Karma-based tier management, RLN membership registration,
|
||||
RLN proof generation and verification, deny-list enforcement, gas-aware message accounting,
|
||||
and decentralized slashing.
|
||||
The document further outlines storage requirements and synchronization mechanisms
|
||||
between on-chain events and off-chain state, providing a cohesive framework
|
||||
for scalable and abuse-resistant transaction processing on Status L2.
|
||||
|
||||
The architecture separates cryptographic soundness from operational deployment.
|
||||
While operational components may be centralized in this version,
|
||||
cryptographic enforcement of rate limits and slashing remains verifiable and non-custodial.
|
||||
|
||||
## Roles
|
||||
|
||||
Status L2 RLN deployment consists of six roles: `user`, `Karma contract`,
|
||||
`RLN contract`, `Layer2`, `Linea ecosystem`, and `Slashers`.
|
||||
|
||||
- `user`: Uses the Status L2 in a gasless manner who MAY pay premium gas for the transaction (TX).
|
||||
- `Karma contract`: The contract maintains user karma balances, enforces karma slashing, and manages updateable tier limits.
|
||||
- `RLN contract`: The contract that stores the RLN memberships.
|
||||
- `Layer2`: Trusted components that are operated by Status L2 team.
|
||||
- `Linea ecosystem`: Linea L2 components
|
||||
- `Slashers` : External identities responsible for tracking RLN proofs
|
||||
and related metadata in order to identify spam and trigger slashing when applicable.
|
||||
|
||||
## General Flow
|
||||
|
||||
- `User` creates a TX and sends it to the network
|
||||
- The RPC node submits the TX to the mempool, where it is forwarded to the Prover via gRPC,
|
||||
before P2P propagation to the sequencer.
|
||||
- `Prover module` bootstraps by querying the `Karma contract` for current user tier limits,
|
||||
then listens to events and updates the local tier limit table upon any changes.
|
||||
- `Prover module` checks user has enough Karma `minK` (which equals the `minKarma` of the first tier)
|
||||
for registration, if so prover module registers RLN membership on behalf of user, otherwise skips registration.
|
||||
- Prover creates the RLN proof using the [Zerokit](https://github.com/vacp2p/zerokit) backend `prover module`
|
||||
if user is registered and is in Tier limit and stores in a database.
|
||||
If registered user exceeds the tier limit, the user needs to pay premium gas. Then prover streams the proofs and metadata via gRPC.
|
||||
- RLN [verifier module](#2-verifier-module) fetches the RLN proofs and metadata from
|
||||
the prover module and tries to find the RLN proof for each submitted transaction (TX).
|
||||
The Sequencer forwards the transaction to the mempool if the RLN verifier module
|
||||
has not detected spam and the transaction is accompanied by a valid RLN proof.
|
||||
- In parallel with the sequencer’s operation, `slashers` independently subscribe
|
||||
to and fetch RLN proofs from the prover and monitor them for spam behavior.
|
||||
Upon detecting spam, `slashers` extract corresponding `secret-hash` from proofs
|
||||
and submit it to the `RLN contract` . As result, the spammer’s RLN membership is revoked on-chain
|
||||
by removing the registration, and the prover module updates its local state accordingly
|
||||
by removing the user based on the emitted slashing event.
|
||||
Finally, the spammer's Karma amount is set to `minK`-1.
|
||||
|
||||
The tier table is as follows:
|
||||
|
||||
| Tier | Daily Quota (Tier limits) | Equivalent Rate | Karma Range |
|
||||
| --- | --- | --- | --- |
|
||||
| Entry | 1 tx/day | 1 tx every 24 hours | 0-1 |
|
||||
| Newbie | 5 tx/day | 1 tx every ~5 hours | 2-49 |
|
||||
| Basic | 15 tx/day | 1 tx every 90 minutes | 50-499 |
|
||||
| Active | 96 tx/day | 1 tx every 15 minutes | 500-4999 |
|
||||
| Regular | 480 tx/day | 1 tx every 3 minutes | 5000-19999 |
|
||||
| Power User | 960 tx/day | 1 tx every 90 seconds | 20000-99999 |
|
||||
| Pro User | 10080 tx/day | 1 tx every 9 seconds | 100000-499999 |
|
||||
| High-Throughput | 108000 tx/day | 1 TPS | 500000-4999999 |
|
||||
| S-Tier | 240000 tx/day | 5 TPS | 5000000-9999999 |
|
||||
| Legendary | 480000 tx/day | 10 TPS | 10000000-∞ |
|
||||
|
||||
## 1. Prover module
|
||||
|
||||
Prover module is a stand-alone gRPC service module
|
||||
that is mainly responsible for three functionalities:
|
||||
the Karma service, RLN registration, and creating RLN proofs.
|
||||
This module is operated by `Layer2`.
|
||||
|
||||
### 1.1. Karma service
|
||||
|
||||
The prover module requires the amounts of Karma
|
||||
of users to manage tier levels.
|
||||
To this end, the Karma service has two functionalities, both implemented by querying the `Karma contract`:
|
||||
|
||||
- Get amount of Karma for a user
|
||||
- Get Tier limits
|
||||
|
||||
A Karma service query is triggered if the user has no more free TX rights,
|
||||
in which case the user can move to a higher tier without user interaction.
|
||||
Otherwise, if the `user` has enough free TX, we don’t update `user`'s tier.
|
||||
|
||||
Tier proto file for Tier Query info:
|
||||
|
||||
```protobuf
|
||||
message GetUserTierInfoRequest {
|
||||
Address user = 1;
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message GetUserTierInfoReply {
|
||||
oneof resp {
|
||||
UserTierInfoResult res = 1;
|
||||
UserTierInfoError error = 2;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message UserTierInfoResult {
|
||||
sint64 current_epoch = 1;
|
||||
sint64 current_epoch_slice = 2;
|
||||
uint64 tx_count = 3;
|
||||
optional Tier tier = 4;
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message Tier {
|
||||
string name = 1;
|
||||
uint64 quota = 2;
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message UserTierInfoError {
|
||||
string message = 1;
|
||||
}
|
||||
```
|
||||
|
||||
### 1.2. RLN registration
|
||||
|
||||
Prover module MUST register the users who have at least `minK` Karma
|
||||
to the RLN contract for corresponding global rate limit `rateR` automatically,
|
||||
where `minK` and `rateR` are fixed for every user.
|
||||
After setting these values, the registration proceeds as follows:
|
||||
|
||||
- Creates the `id-commitment` based on `rateR` on behalf of `user`.
|
||||
- Sends the `id-commitment` to the RLN contract without Karma stake.
|
||||
- Receives and stores the membership proof information, such as the leaf index,
|
||||
from the RLN contract in `registeredUsers` list.
|
||||
|
||||
Finally, `registeredUsers` consists of as follows:
|
||||
|
||||
- User address: `0xabc...`
|
||||
- User `treeInfo`: (`treePath`,`treeIndex`) since `id-commitment` values are stored in multiple trees in the DB.
|
||||
|
||||
With the registration, the user is allowed to use free gas transactions within its Tier.
|
||||
|
||||
```protobuf
|
||||
enum RegistrationStatus {
|
||||
Success = 0;
|
||||
Failure = 1;
|
||||
AlreadyRegistered = 2;
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message RegisterUserReply {
|
||||
RegistrationStatus status = 1;
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3. Proof generation
|
||||
|
||||
Prover module MUST create an `RLNproof` for a user who is in the `registeredUsers` table,
|
||||
upon a TX as shown in previous step for a gasless TX.
|
||||
`RLNproof` generation for the TX is done by the Prover module as follows:
|
||||
|
||||
- Receive the TX from the RPC node asynchronously, user is the owner of the TX
|
||||
- Checks the user is indeed in `registeredUsers`
|
||||
- Creates RLN proof on TX by using Zerokit with checking membership information `treeInfo` in `registeredUsers`
|
||||
then streams the proof for a specific epoch.
|
||||
- Serializes then streams RLN proofs via gRPC.
|
||||
- Outputs `RLNProof` metadata named `proof_value`, which contains the `y` and `internal_nullifier` values;
|
||||
see the [RLN specification](https://rfc.vac.dev/vac/raw/rln-v2/) for details.
|
||||
|
||||
```protobuf
|
||||
message RlnProofFilter {
|
||||
optional string address = 1;
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message RlnProofReply {
|
||||
oneof resp {
|
||||
RlnProof proof = 1;
|
||||
RlnProofError error = 2;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message RlnProof {
|
||||
bytes sender = 1;
|
||||
bytes tx_hash = 2;
|
||||
bytes proof = 3;
|
||||
}
|
||||
```
|
||||
|
||||
```protobuf
|
||||
message RlnProofError {
|
||||
string error = 2;
|
||||
}
|
||||
```
|
||||
|
||||
Note that the prover module always creates an RLN proof upon user request,
|
||||
regardless of whether the user exceeds tier or RLN limits.
|
||||
Enforcement of tier limits is performed via deny-list interactions,
|
||||
while RLN limits are enforced by revealing the `secret-hash` extracted
|
||||
from spam proofs and submitting it to the `RLN contract`.
|
||||
|
||||
### 1.4. Storage
|
||||
|
||||
RLN proofs are stored in a persistent database (DB) with other information as follows:
|
||||
|
||||
- **table “user”**: Stores the `RlnUserIdentity` which consists of three field elements: `id-commitment`, `secret-hash` and `rateR`.
|
||||
- key = Serialized(`User address`)
|
||||
- value = Serialized (`RlnUserIdentity` , `TreeIndex` , `IndexInMerkleTree`)
|
||||
|
||||
Since `RlnUserIdentity` are stored in multiple merkle tree, prover locates them with `TreeIndex` and `IndexInMerkleTree.`
|
||||
|
||||
- table “idx”:
|
||||
- there is only 1 key = “COUNT” and value = “Number of Merkle tree”
|
||||
- table “tx_counter”:
|
||||
- key = Serialized (User address)
|
||||
- value = Serialized(EpochCounters structure) = Serialized(~ `Epoch`, `tx_counter`)
|
||||
- table “tier_limits”:
|
||||
- **Key** = Only 2 keys `CURRENT` ‖ `NEXT`
|
||||
- **Value** = Serialized `Tier Limit list`
|
||||
|
||||
### 1.5. Tier List Management
|
||||
|
||||
Tiers list are stored on-chain in `Karma contract` and this is a dynamic list
|
||||
that is adjusted by Status L2 team according to the inflation of Karma bound.
|
||||
This section specifies the changes that are initiated by the `Karma contract` and then affect the prover module.
|
||||
Each update starts by invoking the tier list in `Karma contract` with some requirements as follows:
|
||||
|
||||
- Each update MUST be contiguous, which means no gap or overlap between different tiers.
|
||||
In other words, the minKarma–maxKarma ranges of two sequential tiers MUST be disjoint,
|
||||
where minKarma and MaxKarma values are the local values for each karma tier
|
||||
unlike `minK`, which is the minimum Karma amount required for a user to use gasless transactions.
|
||||
- For a tier, minKarma MUST be less than maxKarma.
|
||||
- First tier’s minKarma MUST be equal to `minK`
|
||||
|
||||
```solidity
|
||||
struct Tier {
|
||||
uint256 minKarma;
|
||||
uint256 maxKarma;
|
||||
string name;
|
||||
uint32 txPerEpoch;
|
||||
}
|
||||
```
|
||||
|
||||
Updating tiers phase starts with updating tier list in `Karma contract`
|
||||
which statically writes the new tiers and MAY change the number of tiers and their bounds.
|
||||
Then, the check method in `Karma contract` checks three requirements above.
|
||||
|
||||
As the second phase, the prover module listens for a specific event, then fetches
|
||||
the new tier list from `Karma contract` and update the local list.
|
||||
Note that updating the contract requires a delay until the prover's local tier table is updated.
|
||||
|
||||
### 1.6 Gas Checking
|
||||
|
||||
Prover is also responsible for checking that the gas requirements of TXs are at the limit
|
||||
since the RLN protects the network in terms of number of TXs not the total gas consumptions.
|
||||
When a TX is submitted to the prover, it has a field named: `estimated_gas_used` (type uint64 - unit gas unit e.g. not in wei).
|
||||
|
||||
For now, the prover has the TX and its gas estimation, namely `currentGas`.
|
||||
The prover checks that `currentGas` is not larger than `gasQuota` for a single proof.
|
||||
If `currentGas` is equal or lower than the `gasQuota`,
|
||||
the prover continues with the [proof generation section](#13-proof-generation).
|
||||
|
||||
Otherwise, the prover calculates `txCounterIncrease` as ceil(`currentGas`/`gasQuota`),
|
||||
expecting a value of at least 2 since `currentGas` > `gasQuota`.
|
||||
Then the prover creates a proof that burns `txCounterIncrease` message allocations
|
||||
for the TX due to the [multi-message_id burn RLN](https://lip.logos.co/ift-ts/raw/multi-message_id-burn-rln.html).
|
||||
|
||||
## 2. Verifier Module
|
||||
|
||||
The verifier module is composed of an identity operating
|
||||
in the sequencer environment together with the decentralized external slashers.
|
||||
Also verifier module manually conducts the slashing by invoking the `Karma contract`
|
||||
with authorized callers as owner in case of spamming.
|
||||
Prover module outputs `RLNproof` with proof metadata named `proof_values`
|
||||
that includes `y` and `internal_nullifier` value.
|
||||
The verifier module MUST record and store all `internal_nullifier` to use them for detecting spam.
|
||||
|
||||
The detection of spam relies on recurring `internal_nullifier` values within an epoch.
|
||||
In this case, when the verifier module detects such a recurrence,
|
||||
the verifier module MUST extract the `secret-hash` from two different messages with the same `message_id`
|
||||
(see [RLN Specification](https://lip.logos.co/ift-ts/raw/rln-v2.html)), and invoke the `Karma contract`
|
||||
for slashing which maps user’s Karma to `MinK-1` then adds the `user` to `denylist`.
|
||||
|
||||
Note that Zerokit contains [a function named compute_id_secret](https://github.com/vacp2p/zerokit/blob/master/rln/src/protocol.rs#L526)
|
||||
for extracting the secret-hash for a given two recurring `internal_nullifier`.
|
||||
|
||||
## 3. Smart Contracts
|
||||
|
||||
There are two contracts, the `Karma contract` and the `RLN contract`; the former regulates
|
||||
tier management and slashing in case of spam, and the latter holds the RLN membership tree.
|
||||
The prover listens for slashing events so that it updates its state by removing the slashed user
|
||||
as a spammer from the local DB. The prover also listens for tier-limit updates from the `Karma contract` to update its local tier limits.
|
||||
|
||||
`Karma contract`:
|
||||
|
||||
- Modified ERC20 contract without transfer option.
|
||||
- Can be queried to get any user’s Karma balance.
|
||||
- Stores an updatable tier table showing the min and max Karma; the prover module fetches this information.
|
||||
|
||||
`RLN contract`:
|
||||
|
||||
- Stores the RLN membership tree that consists of `id-commitment`
|
||||
- Does not store stake since Karma is non-transferable
|
||||
- Contains the slashing function as mentioned in [Decentralized Slashing](#5-decentralized-slashing) section which takes a `secret-hash` and get reward
|
||||
for the invoker; also, the spammer's `id-commitment` is dropped from the contract and the prover.
|
||||
|
||||
## 4. Deny List
|
||||
|
||||
The deny list behaves as a blacklist for a `user` who acts maliciously in two ways:
|
||||
|
||||
1. Exceeds the tier limit and still trying to use gasless TX.
|
||||
The prover module marks this user as being on the deny list but still continues to create the RLN proof for the TX.
|
||||
2. Exceeds the global rate limit `rateR`, which results in slashing by mapping the user's Karma to `MinK-1`.
|
||||
|
||||
The `user` who is on the deny list MUST NOT be able to submit gasless transactions.
|
||||
A user can regain access to gasless transactions only after being removed from the deny list.
|
||||
Escaping from the deny list is possible under the following conditions:
|
||||
|
||||
- **TTL expiration:** Deny list entries MAY be configured with an expiration time.
|
||||
If a deny list participation is not intended to be permanent, the entry is assigned a predefined time window.
|
||||
Upon expiration of this period, the user address is automatically considered removed from the deny list.
|
||||
The sequencer is responsible for checking expiration timestamps and removing expired user addresses from the deny list accordingly.
|
||||
- **Explicit removal:** In this type, removal from the deny list occurs in two cases: **(i)** when a user submits a transaction
|
||||
with a gas price exceeding the configured premium gas threshold, in which case the sequencer removes the user from the deny list,
|
||||
and **(ii)** through manual deletion performed by the Layer2 operator.
|
||||
|
||||
## 5. Decentralized Slashing
|
||||
|
||||
Decentralized slashing is a capability provided by specialized nodes,
|
||||
called `slashers`, which operate alongside sequencer-side RLN verifiers
|
||||
to externally detect RLN-based spam.
|
||||
|
||||
In the `RLN contract`, the user's `id-commitment` is stored as a mapping.
|
||||
The `slashers` receive all proofs by subscribing to the prover via gRPC.
|
||||
In the event of spam, any `slasher` can extract the `secret-hash`
|
||||
from the proof and submit it to the `RLN contract`.
|
||||
|
||||
`RLN Contract` does as following:
|
||||
|
||||
- Receives the `secret-hash` in plaintext
|
||||
- Calculates the `id-commitment` by hashing `secret-hash` with Poseidon hash.
|
||||
- Looks up whether the list includes the `id-commitment`; returns 1 if it does, 0 otherwise.
|
||||
- If it returns 1, the slasher who is the caller of the contract, is rewarded with Karma tokens.
|
||||
- The prover module listens for this activity (an event is sent by the smart contract when slashing)
|
||||
and drops the particular `id-commitment` from its local DB.
|
||||
- Upon detecting spam, the `RLN Contract` invokes the slashing function in the `Karma Contract`,
|
||||
which burns the spammer’s Karma tokens.
|
||||
|
||||
Note that the `secret-hash` values are derived from high-entropy randomness,
|
||||
which implies that all `id-commitment` values are unique.
|
||||
Plus, the spammers’ `id-commitment` are dropped from the list.
|
||||
Under these conditions, double slashing is not feasible.
|
||||
|
||||
### 5.1. Proof Aggregation Layer
|
||||
|
||||
Instead of having slashers connect directly to the prover,
|
||||
an intermediate aggregation layer is introduced between the prover and the slashers.
|
||||
An `aggregator` is an entity that subscribes to the prover via gRPC,
|
||||
collects RLN proofs and associated metadata, and forwards them to slashers.
|
||||
|
||||
The aggregator MUST:
|
||||
|
||||
- Subscribe to the prover module via gRPC to receive all RLN proofs and metadata.
|
||||
- Maintain an up-to-date list of active slashers and forward received proofs to each of them.
|
||||
- Be stateless with respect to slashing decisions. The aggregator is responsible only
|
||||
for proof distribution, not detection or submission.
|
||||
|
||||
To avoid a single point of failure, multiple aggregator instances MAY be deployed.
|
||||
Each aggregator instance operates independently and subscribes to the prover separately.
|
||||
Slashers MAY connect to one or more aggregators to ensure redundant proof delivery.
|
||||
|
||||
The prover module MUST NOT impose a connection limit on aggregators.
|
||||
Any entity MAY act as a slasher by connecting to an aggregator without restrictions
|
||||
under normal operating conditions.
|
||||
In cases where slasher capacity limits are exceeded,
|
||||
access control MAY be enforced based on the Karma balance of the requesting entity,
|
||||
prioritizing slashers with higher Karma amounts.
|
||||
|
||||
## References
|
||||
|
||||
- [Zerokit](https://github.com/vacp2p/zerokit)
|
||||
- [Linea](https://linea.build/)
|
||||
- [RLN-Prover](https://github.com/vacp2p/status-rln-prover)
|
||||
- [RLN Specification](https://lip.logos.co/ift-ts/raw/rln-v2.html)
|
||||
- [Multi-message_id burn RLN](https://lip.logos.co/ift-ts/raw/multi-message_id-burn-rln.html)
|
||||
@@ -1,514 +0,0 @@
|
||||
# Zerokit API
|
||||
|
||||
| Field | Value |
|
||||
| --- | --- |
|
||||
| Name | Zerokit API |
|
||||
| Slug | 142 |
|
||||
| Status | raw |
|
||||
| Category | Standards Track |
|
||||
| Editor | Vinh Trinh <vinh@status.im> |
|
||||
|
||||
<!-- timeline:start -->
|
||||
|
||||
## Timeline
|
||||
|
||||
- **2026-01-21** — [`70f3cfb`](https://github.com/logos-co/logos-lips/blob/70f3cfb4df4e9a94e56b1284e98ee1dc9df50ac7/docs/ift-ts/raw/zerokit-api.md) — chore: mdbook font fix (#266)
|
||||
|
||||
<!-- timeline:end -->
|
||||
|
||||
## Abstract
|
||||
|
||||
This document specifies the Zerokit API, an implementation of the RLN-V2 protocol.
|
||||
The specification covers the unified interface exposed through **native Rust**,
|
||||
C-compatible Foreign Function Interface (FFI) bindings,
|
||||
and WebAssembly (WASM) bindings.
|
||||
|
||||
## Motivation
|
||||
|
||||
The main goal of this RFC is to define the API contract,
|
||||
serialization formats,
|
||||
and architectural guidance for integrating the Zerokit library
|
||||
across all supported platforms.
|
||||
Zerokit is the reference implementation of the RLN-V2 protocol.
|
||||
|
||||
## Format Specification
|
||||
|
||||
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”,
|
||||
“SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document
|
||||
are to be interpreted as described in [2119](https://www.ietf.org/rfc/rfc2119.txt).
|
||||
|
||||
### Important Note
|
||||
|
||||
All terms and parameters used remain the same as in [RLN-V2](rln-v2.md) and [RLN-V1](32/rln-v1.md#technical-overview).
|
||||
|
||||
### Architecture Overview
|
||||
|
||||
Zerokit follows a layered architecture where
|
||||
the core RLN logic is implemented once in **Rust** and
|
||||
exposed through platform-specific bindings.
|
||||
The protocol layer handles zero-knowledge proof generation and verification,
|
||||
Merkle tree operations, and cryptographic primitives.
|
||||
This core is wrapped by three interface layers:
|
||||
**native Rust** for direct library integration,
|
||||
**FFI** for C-compatible bindings consumed by languages (such as C and Nim),
|
||||
and **WASM** for browser and Node.js environments.
|
||||
All three interfaces maintain functional parity and
|
||||
share identical serialization formats for inputs and outputs.
|
||||
|
||||
```text
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ Application Layer │
|
||||
└──────────┬───────────────┬───────────────┬──────────┘
|
||||
│ │ │
|
||||
┌──────▼───────┐ ┌─────▼─────┐ ┌───────▼─────┐
|
||||
│ FFI API │ │ WASM API │ │ Rust API │
|
||||
│ (C/Nim/..) │ │ (Browser) │ │ (Native) │
|
||||
└──────┬───────┘ └─────┬─────┘ └───────┬─────┘
|
||||
└───────────────┼───────────────┘
|
||||
│
|
||||
┌─────────▼─────────┐
|
||||
│ RLN Protocol │
|
||||
│ (Rust Core) │
|
||||
└───────────────────┘
|
||||
```
|
||||
|
||||
### Supported Features
|
||||
|
||||
Zerokit provides [compile-time feature flags](https://github.com/vacp2p/zerokit/blob/c35e62a63517b0d32e91677422de4603760e41fa/rln/Cargo.toml#L65) that
|
||||
select the Merkle tree storage backend,
|
||||
configure the RLN operational mode (e.g., stateful vs. stateless),
|
||||
and enable or disable parallel execution.
|
||||
|
||||
#### Merkle Tree Backends
|
||||
|
||||
`fullmerkletree` allocates the complete tree structure in memory.
|
||||
This backend provides the fastest performance but consumes the most memory.
|
||||
|
||||
`optimalmerkletree` uses sparse HashMap storage that only allocates nodes as needed.
|
||||
This backend balances performance and memory efficiency.
|
||||
|
||||
`pmtree` persists the tree to disk using a sled database.
|
||||
This backend enables state durability across process restarts.
|
||||
|
||||
#### Operational Modes
|
||||
|
||||
`stateless` disables the internal Merkle tree.
|
||||
Applications MUST provide the Merkle root and
|
||||
membership proof externally when generating proofs.
|
||||
|
||||
When `stateless` is not enabled,
|
||||
the library operates in **Stateful** mode and
|
||||
requires one of the Merkle tree backends.
|
||||
|
||||
#### Parallelization
|
||||
|
||||
`parallel` enables rayon-based parallel computation for
|
||||
proof generation and tree operations.
|
||||
|
||||
This flag SHOULD be enabled for end-user clients where
|
||||
fastest individual proof generation time is required.
|
||||
For server-side proof services handling multiple concurrent requests,
|
||||
this flag SHOULD be disabled and
|
||||
applications SHOULD use dedicated worker threads per proof instead.
|
||||
The worker thread approach provides significantly higher throughput for
|
||||
concurrent proof generation.
|
||||
|
||||
## The API
|
||||
|
||||
### Overview
|
||||
|
||||
The API exposes functional interfaces with strongly-typed parameters.
|
||||
All three platform bindings share the same function signatures,
|
||||
differing only in language-specific conventions.
|
||||
Function signatures documented below are from the Rust perspective.
|
||||
|
||||
- Rust: <https://github.com/vacp2p/zerokit/blob/master/rln/src/public.rs>
|
||||
- FFI: <https://github.com/vacp2p/zerokit/tree/master/rln/src/ffi>
|
||||
- WASM: <https://github.com/vacp2p/zerokit/tree/master/rln-wasm>
|
||||
|
||||
### Error Handling
|
||||
|
||||
Error handling differs across platform bindings.
|
||||
|
||||
For **native Rust**,
|
||||
functions return `Result<T, RLNError>` where `RLNError` is an enum
|
||||
representing specific error conditions.
|
||||
The enum variants provide type-safe error handling and
|
||||
pattern matching capabilities.
|
||||
|
||||
For **WASM** and **FFI** bindings,
|
||||
errors are returned as human-readable string messages.
|
||||
This simplifies cross-language error propagation at
|
||||
the cost of type safety.
|
||||
Applications consuming these bindings SHOULD parse error strings or
|
||||
use error message prefixes to distinguish error types when needed.
|
||||
|
||||
### Initialization
|
||||
|
||||
Functions with the same name but different signatures are **conditional compilation variants**.
|
||||
This means that multiple definitions exist in the source code,
|
||||
but only one variant is compiled and available at runtime based on the enabled feature flags.
|
||||
|
||||
`RLN::new(tree_depth, tree_config)` - *Available in Rust, FFI | Stateful mode*
|
||||
|
||||
- Creates a new RLN instance by loading circuit resources from the default folder.
|
||||
- The `tree_config` parameter accepts multiple types via the `TreeConfigInput` trait: a JSON string, a direct config object (with `pmtree` feature), or an empty string for defaults.
|
||||
|
||||
`RLN::new()` - *Available in Rust, FFI | Stateless mode*
|
||||
|
||||
- Creates a new stateless RLN instance by loading circuit resources from the default folder.
|
||||
|
||||
`RLN::new_with_params(tree_depth, zkey_data, graph_data, tree_config)` - *Available in Rust, FFI | Stateful mode*
|
||||
|
||||
- Creates a new RLN instance with pre-loaded circuit parameters passed as byte vectors.
|
||||
- The `tree_config` parameter accepts multiple types via the `TreeConfigInput` trait.
|
||||
|
||||
`RLN::new_with_params(zkey_data, graph_data)` - *Available in Rust, FFI | Stateless mode*
|
||||
|
||||
- Creates a new stateless RLN instance with pre-loaded circuit parameters.
|
||||
|
||||
`RLN::new_with_params(zkey_data)` - *Available in WASM | Stateless mode*
|
||||
|
||||
- Creates a new stateless RLN instance for WASM with pre-loaded zkey data.
|
||||
- Graph data is not required as witness calculation is handled externally in WASM environments (e.g., using [witness_calculator.js](https://github.com/vacp2p/zerokit/blob/master/rln-wasm/resources/witness_calculator.js)).
|
||||
|
||||
### Key Generation
|
||||
|
||||
`keygen()`
|
||||
|
||||
- Generates a random identity keypair returning `(identity_secret, id_commitment)`.
|
||||
|
||||
`seeded_keygen(seed)`
|
||||
|
||||
- Generates a deterministic identity keypair from a seed returning `(identity_secret, id_commitment)`.
|
||||
|
||||
`extended_keygen()`
|
||||
|
||||
- Generates a random extended identity keypair returning `(identity_trapdoor, identity_nullifier, identity_secret, id_commitment)`.
|
||||
|
||||
`extended_seeded_keygen(seed)`
|
||||
|
||||
- Generates a deterministic extended identity keypair from a seed returning `(identity_trapdoor, identity_nullifier, identity_secret, id_commitment)`.
|
||||
|
||||
### Merkle Tree Management
|
||||
|
||||
All tree management functions are only available when `stateless` feature is **NOT** enabled.
|
||||
|
||||
`set_tree(tree_depth)`
|
||||
|
||||
- Initializes the internal Merkle tree with the specified depth.
|
||||
- Leaves are set to the default zero value.
|
||||
|
||||
`set_leaf(index, leaf)`
|
||||
|
||||
- Sets a leaf value at the specified index.
|
||||
|
||||
`get_leaf(index)`
|
||||
|
||||
- Returns the leaf value at the specified index.
|
||||
|
||||
`set_leaves_from(index, leaves)`
|
||||
|
||||
- Sets multiple leaves starting from the specified index.
|
||||
- Updates `next_index` to `max(next_index, index + n)`.
|
||||
- If `n` leaves are passed, they will be set at positions `index`, `index+1`, ..., `index+n-1`.
|
||||
|
||||
`init_tree_with_leaves(leaves)`
|
||||
|
||||
- Resets the tree state to default and initializes it with the provided leaves starting from index 0.
|
||||
- Resets the internal `next_index` to 0 before setting the leaves.
|
||||
|
||||
`atomic_operation(index, leaves, indices)`
|
||||
|
||||
- Atomically inserts leaves starting from index and removes leaves at the specified indices.
|
||||
- Updates `next_index` to `max(next_index, index + n)` where `n` is the number of leaves inserted.
|
||||
|
||||
`set_next_leaf(leaf)`
|
||||
|
||||
- Sets a leaf at the next available index and increments `next_index`.
|
||||
- The leaf is set at the current `next_index` value, then `next_index` is incremented.
|
||||
|
||||
`delete_leaf(index)`
|
||||
|
||||
- Sets the leaf at the specified index to the default zero value.
|
||||
- Does not change the internal `next_index` value.
|
||||
|
||||
`leaves_set()`
|
||||
|
||||
- Returns the number of leaves that have been set in the tree.
|
||||
|
||||
`get_root()`
|
||||
|
||||
- Returns the current Merkle tree root.
|
||||
|
||||
`get_subtree_root(level, index)`
|
||||
|
||||
- Returns the root of a subtree at the specified level and index.
|
||||
|
||||
`get_merkle_proof(index)`
|
||||
|
||||
- Returns the Merkle proof for the leaf at the specified index as `(path_elements, identity_path_index)`.
|
||||
|
||||
`get_empty_leaves_indices()`
|
||||
|
||||
- Returns indices of leaves set to zero up to the final leaf that was set.
|
||||
|
||||
`set_metadata(metadata)`
|
||||
|
||||
- Stores arbitrary metadata in the RLN object for application use.
|
||||
- This metadata is not used by the RLN module.
|
||||
|
||||
`get_metadata()`
|
||||
|
||||
- Returns the metadata stored in the RLN object.
|
||||
|
||||
`flush()`
|
||||
|
||||
- Closes the connection to the Merkle tree database.
|
||||
- Should be called before dropping the RLN object when using persistent storage.
|
||||
|
||||
### Witness Construction
|
||||
|
||||
`RLNWitnessInput::new(identity_secret, user_message_limit, message_id, path_elements, identity_path_index, x, external_nullifier)`
|
||||
|
||||
- Constructs a witness input for proof generation.
|
||||
- Validates that `message_id <= user_message_limit` and `path_elements` and `identity_path_index` have the same length.
|
||||
|
||||
### Witness Calculation
|
||||
|
||||
For **native Rust** environments, witness calculation is handled internally by the proof generation functions.
|
||||
The circuit witness is computed from the `RLNWitnessInput` and passed to the zero-knowledge proof system.
|
||||
|
||||
For **WASM** environments, witness calculation must be performed externally using a JavaScript witness calculator.
|
||||
The workflow is:
|
||||
|
||||
1. Create a `WasmRLNWitnessInput` with the required parameters
|
||||
2. Export to JSON format using `toBigIntJson()` method
|
||||
3. Pass the JSON to an external JavaScript witness calculator
|
||||
4. Use the calculated witness with `generate_rln_proof_with_witness`
|
||||
|
||||
The witness calculator computes all intermediate values required by the RLN circuit.
|
||||
|
||||
### Proof Generation
|
||||
|
||||
`generate_zk_proof(witness)` - *Available in Rust, FFI*
|
||||
|
||||
- Generates a Groth16 zkSNARK proof from a witness.
|
||||
- Extract proof values separately using `proof_values_from_witness`.
|
||||
|
||||
`generate_rln_proof(witness)` - *Available in Rust, FFI*
|
||||
|
||||
- Generates a complete RLN proof returning both the zkSNARK proof and proof values as `(proof, proof_values)`.
|
||||
- Combines proof generation and proof values extraction.
|
||||
|
||||
`generate_rln_proof_with_witness(calculated_witness, witness)`
|
||||
|
||||
- Generates an RLN proof using a pre-calculated witness from an external witness calculator.
|
||||
- The `calculated_witness` should be a `Vec<BigInt>` obtained from the external witness calculator.
|
||||
- Returns `(proof, proof_values)`.
|
||||
- This is the primary proof generation method for **WASM** where witness calculation is handled by **JavaScript**.
|
||||
|
||||
### Proof Verification
|
||||
|
||||
`verify_zk_proof(proof, proof_values)`
|
||||
|
||||
- Verifies only the zkSNARK proof without root or signal validation.
|
||||
- Returns `true` if the proof is valid.
|
||||
|
||||
`verify_rln_proof(proof, proof_values, x)` - *Stateful mode*
|
||||
|
||||
- Verifies the proof against the internal Merkle tree root and validates that `x` matches the proof signal.
|
||||
- Returns an error if verification fails (invalid proof, invalid root, or invalid signal).
|
||||
|
||||
`verify_with_roots(proof, proof_values, x, roots)`
|
||||
|
||||
- Verifies the proof against a set of acceptable roots and validates the signal.
|
||||
- If the roots slice is empty, root verification is skipped.
|
||||
- Returns an error if verification fails.
|
||||
|
||||
### Slashing
|
||||
|
||||
`recover_id_secret(proof_values_1, proof_values_2)`
|
||||
|
||||
- Recovers the identity secret from two proof values that share the same external nullifier.
|
||||
- Used to detect and penalize rate limit violations.
|
||||
|
||||
### Hash Utilities
|
||||
|
||||
`poseidon_hash(inputs)`
|
||||
|
||||
- Computes the Poseidon hash of the input field elements.
|
||||
|
||||
`hash_to_field_le(input)`
|
||||
|
||||
- Hashes arbitrary bytes to a field element using little-endian byte order.
|
||||
|
||||
`hash_to_field_be(input)`
|
||||
|
||||
- Hashes arbitrary bytes to a field element using big-endian byte order.
|
||||
|
||||
### Serialization Utilities
|
||||
|
||||
`rln_witness_to_bytes_le` / `rln_witness_to_bytes_be`
|
||||
|
||||
- Serializes an RLN witness to bytes.
|
||||
|
||||
`bytes_le_to_rln_witness` / `bytes_be_to_rln_witness`
|
||||
|
||||
- Deserializes bytes to an RLN witness.
|
||||
|
||||
`rln_proof_to_bytes_le` / `rln_proof_to_bytes_be`
|
||||
|
||||
- Serializes an RLN proof to bytes.
|
||||
|
||||
`bytes_le_to_rln_proof` / `bytes_be_to_rln_proof`
|
||||
|
||||
- Deserializes bytes to an RLN proof.
|
||||
|
||||
`rln_proof_values_to_bytes_le` / `rln_proof_values_to_bytes_be`
|
||||
|
||||
- Serializes proof values to bytes.
|
||||
|
||||
`bytes_le_to_rln_proof_values` / `bytes_be_to_rln_proof_values`
|
||||
|
||||
- Deserializes bytes to proof values.
|
||||
|
||||
`fr_to_bytes_le` / `fr_to_bytes_be`
|
||||
|
||||
- Serializes a field element to 32 bytes.
|
||||
|
||||
`bytes_le_to_fr` / `bytes_be_to_fr`
|
||||
|
||||
- Deserializes 32 bytes to a field element.
|
||||
|
||||
`vec_fr_to_bytes_le` / `vec_fr_to_bytes_be`
|
||||
|
||||
- Serializes a vector of field elements to bytes.
|
||||
|
||||
`bytes_le_to_vec_fr` / `bytes_be_to_vec_fr`
|
||||
|
||||
- Deserializes bytes to a vector of field elements.
|
||||
|
||||
### WASM-Specific Notes
|
||||
|
||||
WASM bindings wrap the Rust API with JavaScript-compatible types. Key differences:
|
||||
|
||||
- Field elements are wrapped as `WasmFr` with `fromBytesLE`, `fromBytesBE`, `toBytesLE`, `toBytesBE` methods.
|
||||
- Vectors of field elements use `VecWasmFr` with `push`, `get`, `length` methods.
|
||||
- Identity generation uses `Identity.generate()` and `Identity.generateSeeded(seed)` static methods.
|
||||
- Extended identity uses `ExtendedIdentity.generate()` and `ExtendedIdentity.generateSeeded(seed)`.
|
||||
- Witness input uses `WasmRLNWitnessInput` constructor and `toBigIntJson()` for witness calculator integration.
|
||||
- Proof generation requires external witness calculation via `generateRLNProofWithWitness(calculatedWitness, witness)`.
|
||||
- When `parallel` feature is enabled, call `initThreadPool()` to initialize the thread pool.
|
||||
- Errors are returned as JavaScript strings that can be caught via try-catch blocks.
|
||||
|
||||
### FFI-Specific Notes
|
||||
|
||||
FFI bindings use C-compatible types with the `ffi_` prefix. Key differences:
|
||||
|
||||
- Field elements are wrapped as `CFr` with corresponding conversion functions.
|
||||
- Results use `CResult` or `CBoolResult` structs with `ok` and `err` fields.
|
||||
- Errors are returned as C-compatible strings in the `err` field of result structs.
|
||||
- Memory must be explicitly freed using `ffi_*_free` functions.
|
||||
- Vectors use `repr_c::Vec` with `ffi_vec_*` helper functions.
|
||||
- Configuration is passed via file path to a JSON configuration file.
|
||||
|
||||
## Usage Patterns
|
||||
|
||||
This section describes common deployment scenarios and
|
||||
the recommended API combinations for each.
|
||||
|
||||
### Stateful with Changing Root
|
||||
|
||||
Applies when membership changes over time with members joining and slashing continuously.
|
||||
|
||||
Applications MUST maintain a sliding window of recent roots externally.
|
||||
When members are added or removed via `set_leaf`, `delete_leaf`, or `atomic_operation`,
|
||||
capture the new root using `get_root` and append it to the history buffer.
|
||||
Verify incoming proofs using `verify_with_roots` with the root history buffer,
|
||||
accepting proofs valid against any recent root.
|
||||
|
||||
The window size depends on network propagation delays and epoch duration.
|
||||
|
||||
### Stateful with Fixed Root
|
||||
|
||||
Applies when membership is established once and remains static during an operation period.
|
||||
|
||||
Initialize the tree using `init_tree_with_leaves` with the complete membership set.
|
||||
No root history is required.
|
||||
Verify proofs using `verify_rln_proof` which checks against the internal tree root directly.
|
||||
|
||||
### Stateless
|
||||
|
||||
Applies when membership state is managed externally,
|
||||
such as by a smart contract or relay network.
|
||||
|
||||
Enable the `stateless` feature flag.
|
||||
Obtain Merkle proofs and valid roots from the external source.
|
||||
Pass externally provided `path_elements` and `identity_path_index` to `RLNWitnessInput::new`.
|
||||
Verify using `verify_with_roots` with externally provided roots.
|
||||
|
||||
### WASM Browser Integration
|
||||
|
||||
WASM environments require external witness calculation.
|
||||
Use `WasmRLNWitnessInput::toBigIntJson()` to export the witness for
|
||||
JavaScript witness calculators,
|
||||
then pass the result to `generateRLNProofWithWitness`.
|
||||
|
||||
When `parallel` feature is enabled,
|
||||
call `initThreadPool()` before proof operations.
|
||||
This requires COOP/COEP headers for SharedArrayBuffer support.
|
||||
|
||||
#### Epoch and Rate Limit Configuration
|
||||
|
||||
The external nullifier is computed as `poseidon_hash([epoch, rln_identifier])`.
|
||||
The `rln_identifier` is a field element that uniquely identifies your application (e.g., a hash of your app name).
|
||||
|
||||
All values that will be hashed MUST be represented as field elements.
|
||||
For converting arbitrary data to field elements,
|
||||
use `hash_to_field_le` or `hash_to_field_be` functions which internally use Poseidon hash.
|
||||
|
||||
Each application SHOULD use a unique `rln_identifier` to
|
||||
prevent cross-application nullifier collisions.
|
||||
|
||||
The `user_message_limit` in the rate commitment determines messages allowed per epoch. The `message_id` must be less than `user_message_limit` and
|
||||
should increment with each message.
|
||||
|
||||
Applications MUST persist the `message_id` counter to avoid accidental rate limit violations after restarts.
|
||||
|
||||
## Security/Privacy Considerations
|
||||
|
||||
The security of Zerokit depends on the correct implementation of the RLN-V2 protocol
|
||||
and the underlying zero-knowledge proof system.
|
||||
Applications MUST ensure that:
|
||||
|
||||
- Identity secrets are kept confidential and never transmitted or logged
|
||||
- The `message_id` counter is properly persisted to prevent accidental rate limit violations
|
||||
- External nullifiers are constructed correctly to prevent cross-application attacks
|
||||
- Merkle tree roots are validated when using stateless mode
|
||||
- Circuit parameters (zkey and graph data) are obtained from trusted sources
|
||||
|
||||
When using the `parallel` feature in WASM,
|
||||
applications MUST serve content with appropriate COOP/COEP headers to
|
||||
enable SharedArrayBuffer support securely.
|
||||
|
||||
The slashing mechanism exposes identity secrets when rate limits are violated.
|
||||
Applications SHOULD educate users about this risk and
|
||||
implement safeguards to prevent accidental violations.
|
||||
|
||||
## References
|
||||
|
||||
### Normative
|
||||
|
||||
- [RLN-V1 Specification](32/rln-v1.md) - Rate Limit Nullifier V1 protocol
|
||||
|
||||
### Informative
|
||||
|
||||
- [Zerokit GitHub Repository](https://github.com/vacp2p/zerokit) - Reference implementation
|
||||
- [RLN-V2 Specification](rln-v2.md) - Rate Limit Nullifier V2 protocol
|
||||
- [Sled Database](https://sled.rs) - Embedded database for persistent Merkle tree storage
|
||||
- [Witness Calculator](https://github.com/vacp2p/zerokit/blob/master/rln-wasm/resources/witness_calculator.js) - JavaScript witness calculator for WASM environments
|
||||
|
||||
## Copyright
|
||||
|
||||
Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/).
|
||||