commit f841a9814554792e6477f5be609b9aa64201b691 Author: Jenkins Date: Wed Jul 17 04:13:26 2024 +0000 Update documentation diff --git a/404.html b/404.html new file mode 100644 index 00000000..52e60189 --- /dev/null +++ b/404.html @@ -0,0 +1,26 @@ + + + + + +Page Not Found | Vac Research + + + + + + + + + + +
+
Skip to main content

Page Not Found

We could not find what you were looking for.
Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

+ + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 00000000..dce455d3 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +dev.vac.dev \ No newline at end of file diff --git a/GossipSub Improvements/index.html b/GossipSub Improvements/index.html new file mode 100644 index 00000000..d7c51adf --- /dev/null +++ b/GossipSub Improvements/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/Nescience-A-zkVM-leveraging-hiding-properties/index.html b/Nescience-A-zkVM-leveraging-hiding-properties/index.html new file mode 100644 index 00000000..c6a6a050 --- /dev/null +++ b/Nescience-A-zkVM-leveraging-hiding-properties/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/_og/155b0a17201a7353cb7a0b44848491c819704c1c.png b/_og/155b0a17201a7353cb7a0b44848491c819704c1c.png new file mode 100644 index 00000000..2c8926a0 Binary files /dev/null and b/_og/155b0a17201a7353cb7a0b44848491c819704c1c.png differ diff --git a/_og/20ce1dfd1cb5720107a286391bd0e2ed1cfc975c.png b/_og/20ce1dfd1cb5720107a286391bd0e2ed1cfc975c.png new file mode 100644 index 00000000..2e03c97f Binary files /dev/null and b/_og/20ce1dfd1cb5720107a286391bd0e2ed1cfc975c.png differ diff --git a/_og/25862de817a428dfdd6c662ee453f35e9e277db3.png b/_og/25862de817a428dfdd6c662ee453f35e9e277db3.png new file mode 100644 index 00000000..cedba89e Binary files /dev/null and b/_og/25862de817a428dfdd6c662ee453f35e9e277db3.png differ diff --git a/_og/25a9021010dcf8acedf95d00db7c92121f695cc1.png b/_og/25a9021010dcf8acedf95d00db7c92121f695cc1.png new file mode 100644 index 00000000..4d36e405 Binary files /dev/null and b/_og/25a9021010dcf8acedf95d00db7c92121f695cc1.png differ diff --git a/_og/2ad4545239f59aac174a1cab2945a014cb5b8bd5.png b/_og/2ad4545239f59aac174a1cab2945a014cb5b8bd5.png new file mode 100644 index 00000000..be0d6926 Binary files /dev/null and b/_og/2ad4545239f59aac174a1cab2945a014cb5b8bd5.png differ diff --git 
a/_og/2c73972e891926e787c283280001cad7293ae28e.png b/_og/2c73972e891926e787c283280001cad7293ae28e.png new file mode 100644 index 00000000..913edbb0 Binary files /dev/null and b/_og/2c73972e891926e787c283280001cad7293ae28e.png differ diff --git a/_og/2f143dc8e139f7218879ecc0caeabcceaca9eea3.png b/_og/2f143dc8e139f7218879ecc0caeabcceaca9eea3.png new file mode 100644 index 00000000..aeb94c34 Binary files /dev/null and b/_og/2f143dc8e139f7218879ecc0caeabcceaca9eea3.png differ diff --git a/_og/2f550d2e360bfcc02b797171d6b8f7b85b0483b6.png b/_og/2f550d2e360bfcc02b797171d6b8f7b85b0483b6.png new file mode 100644 index 00000000..2292707b Binary files /dev/null and b/_og/2f550d2e360bfcc02b797171d6b8f7b85b0483b6.png differ diff --git a/_og/48145b063be6b05adcecc3c973bd7ae8dc353090.png b/_og/48145b063be6b05adcecc3c973bd7ae8dc353090.png new file mode 100644 index 00000000..fec4958c Binary files /dev/null and b/_og/48145b063be6b05adcecc3c973bd7ae8dc353090.png differ diff --git a/_og/4c430d9c145e087cbe8aa9e1d5d0aabc8d3b463c.png b/_og/4c430d9c145e087cbe8aa9e1d5d0aabc8d3b463c.png new file mode 100644 index 00000000..765095dc Binary files /dev/null and b/_og/4c430d9c145e087cbe8aa9e1d5d0aabc8d3b463c.png differ diff --git a/_og/4fb0f69300a9c3471222cb2331fff338fdee1324.png b/_og/4fb0f69300a9c3471222cb2331fff338fdee1324.png new file mode 100644 index 00000000..5995bf75 Binary files /dev/null and b/_og/4fb0f69300a9c3471222cb2331fff338fdee1324.png differ diff --git a/_og/5013eb600c6b7d4cf7450a8586491fb0899993f6.png b/_og/5013eb600c6b7d4cf7450a8586491fb0899993f6.png new file mode 100644 index 00000000..83c705e1 Binary files /dev/null and b/_og/5013eb600c6b7d4cf7450a8586491fb0899993f6.png differ diff --git a/_og/5277bcb9b4ce26c094a69b8680b82f95b2a1861c.png b/_og/5277bcb9b4ce26c094a69b8680b82f95b2a1861c.png new file mode 100644 index 00000000..8e7d2302 Binary files /dev/null and b/_og/5277bcb9b4ce26c094a69b8680b82f95b2a1861c.png differ diff --git 
a/_og/5b4d0998a5ee97473506bc460b87d66e2baf5980.png b/_og/5b4d0998a5ee97473506bc460b87d66e2baf5980.png new file mode 100644 index 00000000..5be24f22 Binary files /dev/null and b/_og/5b4d0998a5ee97473506bc460b87d66e2baf5980.png differ diff --git a/_og/6ed71ddd5cb47aede5f4435ef75d558887c12978.png b/_og/6ed71ddd5cb47aede5f4435ef75d558887c12978.png new file mode 100644 index 00000000..97e6d1eb Binary files /dev/null and b/_og/6ed71ddd5cb47aede5f4435ef75d558887c12978.png differ diff --git a/_og/719e4ba5a4ed9ba85f34f968395fc2bffd666795.png b/_og/719e4ba5a4ed9ba85f34f968395fc2bffd666795.png new file mode 100644 index 00000000..807912ea Binary files /dev/null and b/_og/719e4ba5a4ed9ba85f34f968395fc2bffd666795.png differ diff --git a/_og/759fd40efa5df89d67147c46bbbb354ceadb7981.png b/_og/759fd40efa5df89d67147c46bbbb354ceadb7981.png new file mode 100644 index 00000000..7188f847 Binary files /dev/null and b/_og/759fd40efa5df89d67147c46bbbb354ceadb7981.png differ diff --git a/_og/78309406172987d2a2e49f052c1abbe7dbe60e65.png b/_og/78309406172987d2a2e49f052c1abbe7dbe60e65.png new file mode 100644 index 00000000..d5565bab Binary files /dev/null and b/_og/78309406172987d2a2e49f052c1abbe7dbe60e65.png differ diff --git a/_og/86a1b68e624538beff672092e8cbbce864456dda.png b/_og/86a1b68e624538beff672092e8cbbce864456dda.png new file mode 100644 index 00000000..a2088d57 Binary files /dev/null and b/_og/86a1b68e624538beff672092e8cbbce864456dda.png differ diff --git a/_og/87f3d6931e6bc62797c7ef32398316194c1bd923.png b/_og/87f3d6931e6bc62797c7ef32398316194c1bd923.png new file mode 100644 index 00000000..141e6ea5 Binary files /dev/null and b/_og/87f3d6931e6bc62797c7ef32398316194c1bd923.png differ diff --git a/_og/8818abbc1364759c3680043de1c8f5c452ec195a.png b/_og/8818abbc1364759c3680043de1c8f5c452ec195a.png new file mode 100644 index 00000000..dea8ed4e Binary files /dev/null and b/_og/8818abbc1364759c3680043de1c8f5c452ec195a.png differ diff --git 
a/_og/8a7292c4f736c1537aadb877afdccf17241ee14b.png b/_og/8a7292c4f736c1537aadb877afdccf17241ee14b.png new file mode 100644 index 00000000..8c18ca41 Binary files /dev/null and b/_og/8a7292c4f736c1537aadb877afdccf17241ee14b.png differ diff --git a/_og/8ab89a7876190b5a80d20acf3792eed1632b3176.png b/_og/8ab89a7876190b5a80d20acf3792eed1632b3176.png new file mode 100644 index 00000000..9e5b6009 Binary files /dev/null and b/_og/8ab89a7876190b5a80d20acf3792eed1632b3176.png differ diff --git a/_og/96384e501123c5045cd44e97827e443a28520ed9.png b/_og/96384e501123c5045cd44e97827e443a28520ed9.png new file mode 100644 index 00000000..e7ae0a73 Binary files /dev/null and b/_og/96384e501123c5045cd44e97827e443a28520ed9.png differ diff --git a/_og/9d82bd34b4da2d2ef8b0c71a989a5b9e79d1e49c.png b/_og/9d82bd34b4da2d2ef8b0c71a989a5b9e79d1e49c.png new file mode 100644 index 00000000..b56762d6 Binary files /dev/null and b/_og/9d82bd34b4da2d2ef8b0c71a989a5b9e79d1e49c.png differ diff --git a/_og/a4aefb44f64684374ea3602d23a82f182822abca.png b/_og/a4aefb44f64684374ea3602d23a82f182822abca.png new file mode 100644 index 00000000..82238469 Binary files /dev/null and b/_og/a4aefb44f64684374ea3602d23a82f182822abca.png differ diff --git a/_og/ab6119a81316093d616e89b83d9720ba0009eec4.png b/_og/ab6119a81316093d616e89b83d9720ba0009eec4.png new file mode 100644 index 00000000..9cd365d5 Binary files /dev/null and b/_og/ab6119a81316093d616e89b83d9720ba0009eec4.png differ diff --git a/_og/aca0aa4197ad1a1adebb40cd3e25f40f1f44a118.png b/_og/aca0aa4197ad1a1adebb40cd3e25f40f1f44a118.png new file mode 100644 index 00000000..e699bfbf Binary files /dev/null and b/_og/aca0aa4197ad1a1adebb40cd3e25f40f1f44a118.png differ diff --git a/_og/b0ee154cd20300127a1fd8ca6f9e8d0bc06bb522.png b/_og/b0ee154cd20300127a1fd8ca6f9e8d0bc06bb522.png new file mode 100644 index 00000000..6a767215 Binary files /dev/null and b/_og/b0ee154cd20300127a1fd8ca6f9e8d0bc06bb522.png differ diff --git 
a/_og/b390ad99a1b62447645b5f6d241475c553cc60cd.png b/_og/b390ad99a1b62447645b5f6d241475c553cc60cd.png new file mode 100644 index 00000000..b14117e2 Binary files /dev/null and b/_og/b390ad99a1b62447645b5f6d241475c553cc60cd.png differ diff --git a/_og/b61e1e14e1c90c20c11ca42120e1439898e1528c.png b/_og/b61e1e14e1c90c20c11ca42120e1439898e1528c.png new file mode 100644 index 00000000..7094a8b4 Binary files /dev/null and b/_og/b61e1e14e1c90c20c11ca42120e1439898e1528c.png differ diff --git a/_og/b917bd001fd4bc6351b80cbf938f898a4a7c8d17.png b/_og/b917bd001fd4bc6351b80cbf938f898a4a7c8d17.png new file mode 100644 index 00000000..3f4db1e2 Binary files /dev/null and b/_og/b917bd001fd4bc6351b80cbf938f898a4a7c8d17.png differ diff --git a/_og/c05f83f42c7cf8c705e42288377b3e1268e41c4c.png b/_og/c05f83f42c7cf8c705e42288377b3e1268e41c4c.png new file mode 100644 index 00000000..d4c2f506 Binary files /dev/null and b/_og/c05f83f42c7cf8c705e42288377b3e1268e41c4c.png differ diff --git a/_og/c4bd246dc4b3b867d7c0a2d4651a36dec5b7d1ff.png b/_og/c4bd246dc4b3b867d7c0a2d4651a36dec5b7d1ff.png new file mode 100644 index 00000000..a9e36eb6 Binary files /dev/null and b/_og/c4bd246dc4b3b867d7c0a2d4651a36dec5b7d1ff.png differ diff --git a/_og/c54198631ecabf6cc6b0ed357a7e7761c1ad7ba5.png b/_og/c54198631ecabf6cc6b0ed357a7e7761c1ad7ba5.png new file mode 100644 index 00000000..ab4c79f1 Binary files /dev/null and b/_og/c54198631ecabf6cc6b0ed357a7e7761c1ad7ba5.png differ diff --git a/_og/c9b48cb9beba3cc3c60952e3bae6dc9d11bd6f47.png b/_og/c9b48cb9beba3cc3c60952e3bae6dc9d11bd6f47.png new file mode 100644 index 00000000..c093c6c3 Binary files /dev/null and b/_og/c9b48cb9beba3cc3c60952e3bae6dc9d11bd6f47.png differ diff --git a/_og/d032ccad5031ebf425b65e61e61953ee7c3a4877.png b/_og/d032ccad5031ebf425b65e61e61953ee7c3a4877.png new file mode 100644 index 00000000..ed6c0537 Binary files /dev/null and b/_og/d032ccad5031ebf425b65e61e61953ee7c3a4877.png differ diff --git 
a/_og/d79cf273f9899be2eed536095869952c6912fb37.png b/_og/d79cf273f9899be2eed536095869952c6912fb37.png new file mode 100644 index 00000000..04a24339 Binary files /dev/null and b/_og/d79cf273f9899be2eed536095869952c6912fb37.png differ diff --git a/_og/e40218f393902a10518de47edfc810879e937ace.png b/_og/e40218f393902a10518de47edfc810879e937ace.png new file mode 100644 index 00000000..0532ba8b Binary files /dev/null and b/_og/e40218f393902a10518de47edfc810879e937ace.png differ diff --git a/_og/e5bc60f1045ecf004ed9c230feb5326d2eddcd26.png b/_og/e5bc60f1045ecf004ed9c230feb5326d2eddcd26.png new file mode 100644 index 00000000..61381e41 Binary files /dev/null and b/_og/e5bc60f1045ecf004ed9c230feb5326d2eddcd26.png differ diff --git a/_og/e86d6f5bc629b967df32771853364224240610e4.png b/_og/e86d6f5bc629b967df32771853364224240610e4.png new file mode 100644 index 00000000..99522533 Binary files /dev/null and b/_og/e86d6f5bc629b967df32771853364224240610e4.png differ diff --git a/_og/eaaf36083bf7ce7be7a04999f3318810e3d216c3.png b/_og/eaaf36083bf7ce7be7a04999f3318810e3d216c3.png new file mode 100644 index 00000000..4e89d892 Binary files /dev/null and b/_og/eaaf36083bf7ce7be7a04999f3318810e3d216c3.png differ diff --git a/_og/eb3da81173286446c2e0940a695d829a165383a1.png b/_og/eb3da81173286446c2e0940a695d829a165383a1.png new file mode 100644 index 00000000..81f82159 Binary files /dev/null and b/_og/eb3da81173286446c2e0940a695d829a165383a1.png differ diff --git a/_og/f0dc2f9c7351165f486821509b7c281775d6e174.png b/_og/f0dc2f9c7351165f486821509b7c281775d6e174.png new file mode 100644 index 00000000..5000ab78 Binary files /dev/null and b/_og/f0dc2f9c7351165f486821509b7c281775d6e174.png differ diff --git a/_og/f278fab3c548f92716b3878a00bcf8c9b707fa60.png b/_og/f278fab3c548f92716b3878a00bcf8c9b707fa60.png new file mode 100644 index 00000000..41731e99 Binary files /dev/null and b/_og/f278fab3c548f92716b3878a00bcf8c9b707fa60.png differ diff --git 
a/_og/f56275d6f463a5db79d44003d33ab32143f55ab3.png b/_og/f56275d6f463a5db79d44003d33ab32143f55ab3.png new file mode 100644 index 00000000..e67993ad Binary files /dev/null and b/_og/f56275d6f463a5db79d44003d33ab32143f55ab3.png differ diff --git a/_og/fc6becd739c2e826a04a7dd5aef0c610158cb0cb.png b/_og/fc6becd739c2e826a04a7dd5aef0c610158cb0cb.png new file mode 100644 index 00000000..a92ef4f7 Binary files /dev/null and b/_og/fc6becd739c2e826a04a7dd5aef0c610158cb0cb.png differ diff --git a/archive/index.html b/archive/index.html new file mode 100644 index 00000000..f9948b7e --- /dev/null +++ b/archive/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/assets/css/styles.08d697e6.css b/assets/css/styles.08d697e6.css new file mode 100644 index 00000000..11ea1b64 --- /dev/null +++ b/assets/css/styles.08d697e6.css @@ -0,0 +1 @@ +.col,.container{padding:0 var(--ifm-spacing-horizontal);width:100%}.markdown>h2,.markdown>h3,.markdown>h4,.markdown>h5,.markdown>h6{margin-bottom:calc(var(--ifm-heading-vertical-rhythm-bottom)*var(--ifm-leading))}.markdown li,body{word-wrap:break-word}body,ol ol,ol ul,ul ol,ul ul{margin:0}blockquote,pre{margin:0 0 var(--ifm-spacing-vertical)}.breadcrumbs__link,.button{transition-timing-function:var(--ifm-transition-timing-default)}.button,code{vertical-align:middle}.button--outline.button--active,.button--outline:active,.button--outline:hover,:root{--ifm-button-color:var(--ifm-font-color-base-inverse)}.menu__link:hover,a{transition:color var(--ifm-transition-fast) var(--ifm-transition-timing-default)}.navbar--dark,:root{--ifm-navbar-link-hover-color:var(--ifm-color-primary)}:root,html[data-theme=dark]{--ifm-color-emphasis-500:var(--ifm-color-gray-500)}[data-theme=dark],html[data-theme=dark]{--ifm-color-scheme:dark}:root,[data-theme=light]{--ifm-color-scheme:light}.menu,.navbar-sidebar,body,html{overflow-x:hidden}code,pre 
code{--lsd-typography-generic-font-family:monospace}.button,.dropdown__link,.footer__item,.text--truncate{white-space:nowrap}.toggleButton_dl49,html{-webkit-tap-highlight-color:transparent}*,.buttonGroup_Qu4e button,.dropdownNavbarItem_o23I,.root_xZfc,.toggle_K23S{box-sizing:border-box}.clean-list,.containsTaskList_mC6p,.dropdown__menu,.menu__list,.root_N57j ul li{list-style:none}:root{--ifm-dark-value:10%;--ifm-darker-value:15%;--ifm-darkest-value:30%;--ifm-light-value:15%;--ifm-lighter-value:30%;--ifm-lightest-value:50%;--ifm-contrast-background-value:90%;--ifm-contrast-foreground-value:70%;--ifm-contrast-background-dark-value:70%;--ifm-contrast-foreground-dark-value:90%;--ifm-color-primary:#3578e5;--ifm-color-secondary:#ebedf0;--ifm-color-success:#00a400;--ifm-color-info:#54c7ec;--ifm-color-warning:#ffba00;--ifm-color-danger:#fa383e;--ifm-color-primary-dark:#306cce;--ifm-color-primary-darker:#2d66c3;--ifm-color-primary-darkest:#2554a0;--ifm-color-primary-light:#538ce9;--ifm-color-primary-lighter:#72a1ed;--ifm-color-primary-lightest:#9abcf2;--ifm-color-primary-contrast-background:#ebf2fc;--ifm-color-primary-contrast-foreground:#102445;--ifm-color-secondary-dark:#d4d5d8;--ifm-color-secondary-darker:#c8c9cc;--ifm-color-secondary-darkest:#a4a6a8;--ifm-color-secondary-light:#eef0f2;--ifm-color-secondary-lighter:#f1f2f5;--ifm-color-secondary-lightest:#f5f6f8;--ifm-color-secondary-contrast-background:#fdfdfe;--ifm-color-secondary-contrast-foreground:#474748;--ifm-color-success-dark:#009400;--ifm-color-success-darker:#008b00;--ifm-color-success-darkest:#007300;--ifm-color-success-light:#26b226;--ifm-color-success-lighter:#4dbf4d;--ifm-color-success-lightest:#80d280;--ifm-color-success-contrast-background:#e6f6e6;--ifm-color-success-contrast-foreground:#003100;--ifm-color-info-dark:#4cb3d4;--ifm-color-info-darker:#47a9c9;--ifm-color-info-darkest:#3b8ba5;--ifm-color-info-light:#6ecfef;--ifm-color-info-lighter:#87d8f2;--ifm-color-info-lightest:#aae3f6;--ifm-color-info-contr
ast-background:#eef9fd;--ifm-color-info-contrast-foreground:#193c47;--ifm-color-warning-dark:#e6a700;--ifm-color-warning-darker:#d99e00;--ifm-color-warning-darkest:#b38200;--ifm-color-warning-light:#ffc426;--ifm-color-warning-lighter:#ffcf4d;--ifm-color-warning-lightest:#ffdd80;--ifm-color-warning-contrast-background:#fff8e6;--ifm-color-warning-contrast-foreground:#4d3800;--ifm-color-danger-dark:#e13238;--ifm-color-danger-darker:#d53035;--ifm-color-danger-darkest:#af272b;--ifm-color-danger-light:#fb565b;--ifm-color-danger-lighter:#fb7478;--ifm-color-danger-lightest:#fd9c9f;--ifm-color-danger-contrast-background:#ffebec;--ifm-color-danger-contrast-foreground:#4b1113;--ifm-color-gray-0:var(--ifm-color-white);--ifm-color-gray-100:#f5f6f7;--ifm-color-gray-200:#ebedf0;--ifm-color-gray-300:#dadde1;--ifm-color-gray-400:#ccd0d5;--ifm-color-gray-500:#bec3c9;--ifm-color-gray-600:#8d949e;--ifm-color-gray-700:#606770;--ifm-color-gray-800:#444950;--ifm-color-gray-900:#1c1e21;--ifm-color-emphasis-0:var(--ifm-color-gray-0);--ifm-color-emphasis-100:var(--ifm-color-gray-100);--ifm-color-emphasis-200:var(--ifm-color-gray-200);--ifm-color-emphasis-300:var(--ifm-color-gray-300);--ifm-color-emphasis-400:var(--ifm-color-gray-400);--ifm-color-emphasis-600:var(--ifm-color-gray-600);--ifm-color-emphasis-700:var(--ifm-color-gray-700);--ifm-color-emphasis-800:var(--ifm-color-gray-800);--ifm-color-emphasis-900:var(--ifm-color-gray-900);--ifm-color-emphasis-1000:var(--ifm-color-gray-1000);--ifm-color-content:var(--ifm-color-emphasis-900);--ifm-color-content-inverse:var(--ifm-color-emphasis-0);--ifm-color-content-secondary:#525860;--ifm-background-color:#0000;--ifm-background-surface-color:var(--ifm-color-content-inverse);--ifm-global-radius:0.4rem;--ifm-font-color-base:var(--ifm-color-content);--ifm-font-color-base-inverse:var(--ifm-color-content-inverse);--ifm-font-color-secondary:var(--ifm-color-content-secondary);--ifm-font-family-base:system-ui,-apple-system,Segoe 
UI,Roboto,Ubuntu,Cantarell,Noto Sans,sans-serif,BlinkMacSystemFont,"Segoe UI",Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";--ifm-font-weight-light:300;--ifm-font-weight-semibold:500;--ifm-font-weight-bold:700;--ifm-line-height-base:1.65;--ifm-global-spacing:1rem;--ifm-spacing-horizontal:var(--ifm-global-spacing);--ifm-transition-fast:200ms;--ifm-transition-slow:400ms;--ifm-transition-timing-default:cubic-bezier(0.08,0.52,0.52,1);--ifm-global-shadow-lw:0 1px 2px 0 #0000001a;--ifm-global-shadow-md:0 5px 40px #0003;--ifm-global-shadow-tl:0 12px 28px 0 #0003,0 2px 4px 0 #0000001a;--ifm-z-index-dropdown:100;--ifm-z-index-fixed:200;--ifm-z-index-overlay:400;--ifm-container-width:1140px;--ifm-container-width-xl:1320px;--ifm-code-background:#f6f7f8;--ifm-code-border-radius:var(--ifm-global-radius);--ifm-code-font-size:90%;--ifm-code-padding-horizontal:0.1rem;--ifm-code-padding-vertical:0.1rem;--ifm-pre-background:var(--ifm-code-background);--ifm-pre-border-radius:var(--ifm-code-border-radius);--ifm-pre-color:inherit;--ifm-pre-line-height:1.45;--ifm-pre-padding:1rem;--ifm-heading-color:inherit;--ifm-heading-margin-top:0;--ifm-heading-margin-bottom:var(--ifm-spacing-vertical);--ifm-heading-font-family:var(--ifm-font-family-base);--ifm-heading-font-weight:var(--ifm-font-weight-bold);--ifm-heading-line-height:1.25;--ifm-h1-font-size:2rem;--ifm-h2-font-size:1.5rem;--ifm-h3-font-size:1.25rem;--ifm-h4-font-size:1rem;--ifm-h5-font-size:0.875rem;--ifm-h6-font-size:0.85rem;--ifm-leading:calc(var(--ifm-leading-desktop)*1rem);--ifm-table-cell-padding:0.75rem;--ifm-table-background:#0000;--ifm-table-stripe-background:#00000008;--ifm-table-border-width:1px;--ifm-table-border-color:var(--ifm-color-emphasis-300);--ifm-table-head-background:inherit;--ifm-table-head-color:inherit;--ifm-table-head-font-weight:var(--ifm-font-weight-bold);--ifm-table-cell-color:inherit;--ifm-link-color:var(--ifm-color-primary);--ifm-link-hover-color:var(--ifm-link-color);--i
fm-link-hover-decoration:underline;--ifm-blockquote-font-size:var(--ifm-font-size-base);--ifm-blockquote-border-left-width:2px;--ifm-blockquote-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-blockquote-padding-vertical:0;--ifm-blockquote-color:var(--ifm-color-emphasis-800);--ifm-blockquote-border-color:var(--ifm-color-emphasis-300);--ifm-hr-background-color:var(--ifm-color-emphasis-500);--ifm-scrollbar-track-background-color:#f1f1f1;--ifm-scrollbar-thumb-background-color:silver;--ifm-scrollbar-thumb-hover-background-color:#a7a7a7;--ifm-alert-background-color:inherit;--ifm-alert-border-color:inherit;--ifm-alert-border-radius:var(--ifm-global-radius);--ifm-alert-color:var(--ifm-font-color-base);--ifm-badge-background-color:inherit;--ifm-badge-border-color:inherit;--ifm-badge-color:var(--ifm-color-white);--ifm-breadcrumb-color-active:var(--ifm-color-primary);--ifm-breadcrumb-item-background-active:var(--ifm-hover-overlay);--ifm-button-background-color:inherit;--ifm-button-border-color:var(--ifm-button-background-color);--ifm-button-border-width:var(--ifm-global-border-width);--ifm-button-font-weight:var(--ifm-font-weight-bold);--ifm-button-padding-horizontal:1.5rem;--ifm-button-padding-vertical:0.375rem;--ifm-card-background-color:var(--ifm-background-surface-color);--ifm-card-border-radius:calc(var(--ifm-global-radius)*2);--ifm-card-horizontal-spacing:var(--ifm-global-spacing);--ifm-card-vertical-spacing:var(--ifm-global-spacing);--ifm-toc-border-color:var(--ifm-color-emphasis-300);--ifm-toc-link-color:var(--ifm-color-content-secondary);--ifm-toc-padding-vertical:0.5rem;--ifm-toc-padding-horizontal:0.5rem;--ifm-dropdown-background-color:var(--ifm-background-surface-color);--ifm-dropdown-font-weight:var(--ifm-font-weight-semibold);--ifm-dropdown-link-color:var(--ifm-font-color-base);--ifm-dropdown-hover-background-color:var(--ifm-hover-overlay);--ifm-footer-background-color:var(--ifm-color-emphasis-100);--ifm-footer-color:inherit;--ifm-footer-link-color:var(--i
fm-color-emphasis-700);--ifm-footer-link-hover-color:var(--ifm-color-primary);--ifm-footer-link-horizontal-spacing:0.5rem;--ifm-footer-padding-horizontal:calc(var(--ifm-spacing-horizontal)*2);--ifm-footer-padding-vertical:calc(var(--ifm-spacing-vertical)*2);--ifm-footer-title-color:inherit;--ifm-footer-logo-max-width:min(30rem,90vw);--ifm-hero-background-color:var(--ifm-background-surface-color);--ifm-hero-text-color:var(--ifm-color-emphasis-800);--ifm-menu-color:var(--ifm-color-emphasis-700);--ifm-menu-color-active:var(--ifm-color-primary);--ifm-menu-color-background-active:var(--ifm-hover-overlay);--ifm-menu-color-background-hover:var(--ifm-hover-overlay);--ifm-menu-link-padding-horizontal:0.75rem;--ifm-navbar-background-color:var(--ifm-background-surface-color);--ifm-navbar-height:3.75rem;--ifm-navbar-link-color:var(--ifm-font-color-base);--ifm-navbar-link-active-color:var(--ifm-link-color);--ifm-navbar-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-navbar-padding-vertical:calc(var(--ifm-spacing-vertical)*0.5);--ifm-navbar-search-input-background-color:var(--ifm-color-emphasis-200);--ifm-navbar-search-input-color:var(--ifm-color-emphasis-800);--ifm-navbar-search-input-placeholder-color:var(--ifm-color-emphasis-500);--ifm-navbar-sidebar-width:83vw;--ifm-pagination-color-active:var(--ifm-color-primary);--ifm-pagination-nav-color-hover:var(--ifm-color-primary);--ifm-pills-color-active:var(--ifm-color-primary);--ifm-pills-color-background-active:var(--ifm-hover-overlay);--ifm-tabs-color:var(--ifm-font-color-secondary);--ifm-tabs-color-active:var(--ifm-color-primary);--ifm-tabs-color-active-border:var(--ifm-tabs-color-active)}.badge--danger,.badge--info,.badge--primary,.badge--secondary,.badge--success,.badge--warning{--ifm-badge-border-color:var(--ifm-badge-background-color)}.button--link,.button--outline{--ifm-button-background-color:#0000}html{-webkit-font-smoothing:antialiased;-webkit-text-size-adjust:100%;text-size-adjust:100%;background-color:var(--ifm-b
ackground-color);color:var(--ifm-font-color-base);color-scheme:var(--ifm-color-scheme);font:var(--ifm-font-size-base)/var(--ifm-line-height-base) var(--ifm-font-family-base);text-rendering:optimizelegibility}body{background-color:rgb(var(--lsd-surface-primary));height:auto!important}iframe{border:0;color-scheme:auto}.container{margin:0 auto;max-width:var(--ifm-container-width)}.container--fluid{max-width:inherit}.row{display:flex;flex-wrap:wrap;margin:0 calc(var(--ifm-spacing-horizontal)*-1)}.blog-archive-page .main-wrapper main>section,.margin-top--none,.margin-vert--none{margin-top:0!important}.margin-bottom--none,.margin-vert--none,.markdown>:last-child{margin-bottom:0!important}.row--no-gutters{margin-left:0;margin-right:0}.margin-horiz--none,.margin-right--none{margin-right:0!important}.row--no-gutters>.col{padding-left:0;padding-right:0}.row--align-top{align-items:flex-start}.row--align-bottom{align-items:flex-end}.row--align-center{align-items:center}.row--align-stretch{align-items:stretch}.row--align-baseline{align-items:baseline}.col{--ifm-col-width:100%;flex:1 0;margin-left:0;max-width:var(--ifm-col-width)}.padding-bottom--none,.padding-vert--none{padding-bottom:0!important}.padding-top--none,.padding-vert--none{padding-top:0!important}.padding-horiz--none,.padding-left--none{padding-left:0!important}.padding-horiz--none,.padding-right--none{padding-right:0!important}.col[class*=col--]{flex:0 0 
var(--ifm-col-width)}.col--1{--ifm-col-width:8.33333%}.col--offset-1{margin-left:8.33333%}.col--2{--ifm-col-width:16.66667%}.col--offset-2{margin-left:16.66667%}.col--3{--ifm-col-width:25%}.col--offset-3{margin-left:25%}.col--4{--ifm-col-width:33.33333%}.col--offset-4{margin-left:33.33333%}.col--5{--ifm-col-width:41.66667%}.col--offset-5{margin-left:41.66667%}.col--6{--ifm-col-width:50%}.col--offset-6{margin-left:50%}.col--7{--ifm-col-width:58.33333%}.col--offset-7{margin-left:58.33333%}.col--8{--ifm-col-width:66.66667%}.col--offset-8{margin-left:66.66667%}.col--9{--ifm-col-width:75%}.col--offset-9{margin-left:75%}.col--10{--ifm-col-width:83.33333%}.col--offset-10{margin-left:83.33333%}.col--11{--ifm-col-width:91.66667%}.col--offset-11{margin-left:91.66667%}.col--12{--ifm-col-width:100%}.col--offset-12{margin-left:100%}.blog-wrapper main,.margin-horiz--none,.margin-left--none{margin-left:0!important}.margin--none{margin:0!important}.margin-bottom--xs,.margin-vert--xs{margin-bottom:.25rem!important}.margin-top--xs,.margin-vert--xs{margin-top:.25rem!important}.margin-horiz--xs,.margin-left--xs{margin-left:.25rem!important}.margin-horiz--xs,.margin-right--xs{margin-right:.25rem!important}.margin--xs{margin:.25rem!important}.margin-bottom--sm,.margin-vert--sm{margin-bottom:.5rem!important}.margin-top--sm,.margin-vert--sm{margin-top:.5rem!important}.margin-horiz--sm,.margin-left--sm{margin-left:.5rem!important}.margin-horiz--sm,.margin-right--sm{margin-right:.5rem!important}.margin--sm{margin:.5rem!important}.margin-bottom--md,.margin-vert--md{margin-bottom:1rem!important}.margin-top--md,.margin-vert--md{margin-top:1rem!important}.margin-horiz--md,.margin-left--md{margin-left:1rem!important}.margin-horiz--md,.margin-right--md{margin-right:1rem!important}.margin--md{margin:1rem!important}.margin-bottom--lg,.margin-vert--lg{margin-bottom:2rem!important}.margin-top--lg,.margin-vert--lg{margin-top:2rem!important}.margin-horiz--lg,.margin-left--lg{margin-left:2rem!important}.
margin-horiz--lg,.margin-right--lg{margin-right:2rem!important}.margin--lg{margin:2rem!important}.margin-bottom--xl,.margin-vert--xl{margin-bottom:5rem!important}.margin-top--xl,.margin-vert--xl{margin-top:5rem!important}.margin-horiz--xl,.margin-left--xl{margin-left:5rem!important}.margin-horiz--xl,.margin-right--xl{margin-right:5rem!important}.margin--xl{margin:5rem!important}.padding--none{padding:0!important}.padding-bottom--xs,.padding-vert--xs{padding-bottom:.25rem!important}.padding-top--xs,.padding-vert--xs{padding-top:.25rem!important}.padding-horiz--xs,.padding-left--xs{padding-left:.25rem!important}.padding-horiz--xs,.padding-right--xs{padding-right:.25rem!important}.padding--xs{padding:.25rem!important}.padding-bottom--sm,.padding-vert--sm{padding-bottom:.5rem!important}.padding-top--sm,.padding-vert--sm{padding-top:.5rem!important}.padding-horiz--sm,.padding-left--sm{padding-left:.5rem!important}.padding-horiz--sm,.padding-right--sm{padding-right:.5rem!important}.padding--sm{padding:.5rem!important}.padding-bottom--md,.padding-vert--md{padding-bottom:1rem!important}.padding-top--md,.padding-vert--md{padding-top:1rem!important}.padding-horiz--md,.padding-left--md{padding-left:1rem!important}.padding-horiz--md,.padding-right--md{padding-right:1rem!important}.padding--md{padding:1rem!important}.padding-bottom--lg,.padding-vert--lg{padding-bottom:2rem!important}.padding-top--lg,.padding-vert--lg{padding-top:2rem!important}.padding-horiz--lg,.padding-left--lg{padding-left:2rem!important}.padding-horiz--lg,.padding-right--lg{padding-right:2rem!important}.padding--lg{padding:2rem!important}.padding-bottom--xl,.padding-vert--xl{padding-bottom:5rem!important}.padding-top--xl,.padding-vert--xl{padding-top:5rem!important}.padding-horiz--xl,.padding-left--xl{padding-left:5rem!important}.padding-horiz--xl,.padding-right--xl{padding-right:5rem!important}.padding--xl{padding:5rem!important}code{background-color:var(--ifm-code-background);border:.1rem solid 
#0000001a;border-radius:var(--ifm-code-border-radius);font-family:var(--ifm-font-family-monospace);font-size:var(--ifm-code-font-size);padding:var(--ifm-code-padding-vertical) var(--ifm-code-padding-horizontal)}a code{color:inherit;color:rgb(var(--lsd-text-primary))}pre{background-color:var(--ifm-pre-background);border-radius:var(--ifm-pre-border-radius);color:var(--ifm-pre-color);font:var(--ifm-code-font-size)/var(--ifm-pre-line-height) var(--ifm-font-family-monospace);overflow:auto;padding:var(--ifm-pre-padding)}pre code{background-color:initial;border:none;font-size:100%;line-height:inherit;padding:0}kbd{background-color:var(--ifm-color-emphasis-0);border:1px solid var(--ifm-color-emphasis-400);border-radius:.2rem;box-shadow:inset 0 -1px 0 var(--ifm-color-emphasis-400);color:var(--ifm-color-emphasis-800);font:80% var(--ifm-font-family-monospace);padding:.15rem .3rem}h1,h2,h3,h4,h5,h6{color:var(--ifm-heading-color);font-family:var(--ifm-heading-font-family);font-weight:var(--ifm-heading-font-weight);line-height:var(--ifm-heading-line-height);margin:var(--ifm-heading-margin-top) 0 var(--ifm-heading-margin-bottom) 0}.sub1,h1{font-size:var(--ifm-h1-font-size)}.sub2,h2{font-size:var(--ifm-h2-font-size)}.sub3,h3{font-size:var(--ifm-h3-font-size)}.sub4,h4{font-size:var(--ifm-h4-font-size)}.sub5,h5{font-size:var(--ifm-h5-font-size)}.sub6,h6,small{font-size:var(--ifm-h6-font-size)}.container_lyt7,.container_lyt7>svg,img{max-width:100%}img[align=right]{padding-left:var(--image-alignment-padding)}img[align=left]{padding-right:var(--image-alignment-padding)}.markdown{--ifm-h1-vertical-rhythm-top:3;--ifm-h2-vertical-rhythm-top:2;--ifm-h3-vertical-rhythm-top:1.5;--ifm-heading-vertical-rhythm-top:1.25;--ifm-h1-vertical-rhythm-bottom:1.25;--ifm-heading-vertical-rhythm-bottom:1}.markdown:after,.markdown:before{content:"";display:table}.markdown:after{clear:both}.markdown 
h1:first-child{--ifm-h1-font-size:3rem;margin-bottom:calc(var(--ifm-h1-vertical-rhythm-bottom)*var(--ifm-leading))}.markdown>h2{--ifm-h2-font-size:2rem;margin-top:calc(var(--ifm-h2-vertical-rhythm-top)*var(--ifm-leading));letter-spacing:-1.5px}.markdown>h3{--ifm-h3-font-size:1.5rem;margin-top:calc(var(--ifm-h3-vertical-rhythm-top)*var(--ifm-leading))}.markdown>h4,.markdown>h5,.markdown>h6{margin-top:calc(var(--ifm-heading-vertical-rhythm-top)*var(--ifm-leading))}.markdown>p,.markdown>pre,.markdown>ul{margin-bottom:var(--ifm-leading)}.markdown li>p{margin-top:var(--ifm-list-paragraph-margin)}.markdown li+li{margin-top:var(--ifm-list-item-margin)}ol,ul{margin:0 0 var(--ifm-list-margin);padding-left:var(--ifm-list-left-padding)}ol ol,ul ol{list-style-type:lower-roman}ol ol ol,ol ul ol,ul ol ol,ul ul ol{list-style-type:lower-alpha}table thead tr{border-bottom:2px solid var(--ifm-table-border-color)}table thead,table tr:nth-child(2n){background-color:var(--ifm-table-stripe-background)}table tr{background-color:var(--ifm-table-background);border-top:var(--ifm-table-border-width) solid var(--ifm-table-border-color)}table td,table th{border:var(--ifm-table-border-width) solid var(--ifm-table-border-color);padding:var(--ifm-table-cell-padding)}table th{background-color:var(--ifm-table-head-background);color:var(--ifm-table-head-color);font-weight:var(--ifm-table-head-font-weight)}strong{font-weight:var(--ifm-font-weight-bold)}a{color:var(--ifm-link-color);text-decoration:var(--ifm-link-decoration)}a:hover{color:var(--ifm-link-hover-color);text-decoration:var(--ifm-link-hover-decoration)}.button:hover,.text--no-decoration,.text--no-decoration:hover,a:not([href]){text-decoration:none}p{margin:0 0 var(--ifm-paragraph-margin-bottom)}blockquote{border-left:var(--ifm-blockquote-border-left-width) solid 
var(--ifm-blockquote-border-color);box-shadow:var(--ifm-blockquote-shadow);color:var(--ifm-blockquote-color);font-size:var(--ifm-blockquote-font-size);padding:var(--ifm-blockquote-padding-vertical) var(--ifm-blockquote-padding-horizontal)}blockquote>:first-child{margin-top:0}blockquote>:last-child{margin-bottom:0}hr{background-color:var(--ifm-hr-background-color);border:0;height:var(--ifm-hr-height);margin:var(--ifm-hr-margin-vertical) 0}.shadow--lw{box-shadow:var(--ifm-global-shadow-lw)!important}.shadow--md{box-shadow:var(--ifm-global-shadow-md)!important}.shadow--tl{box-shadow:var(--ifm-global-shadow-tl)!important}.text--primary,.wordWrapButtonEnabled_EoeP .wordWrapButtonIcon_Bwma{color:var(--ifm-color-primary)}.text--secondary,[data-theme=dark] .root_JWD1>div>div{color:var(--ifm-color-secondary)}.text--success{color:var(--ifm-color-success)}.text--info{color:var(--ifm-color-info)}.text--warning{color:var(--ifm-color-warning)}.text--danger{color:var(--ifm-color-danger)}.text--center{text-align:center}.text--left{text-align:left}.text--justify{text-align:justify}.text--right{text-align:right}.text--capitalize{text-transform:capitalize}.text--lowercase{text-transform:lowercase}.alert__heading,.mdx-hero-title--uppercase,.text--uppercase,table td strong,table th{text-transform:uppercase}.text--light{font-weight:var(--ifm-font-weight-light)}.text--normal{font-weight:var(--ifm-font-weight-normal)}.text--semibold{font-weight:var(--ifm-font-weight-semibold)}.text--bold{font-weight:var(--ifm-font-weight-bold)}.text--italic{font-style:italic}.text--truncate{overflow:hidden;text-overflow:ellipsis}.text--break{word-wrap:break-word!important;word-break:break-word!important}.clean-btn{background:none;border:none;color:inherit;cursor:pointer;font-family:inherit;padding:0}.alert,.alert 
.close{color:var(--ifm-alert-foreground-color)}.clean-list{padding-left:0}.alert--primary{--ifm-alert-background-color:var(--ifm-color-primary-contrast-background);--ifm-alert-background-color-highlight:#3578e526;--ifm-alert-foreground-color:var(--ifm-color-primary-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-primary-dark)}.alert--secondary{--ifm-alert-background-color:var(--ifm-color-secondary-contrast-background);--ifm-alert-background-color-highlight:#ebedf026;--ifm-alert-foreground-color:var(--ifm-color-secondary-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-secondary-dark)}.alert--success{--ifm-alert-background-color:var(--ifm-color-success-contrast-background);--ifm-alert-background-color-highlight:#00a40026;--ifm-alert-foreground-color:var(--ifm-color-success-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-success-dark)}.alert--info{--ifm-alert-background-color:var(--ifm-color-info-contrast-background);--ifm-alert-background-color-highlight:#54c7ec26;--ifm-alert-foreground-color:var(--ifm-color-info-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-info-dark)}.alert--warning{--ifm-alert-background-color:var(--ifm-color-warning-contrast-background);--ifm-alert-background-color-highlight:#ffba0026;--ifm-alert-foreground-color:var(--ifm-color-warning-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-warning-dark)}.alert--danger{--ifm-alert-background-color:var(--ifm-color-danger-contrast-background);--ifm-alert-background-color-highlight:#fa383e26;--ifm-alert-foreground-color:var(--ifm-color-danger-contrast-foreground);--ifm-alert-border-color:var(--ifm-color-danger-dark)}.alert{--ifm-code-background:var(--ifm-alert-background-color-highlight);--ifm-link-color:var(--ifm-alert-foreground-color);--ifm-link-hover-color:var(--ifm-alert-foreground-color);--ifm-link-decoration:underline;--ifm-tabs-color:var(--ifm-alert-foreground-color);--ifm-tabs-color-active:var(--ifm-alert-foreground-color);--if
m-tabs-color-active-border:var(--ifm-alert-border-color);background-color:var(--ifm-alert-background-color);border:var(--ifm-alert-border-width) solid var(--ifm-alert-border-color);border-left-width:var(--ifm-alert-border-left-width);border-radius:var(--ifm-alert-border-radius);box-shadow:var(--ifm-alert-shadow);padding:var(--ifm-alert-padding-vertical) var(--ifm-alert-padding-horizontal)}.alert__heading{align-items:center;display:flex;font:700 var(--ifm-h5-font-size)/var(--ifm-heading-line-height) var(--ifm-heading-font-family);margin-bottom:.5rem}.alert__icon{display:inline-flex;margin-right:.4em}.alert__icon svg{fill:var(--ifm-alert-foreground-color);stroke:var(--ifm-alert-foreground-color);stroke-width:0}.alert .close{margin:calc(var(--ifm-alert-padding-vertical)*-1) calc(var(--ifm-alert-padding-horizontal)*-1) 0 0;opacity:.75}.alert .close:focus,.alert .close:hover{opacity:1}.alert a{text-decoration-color:var(--ifm-alert-border-color)}.alert a:hover{text-decoration-thickness:2px}.avatar{column-gap:var(--ifm-avatar-intro-margin);display:flex}.avatar__photo{border-radius:50%;display:block;height:var(--ifm-avatar-photo-size);overflow:hidden;width:var(--ifm-avatar-photo-size)}.card--full-height,.navbar__logo img{height:100%}.avatar__photo--sm{--ifm-avatar-photo-size:2rem}.avatar__photo--lg{--ifm-avatar-photo-size:4rem}.avatar__photo--xl{--ifm-avatar-photo-size:6rem}.avatar__intro{display:flex;flex:1 1;flex-direction:column;justify-content:center;text-align:var(--ifm-avatar-intro-alignment)}.badge,.breadcrumbs__item,.breadcrumbs__link,.button{display:inline-block}.avatar__name{font:700 var(--ifm-h4-font-size)/var(--ifm-heading-line-height) var(--ifm-font-family-base)}.avatar__subtitle{margin-top:.25rem}.avatar--vertical{--ifm-avatar-intro-alignment:center;--ifm-avatar-intro-margin:0.5rem;align-items:center;flex-direction:column}.badge{background-color:var(--ifm-badge-background-color);border:var(--ifm-badge-border-width) solid 
var(--ifm-badge-border-color);border-radius:var(--ifm-badge-border-radius);color:var(--ifm-badge-color);font-size:75%;font-weight:var(--ifm-font-weight-bold);line-height:1;padding:var(--ifm-badge-padding-vertical) var(--ifm-badge-padding-horizontal)}.badge--primary{--ifm-badge-background-color:var(--ifm-color-primary)}.badge--secondary{--ifm-badge-background-color:var(--ifm-color-secondary);color:var(--ifm-color-black)}.breadcrumbs__link,.button.button--secondary.button--outline:not(.button--active):not(:hover){color:var(--ifm-font-color-base)}.badge--success{--ifm-badge-background-color:var(--ifm-color-success)}.badge--info{--ifm-badge-background-color:var(--ifm-color-info)}.badge--warning{--ifm-badge-background-color:var(--ifm-color-warning)}.badge--danger{--ifm-badge-background-color:var(--ifm-color-danger)}.breadcrumbs{margin-bottom:0;padding-left:0}.breadcrumbs__item:not(:last-child):after{background:var(--ifm-breadcrumb-separator) center;content:" ";display:inline-block;filter:var(--ifm-breadcrumb-separator-filter);height:calc(var(--ifm-breadcrumb-separator-size)*var(--ifm-breadcrumb-size-multiplier)*var(--ifm-breadcrumb-separator-size-multiplier));margin:0 var(--ifm-breadcrumb-spacing);opacity:.5;width:calc(var(--ifm-breadcrumb-separator-size)*var(--ifm-breadcrumb-size-multiplier)*var(--ifm-breadcrumb-separator-size-multiplier))}.breadcrumbs__item--active .breadcrumbs__link{background:var(--ifm-breadcrumb-item-background-active);color:var(--ifm-breadcrumb-color-active)}.breadcrumbs__link{border-radius:var(--ifm-breadcrumb-border-radius);font-size:calc(1rem*var(--ifm-breadcrumb-size-multiplier));padding:calc(var(--ifm-breadcrumb-padding-vertical)*var(--ifm-breadcrumb-size-multiplier)) 
calc(var(--ifm-breadcrumb-padding-horizontal)*var(--ifm-breadcrumb-size-multiplier));transition-duration:var(--ifm-transition-fast);transition-property:background,color}.breadcrumbs__link:any-link:hover,.breadcrumbs__link:link:hover,.breadcrumbs__link:visited:hover,area[href].breadcrumbs__link:hover{background:var(--ifm-breadcrumb-item-background-active);text-decoration:none}.breadcrumbs--sm{--ifm-breadcrumb-size-multiplier:0.8}.breadcrumbs--lg{--ifm-breadcrumb-size-multiplier:1.2}.button{background-color:var(--ifm-button-background-color);border:var(--ifm-button-border-width) solid var(--ifm-button-border-color);border-radius:var(--ifm-button-border-radius);cursor:pointer;font-size:calc(.875rem*var(--ifm-button-size-multiplier));font-weight:var(--ifm-button-font-weight);line-height:1.5;padding:calc(var(--ifm-button-padding-vertical)*var(--ifm-button-size-multiplier)) calc(var(--ifm-button-padding-horizontal)*var(--ifm-button-size-multiplier));text-align:center;transition-duration:var(--ifm-button-transition-duration);transition-property:color,background,border-color;-webkit-user-select:none;user-select:none}.button,.button:hover{color:var(--ifm-button-color)}.button--outline{--ifm-button-color:var(--ifm-button-border-color)}.button--outline:hover{--ifm-button-background-color:var(--ifm-button-border-color)}.button--link{--ifm-button-border-color:#0000;color:var(--ifm-link-color);text-decoration:var(--ifm-link-decoration)}.button--link.button--active,.button--link:active,.button--link:hover{color:var(--ifm-link-hover-color);text-decoration:var(--ifm-link-hover-decoration)}.button.disabled,.button:disabled,.button[disabled]{opacity:.65;pointer-events:none}.button--sm{--ifm-button-size-multiplier:0.8}.button--lg{--ifm-button-size-multiplier:1.35}.button--block{display:block;width:100%}.button.button--secondary{color:var(--ifm-color-gray-900)}:where(.button--primary){--ifm-button-background-color:var(--ifm-color-primary);--ifm-button-border-color:var(--ifm-color-primar
y)}:where(.button--primary):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-primary-dark);--ifm-button-border-color:var(--ifm-color-primary-dark)}.button--primary.button--active,.button--primary:active{--ifm-button-background-color:var(--ifm-color-primary-darker);--ifm-button-border-color:var(--ifm-color-primary-darker)}:where(.button--secondary){--ifm-button-background-color:var(--ifm-color-secondary);--ifm-button-border-color:var(--ifm-color-secondary)}:where(.button--secondary):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-secondary-dark);--ifm-button-border-color:var(--ifm-color-secondary-dark)}.button--secondary.button--active,.button--secondary:active{--ifm-button-background-color:var(--ifm-color-secondary-darker);--ifm-button-border-color:var(--ifm-color-secondary-darker)}:where(.button--success){--ifm-button-background-color:var(--ifm-color-success);--ifm-button-border-color:var(--ifm-color-success)}:where(.button--success):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-success-dark);--ifm-button-border-color:var(--ifm-color-success-dark)}.button--success.button--active,.button--success:active{--ifm-button-background-color:var(--ifm-color-success-darker);--ifm-button-border-color:var(--ifm-color-success-darker)}:where(.button--info){--ifm-button-background-color:var(--ifm-color-info);--ifm-button-border-color:var(--ifm-color-info)}:where(.button--info):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-info-dark);--ifm-button-border-color:var(--ifm-color-info-dark)}.button--info.button--active,.button--info:active{--ifm-button-background-color:var(--ifm-color-info-darker);--ifm-button-border-color:var(--ifm-color-info-darker)}:where(.button--warning){--ifm-button-background-color:var(--ifm-color-warning);--ifm-button-border-color:var(--ifm-color-warning)}:where(.button--warning):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-warn
ing-dark);--ifm-button-border-color:var(--ifm-color-warning-dark)}.button--warning.button--active,.button--warning:active{--ifm-button-background-color:var(--ifm-color-warning-darker);--ifm-button-border-color:var(--ifm-color-warning-darker)}:where(.button--danger){--ifm-button-background-color:var(--ifm-color-danger);--ifm-button-border-color:var(--ifm-color-danger)}:where(.button--danger):not(.button--outline):hover{--ifm-button-background-color:var(--ifm-color-danger-dark);--ifm-button-border-color:var(--ifm-color-danger-dark)}.button--danger.button--active,.button--danger:active{--ifm-button-background-color:var(--ifm-color-danger-darker);--ifm-button-border-color:var(--ifm-color-danger-darker)}.button-group{display:inline-flex;gap:var(--ifm-button-group-spacing)}.button-group>.button:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.button-group>.button:not(:last-child){border-bottom-right-radius:0;border-top-right-radius:0}.button-group--block{display:flex;justify-content:stretch}.button-group--block>.button{flex-grow:1}.card{background-color:var(--ifm-card-background-color);border-radius:var(--ifm-card-border-radius);box-shadow:var(--ifm-global-shadow-lw);display:flex;flex-direction:column;overflow:hidden}.card__image{padding-top:var(--ifm-card-vertical-spacing)}.card__image:first-child{padding-top:0}.card__body,.card__footer,.card__header{padding:var(--ifm-card-vertical-spacing) var(--ifm-card-horizontal-spacing)}.card__body:not(:last-child),.card__footer:not(:last-child),.card__header:not(:last-child){padding-bottom:0}.card__body>:last-child,.card__footer>:last-child,.card__header>:last-child{margin-bottom:0}.card__footer,.mdx-timeline-item__description{margin-top:auto}.table-of-contents{font-size:.8rem;margin-bottom:0;padding:var(--ifm-toc-padding-vertical) 0}.table-of-contents,.table-of-contents ul{list-style:none;padding-left:var(--ifm-toc-padding-horizontal)}.table-of-contents li{margin:var(--ifm-toc-padding-vertical) 
var(--ifm-toc-padding-horizontal)}.table-of-contents__left-border{border-left:1px solid var(--ifm-toc-border-color)}.table-of-contents__link{color:var(--ifm-toc-link-color);display:block}.table-of-contents__link--active,.table-of-contents__link--active code,.table-of-contents__link:hover,.table-of-contents__link:hover code{color:var(--ifm-color-primary);text-decoration:none}.close{color:var(--ifm-color-black);float:right;font-size:1.5rem;font-weight:var(--ifm-font-weight-bold);line-height:1;opacity:.5;padding:1rem;transition:opacity var(--ifm-transition-fast) var(--ifm-transition-timing-default)}.breadcrumbs__item,.close:hover,.table-of-contents__link--active code{opacity:.7}.close:focus,.theme-code-block-highlighted-line .codeLineNumber_Tfdd:before{opacity:.8}.dropdown{display:inline-flex;font-weight:var(--ifm-dropdown-font-weight);position:relative;vertical-align:top}.admonitionIcon_BGV6,.root_JWD1>span svg{vertical-align:middle}.dropdown--hoverable:hover .dropdown__menu,.dropdown--show .dropdown__menu{opacity:1;pointer-events:all;transform:translateY(-1px);visibility:visible}#nprogress,.dropdown__menu,.navWrapper_ybYI,.navbar__item.dropdown .navbar__link:not([href]){pointer-events:none}.dropdown--right .dropdown__menu{left:inherit;right:0}.dropdown--nocaret .navbar__link:after{content:none!important}.dropdown__menu{background-color:var(--ifm-dropdown-background-color);border-radius:var(--ifm-global-radius);box-shadow:var(--ifm-global-shadow-md);left:0;max-height:80vh;min-width:10rem;opacity:0;overflow-y:auto;position:absolute;top:calc(100% - var(--ifm-navbar-item-padding-vertical) + .3rem);transform:translateY(-.625rem);transition-duration:var(--ifm-transition-fast);transition-property:opacity,transform,visibility;transition-timing-function:var(--ifm-transition-timing-default);visibility:hidden;z-index:var(--ifm-z-index-dropdown)}.menu__caret,.menu__link,.menu__list-item-collapsible{border-radius:.25rem;transition:background var(--ifm-transition-fast) 
var(--ifm-transition-timing-default)}.dropdown__link{color:var(--ifm-dropdown-link-color);display:block;font-size:.875rem}.dropdown__link--active,.dropdown__link:hover{background-color:var(--ifm-dropdown-hover-background-color);color:var(--ifm-dropdown-link-color);text-decoration:none}.dropdown__link--active,.dropdown__link--active:hover{--ifm-dropdown-link-color:var(--ifm-link-color)}.dropdown>.navbar__link:after{border-color:currentcolor #0000;border-style:solid;border-width:.4em .4em 0;content:"";margin-left:.3em;position:relative;top:2px;transform:translateY(-50%);display:none}.menu__list-item--collapsed .menu__caret:before,.menu__list-item--collapsed .menu__link--sublist:after,.menu__list-item-collapsible>a[aria-expanded=false]>svg{transform:rotate(90deg)}.footer{background-color:var(--ifm-footer-background-color);color:var(--ifm-footer-color);padding:var(--ifm-footer-padding-vertical) var(--ifm-footer-padding-horizontal)}.footer--dark{--ifm-footer-background-color:#303846;--ifm-footer-color:var(--ifm-footer-link-color);--ifm-footer-link-color:var(--ifm-color-secondary);--ifm-footer-title-color:var(--ifm-color-white)}.footer__links{margin-bottom:1rem}.footer__link-item{color:var(--ifm-footer-link-color);line-height:2}.footer__link-item:hover{color:var(--ifm-footer-link-hover-color)}.footer__link-separator{margin:0 var(--ifm-footer-link-horizontal-spacing)}.footer__title{color:var(--ifm-footer-title-color);font:700 var(--ifm-h4-font-size)/var(--ifm-heading-line-height) var(--ifm-font-family-base);margin-bottom:var(--ifm-heading-margin-bottom)}.menu,.navbar__link{font-weight:var(--ifm-font-weight-semibold)}.docItemContainer_hrrU article>:first-child,.docItemContainer_hrrU 
header+*,.footer__item{margin-top:0}.admonitionContent_iozl>:last-child,.footer__items{margin-bottom:0}.main-wrapper>main,.table-of-contents,[type=checkbox]{padding:0}.hero{align-items:center;background-color:var(--ifm-hero-background-color);color:var(--ifm-hero-text-color);display:flex;padding:4rem 2rem}.hero--primary{--ifm-hero-background-color:var(--ifm-color-primary);--ifm-hero-text-color:var(--ifm-font-color-base-inverse)}.hero--dark{--ifm-hero-background-color:#303846;--ifm-hero-text-color:var(--ifm-color-white)}.hero__title,.title_f1Hy{font-size:3rem}.hero__subtitle{font-size:1.5rem}.menu__list{margin:0;padding-left:0}.menu__caret,.menu__link{padding:var(--ifm-menu-link-padding-vertical) var(--ifm-menu-link-padding-horizontal)}.menu__list .menu__list{flex:0 0 100%;margin-top:.25rem;padding-left:var(--ifm-menu-link-padding-horizontal)}.menu__list-item:not(:first-child){margin-top:.25rem}.menu__list-item--collapsed .menu__list{height:0;overflow:hidden}.menu__list-item-collapsible{display:flex;flex-wrap:wrap;position:relative}.menu__caret:hover,.menu__link:hover,.menu__list-item-collapsible--active,.menu__list-item-collapsible:hover{background:var(--ifm-menu-color-background-hover)}.menu__list-item-collapsible .menu__link--active,.menu__list-item-collapsible .menu__link:hover{background:none!important}.menu__caret,.menu__link{align-items:center;display:flex}.navbar-sidebar,.navbar-sidebar__backdrop{bottom:0;opacity:0;top:0;transition-duration:var(--ifm-transition-fast);transition-timing-function:ease-in-out;left:0}.menu__link{color:var(--ifm-menu-color);flex:1;line-height:1.25}.menu__link:hover{color:var(--ifm-menu-color);text-decoration:none}.menu__caret:before,.menu__link--sublist-caret:after{content:"";filter:var(--ifm-menu-link-sublist-icon-filter);height:1.25rem;transform:rotate(180deg);transition:transform var(--ifm-transition-fast) linear;width:1.25rem}.menu__link--sublist-caret:after{background:var(--ifm-menu-link-sublist-icon) 50%/2rem 
2rem;margin-left:auto;min-width:1.25rem}.menu__link--active,.menu__link--active:hover{color:var(--ifm-menu-color-active)}.navbar__brand,.navbar__link{color:var(--ifm-navbar-link-color)}.menu__link--active:not(.menu__link--sublist){background-color:var(--ifm-menu-color-background-active)}.menu__caret:before{background:var(--ifm-menu-link-sublist-icon) 50%/2rem 2rem}.navbar--dark,html[data-theme=dark]{--ifm-menu-link-sublist-icon-filter:invert(100%) sepia(94%) saturate(17%) hue-rotate(223deg) brightness(104%) contrast(98%)}.navbar{background-color:var(--ifm-navbar-background-color);box-shadow:var(--ifm-navbar-shadow);padding:var(--ifm-navbar-padding-vertical) var(--ifm-navbar-padding-horizontal)}.icon_S7Kx,.navbar,.navbar>.container,.navbar>.container-fluid{display:flex}.navbar--fixed-top{position:sticky;top:0;z-index:var(--ifm-z-index-fixed)}.navbar__inner{display:flex;flex-wrap:wrap;justify-content:space-between;width:100%}.navbar__brand{align-items:center;display:flex;margin-right:1rem;min-width:0}.navbar__brand:hover{color:var(--ifm-navbar-link-hover-color);text-decoration:none}.announcementBarContent_xLdY,.navbar__title{flex:1 1 auto}.navbar__toggle{display:none;margin-right:.5rem}.navbar__logo{flex:0 0 auto;margin-right:.5rem}.navbar__items{align-items:center;display:flex;flex:1;min-width:0}.navbar__items--center{flex:0 0 auto}.blog-wrapper>.container>.row>aside>nav>ul li,.navbar__items--center .navbar__brand,a[class^=sidebarLogo_]{margin:0}.navbar__items--center+.navbar__items--right{flex:1}.navbar__items--right{flex:0 0 auto;justify-content:flex-end}.navbar__items--right>:last-child{padding-right:0}.navbar__item{display:inline-block;padding:var(--ifm-navbar-item-padding-vertical) 
var(--ifm-navbar-item-padding-horizontal)}.navbar__link--active,.navbar__link:hover{color:var(--ifm-navbar-link-hover-color);text-decoration:none}.navbar--dark,.navbar--primary{--ifm-menu-color:var(--ifm-color-gray-300);--ifm-navbar-link-color:var(--ifm-color-gray-100);--ifm-navbar-search-input-background-color:#ffffff1a;--ifm-navbar-search-input-placeholder-color:#ffffff80;color:var(--ifm-color-white)}.navbar--dark{--ifm-navbar-background-color:#242526;--ifm-menu-color-background-active:#ffffff0d;--ifm-navbar-search-input-color:var(--ifm-color-white)}.navbar--primary{--ifm-navbar-background-color:var(--ifm-color-primary);--ifm-navbar-link-hover-color:var(--ifm-color-white);--ifm-menu-color-active:var(--ifm-color-white);--ifm-navbar-search-input-color:var(--ifm-color-emphasis-500)}.navbar__search-input{-webkit-appearance:none;appearance:none;background:var(--ifm-navbar-search-input-background-color) var(--ifm-navbar-search-input-icon) no-repeat .75rem center/1rem 1rem;border:none;border-radius:2rem;color:var(--ifm-navbar-search-input-color);cursor:text;display:inline-block;font-size:.9rem;height:2rem;padding:0 .5rem 0 2.25rem;width:12.5rem}.cursor-pointer,.pills__item,.tabs__item{cursor:pointer}.navbar__search-input::placeholder{color:var(--ifm-navbar-search-input-placeholder-color)}.navbar-sidebar{background-color:var(--ifm-navbar-background-color);box-shadow:var(--ifm-global-shadow-md);position:fixed;transform:translate3d(-100%,0,0);transition-property:opacity,visibility,transform;visibility:hidden;width:var(--ifm-navbar-sidebar-width)}.navbar-sidebar--show .navbar-sidebar,.navbar-sidebar__items{transform:translateZ(0)}.navbar-sidebar--show .navbar-sidebar,.navbar-sidebar--show 
.navbar-sidebar__backdrop{opacity:1;visibility:visible}.navbar-sidebar__backdrop{background-color:#0009;position:fixed;right:0;transition-property:opacity,visibility;visibility:hidden}.footer,.row{position:relative}.navbar-sidebar__brand{align-items:center;box-shadow:var(--ifm-navbar-shadow);display:flex;flex:1;height:var(--ifm-navbar-height);padding:var(--ifm-navbar-padding-vertical) var(--ifm-navbar-padding-horizontal)}.navbar-sidebar__items{display:flex;height:calc(100% - var(--ifm-navbar-height));transition:transform var(--ifm-transition-fast) ease-in-out}.navbar-sidebar__items--show-secondary{transform:translate3d(calc((var(--ifm-navbar-sidebar-width))*-1),0,0)}.navbar-sidebar__item{flex-shrink:0;padding:.5rem;width:calc(var(--ifm-navbar-sidebar-width))}.navbar-sidebar__back{background:var(--ifm-menu-color-background-active);font-size:15px;font-weight:var(--ifm-button-font-weight);position:relative;text-align:left;top:-.5rem}.navbar-sidebar__close{display:flex;margin-left:auto}.pagination{column-gap:var(--ifm-pagination-page-spacing);display:flex;font-size:var(--ifm-pagination-font-size);padding-left:0}.pagination--sm{--ifm-pagination-font-size:0.8rem;--ifm-pagination-padding-horizontal:0.8rem;--ifm-pagination-padding-vertical:0.2rem}.pagination--lg{--ifm-pagination-font-size:1.2rem;--ifm-pagination-padding-horizontal:1.2rem;--ifm-pagination-padding-vertical:0.3rem}.pagination__item{display:inline-flex}.pagination__item>span{padding:var(--ifm-pagination-padding-vertical)}.pagination__item--active .pagination__link{color:var(--ifm-pagination-color-active)}.pagination__item--active .pagination__link,.pagination__item:not(.pagination__item--active):hover 
.pagination__link{background:var(--ifm-pagination-item-active-background)}.pagination__item--disabled,.pagination__item[disabled]{opacity:.25;pointer-events:none}.pagination__link{border-radius:var(--ifm-pagination-border-radius);color:var(--ifm-font-color-base);display:inline-block;padding:var(--ifm-pagination-padding-vertical) var(--ifm-pagination-padding-horizontal);transition:background var(--ifm-transition-fast) var(--ifm-transition-timing-default)}.pagination__link:hover{text-decoration:none}.pagination-nav{grid-gap:var(--ifm-spacing-horizontal);display:grid;gap:var(--ifm-spacing-horizontal);grid-template-columns:repeat(2,1fr)}.footer>div,.grid{grid-template-columns:repeat(24,1fr)}.pagination-nav__link{border:1px solid var(--ifm-color-emphasis-300);border-radius:var(--ifm-pagination-nav-border-radius);display:block;height:100%;line-height:var(--ifm-heading-line-height);padding:var(--ifm-global-spacing);transition:border-color var(--ifm-transition-fast) var(--ifm-transition-timing-default)}.pagination-nav__link:hover{border-color:var(--ifm-pagination-nav-color-hover);text-decoration:none}.blog-wrapper>.container>.row>aside>nav>div:first-child,.blog-wrapper>.container>.row>aside>nav>ul li a{border-left:1px solid #0000;font-size:var(--lsd-subtitle2-fontSize)!important;font-weight:var(--lsd-subtitle2-fontWeight)!important;line-height:var(--lsd-subtitle2-lineHeight)!important}.content_knG7 a,.dropdown__link:hover,.footer__item a{text-decoration:underline}.pagination-nav__label{font-size:var(--ifm-h4-font-size);font-weight:var(--ifm-heading-font-weight);word-break:break-word}.pagination-nav__link--prev .pagination-nav__label:before{content:"« "}.pagination-nav__link--next .pagination-nav__label:after{content:" 
»"}.pagination-nav__sublabel{color:var(--ifm-color-content-secondary);font-size:var(--ifm-h5-font-size);font-weight:var(--ifm-font-weight-semibold);margin-bottom:.25rem}.pills__item,.tabs{font-weight:var(--ifm-font-weight-bold)}.pills{display:flex;gap:var(--ifm-pills-spacing);padding-left:0}.pills__item{border-radius:.5rem;display:inline-block;padding:.25rem 1rem;transition:background var(--ifm-transition-fast) var(--ifm-transition-timing-default)}.navbar__left-items>.navbar__item:first-of-type,.tabs,:not(.containsTaskList_mC6p>li)>.containsTaskList_mC6p{padding-left:0}.pills__item--active{color:var(--ifm-pills-color-active)}.pills__item--active,.pills__item:not(.pills__item--active):hover{background:var(--ifm-pills-color-background-active)}.pills--block{justify-content:stretch}.pills--block .pills__item{flex-grow:1;text-align:center}.tabs{color:var(--ifm-tabs-color);display:flex;margin-bottom:0;overflow-x:auto}.tabs__item{border-bottom:3px solid #0000;border-radius:var(--ifm-global-radius);display:inline-flex;padding:var(--ifm-tabs-padding-vertical) var(--ifm-tabs-padding-horizontal);transition:background-color var(--ifm-transition-fast) var(--ifm-transition-timing-default)}.tabs__item--active{border-bottom-color:var(--ifm-tabs-color-active-border);border-bottom-left-radius:0;border-bottom-right-radius:0;color:var(--ifm-tabs-color-active)}.tabs__item:hover{background-color:var(--ifm-hover-overlay)}.tabs--block{justify-content:stretch}.tabs--block 
.tabs__item{flex-grow:1;justify-content:center}html[data-theme=dark]{--ifm-color-emphasis-0:var(--ifm-color-gray-1000);--ifm-color-emphasis-100:var(--ifm-color-gray-900);--ifm-color-emphasis-200:var(--ifm-color-gray-800);--ifm-color-emphasis-300:var(--ifm-color-gray-700);--ifm-color-emphasis-400:var(--ifm-color-gray-600);--ifm-color-emphasis-600:var(--ifm-color-gray-400);--ifm-color-emphasis-700:var(--ifm-color-gray-300);--ifm-color-emphasis-800:var(--ifm-color-gray-200);--ifm-color-emphasis-900:var(--ifm-color-gray-100);--ifm-color-emphasis-1000:var(--ifm-color-gray-0);--ifm-background-color:#1b1b1d;--ifm-background-surface-color:#242526;--ifm-hover-overlay:#ffffff0d;--ifm-color-content:#e3e3e3;--ifm-color-content-secondary:#fff;--ifm-breadcrumb-separator-filter:invert(64%) sepia(11%) saturate(0%) hue-rotate(149deg) brightness(99%) contrast(95%);--ifm-code-background:#ffffff1a;--ifm-scrollbar-track-background-color:#444;--ifm-scrollbar-thumb-background-color:#686868;--ifm-scrollbar-thumb-hover-background-color:#7a7a7a;--ifm-table-stripe-background:#ffffff12;--ifm-toc-border-color:var(--ifm-color-emphasis-200);--ifm-color-primary-contrast-background:#102445;--ifm-color-primary-contrast-foreground:#ebf2fc;--ifm-color-secondary-contrast-background:#474748;--ifm-color-secondary-contrast-foreground:#fdfdfe;--ifm-color-success-contrast-background:#003100;--ifm-color-success-contrast-foreground:#e6f6e6;--ifm-color-info-contrast-background:#193c47;--ifm-color-info-contrast-foreground:#eef9fd;--ifm-color-warning-contrast-background:#4d3800;--ifm-color-warning-contrast-foreground:#fff8e6;--ifm-color-danger-contrast-background:#4b1113;--ifm-color-danger-contrast-foreground:#ffebec}:root{--docusaurus-progress-bar-color:var(--ifm-color-primary);--content-padding:16px;--container-max-width:1440px;--pagination-nav-margin-top:96px;--ifm-navbar-link-color:rgb(var(--lsd-text-primary));--ifm-navbar-background-color:rgb(var(--lsd-surface-primary));--ifm-dropdown-link-color:rgb(var(--l
sd-text-primary));--ifm-dropdown-hover-background-color:rgb(var(--lsd-surface-secondary));--ifm-background-color:rgb(var(--lsd-surface-primary));--ifm-color-white:#fff;--ifm-color-black:#000;--ifm-color-primary:rgb(var(--lsd-theme-primary));--ifm-color-secondary:rgb(var(--lsd-theme-secondary));--prism-background-color:rgb(var(--lsd-theme-primary));--ifm-color-gray-0:rgb(var(--lsd-surface-primary));--ifm-color-gray-100:#f8f8fa;--ifm-color-gray-200:#f7f7f8;--ifm-color-gray-300:#eeeef0;--ifm-color-gray-400:#c0c0c2;--ifm-color-gray-500:#828285;--ifm-color-gray-600:#474747;--ifm-color-gray-700:#373738;--ifm-color-gray-800:#1a1a1a;--ifm-color-gray-900:#151515;--ifm-color-gray-1000:var(--ifm-color-black);--ifm-font-color-base-inverse:var(--ifm-color-black);--docusaurus-highlighted-code-line-bg:#ffffff26;--ifm-card-background-color:var(--ifm-color-gray-100)!important;--ifm-alert-foreground-color:var(--ifm-color-gray-700);--ifm-button-background-color:rgb(var(--lsd-surface-primary));--ifm-global-border-width:1px;--ifm-global-radius:0.6rem;--ifm-hover-overlay:#0000000d;--ifm-font-family-base:var(--lsd-typography-generic-font-family);--ifm-font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--lgs-font-family-secondary:Georgia,"Times New 
Roman",serif;--ifm-font-size-base:100%;--ifm-line-height-base:1.625;--ifm-font-size-secondary:100%;--ifm-line-height-secondary:1.5;--ifm-font-weight-light:400;--ifm-font-weight-normal:400;--ifm-font-weight-semibold:400;--ifm-font-weight-bold:600;--ifm-font-weight-base:var(--ifm-font-weight-normal);--ifm-h1-font-size:var(--lsd-h1-fontSize);--ifm-h2-font-size:var(--lsd-h2-fontSize);--ifm-h3-font-size:var(--lsd-h3-fontSize);--ifm-h4-font-size:var(--lsd-h4-fontSize);--ifm-h5-font-size:var(--lsd-h5-fontSize);--ifm-h6-font-size:var(--lsd-h6-fontSize);--ifm-global-spacing:24px;--ifm-spacing-vertical:var(--ifm-global-spacing);--ifm-spacing-horizontal:0.75rem;--ifm-image-alignment-padding:1.25rem;--ifm-leading-desktop:1.25;--ifm-leading:24px;--ifm-list-left-padding:2rem;--ifm-list-margin:1rem;--ifm-list-item-margin:0.25rem;--ifm-list-paragraph-margin:1rem;--ifm-code-background:rgb(var(--lsd-text-primary))!important;--ifm-pre-padding:1.125rem;--ifm-link-decoration:none;--ifm-paragraph-margin-bottom:var(--ifm-leading);--ifm-blockquote-border-left-width:3px;--ifm-blockquote-shadow:none;--ifm-hr-height:1px;--ifm-hr-margin-vertical:1.5rem;--ifm-scrollbar-size:7px;--ifm-alert-border-width:0px;--ifm-alert-border-left-width:5px;--ifm-alert-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-alert-padding-vertical:var(--ifm-spacing-vertical);--ifm-alert-shadow:var(--ifm-global-shadow-lw);--ifm-avatar-intro-margin:1rem;--ifm-avatar-intro-alignment:inherit;--ifm-avatar-photo-size:3rem;--ifm-badge-border-radius:var(--ifm-global-radius);--ifm-badge-border-width:var(--ifm-global-border-width);--ifm-badge-padding-horizontal:calc(var(--ifm-spacing-horizontal)*0.5);--ifm-badge-padding-vertical:calc(var(--ifm-spacing-vertical)*0.25);--ifm-breadcrumb-border-radius:1.5rem;--ifm-breadcrumb-spacing:0.5rem;--ifm-breadcrumb-padding-horizontal:0.8rem;--ifm-breadcrumb-padding-vertical:0.4rem;--ifm-breadcrumb-size-multiplier:1;--ifm-breadcrumb-separator:url('data:image/svg+xml;utf8,');--ifm-breadcr
umb-separator-filter:none;--ifm-breadcrumb-separator-size:0.5rem;--ifm-breadcrumb-separator-size-multiplier:1.25;--ifm-button-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-button-padding-vertical:0.4rem;--ifm-button-size-multiplier:1;--ifm-button-transition-duration:var(--ifm-transition-fast);--ifm-button-border-radius:calc(var(--ifm-global-radius)*var(--ifm-button-size-multiplier));--ifm-button-group-spacing:2px;--ifm-card-border-radius:var(--ifm-global-radius);--ifm-card-horizontal-spacing:var(--ifm-spacing-horizontal);--ifm-card-vertical-spacing:var(--ifm-spacing-vertical);--ifm-footer-link-horizontal-spacing:0;--ifm-footer-padding-horizontal:var(--content-padding);--ifm-footer-padding-vertical:24px;--ifm-footer-background-color:#0000;--ifm-menu-link-padding-horizontal:var(--ifm-spacing-horizontal);--ifm-menu-link-padding-vertical:0.375rem;--ifm-menu-color:rgb(var(--lsd-text-primary),0.6);--ifm-menu-color-background-active:#0000;--ifm-menu-link-sublist-icon:url('data:image/svg+xml;utf8,');--ifm-menu-link-sublist-icon-filter:none;--ifm-navbar-height:64px;--ifm-navbar-item-padding-horizontal:0.75rem;--ifm-navbar-item-padding-vertical:0.25rem;--ifm-navbar-padding-horizontal:var(--content-padding);--ifm-navbar-padding-vertical:calc(var(--ifm-spacing-vertical)/3);--ifm-navbar-shadow:var(--ifm-global-shadow-lw);--ifm-navbar-search-input-icon:url('data:image/svg+xml;utf8,');--ifm-navbar-sidebar-width:100vw;--ifm-pagination-border-radius:var(--ifm-global-radius);--ifm-pagination-font-size:1rem;--ifm-pagination-item-active-background:var(--ifm-hover-overlay);--ifm-pagination-page-spacing:0.2em;--ifm-pagination-padding-horizontal:calc(var(--ifm-spacing-horizontal)*1);--ifm-pagination-padding-vertical:calc(var(--ifm-spacing-vertical)*0.25);--ifm-pagination-nav-border-radius:var(--ifm-global-radius);--ifm-pills-spacing:0.125rem;--ifm-tabs-padding-horizontal:1rem;--ifm-tabs-padding-vertical:1rem;--ifm-alert-background-color:#0000;--ifm-color-secondary-contrast-backgr
ound:#0000;--ifm-color-success-contrast-background:#0000;--ifm-color-info-contrast-background:#0000;--ifm-color-warning-contrast-background:#0000;--ifm-color-danger-contrast-background:#0000;--ifm-alert-border-radius:none;--note:#d4d5d8;--tip:#6ace4b;--caution:#ddaa39;--danger:#e46967;--info:#68b1d0;--ifm-color-secondary-dark:var(--note);--ifm-color-secondary-light:var(--note);--ifm-color-success-dark:var(--tip);--ifm-color-success-light:var(--tip);--ifm-color-info-dark:var(--info);--ifm-color-info-light:var(--info);--ifm-color-warning-dark:var(--caution);--ifm-color-warning-light:var(--caution);--ifm-color-danger-dark:var(--danger);--ifm-color-danger-light:var(--danger);--ifm-footer-link-color:rgb(var(--lsd-text-primary));--ifm-color-content-secondary:rgb(var(--lsd-text-primary));--ifm-hero-background-color:#f8f8fa;--ifm-hero-text-color:var(--ifm-color-black);--desktop-footer-gap:200px;--mobile-footer-gap:144px;--doc-grid-gap:16px;--ifm-table-background:#0000!important;--ifm-table-stripe-background:#0000!important;--ifm-pre-border-radius:0!important;--ifm-code-font-size:100%!important;background-color:#0000!important;--doc-sidebar-width:16.66vw!important;--doc-sidebar-max-width:320px;--ifm-navbar-item-padding-vertical:8px;--ifm-navbar-item-padding-horizontal:12px;--docusaurus-announcement-bar-height:auto;--doc-sidebar-width:300px;--doc-sidebar-hidden-width:30px;--card-height:188px;--mobile-width:253px;--mobile-height:176px;--docusaurus-tag-list-border:var(--ifm-color-emphasis-300)}#nprogress .bar{background:var(--docusaurus-progress-bar-color);height:2px;left:0;position:fixed;top:0;width:100%;z-index:1031}#nprogress .peg{box-shadow:0 0 10px var(--docusaurus-progress-bar-color),0 0 5px var(--docusaurus-progress-bar-color);height:100%;opacity:1;position:absolute;right:0;transform:rotate(3deg) 
translateY(-4px);width:100px}body,html{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;height:100%}.theme-admonition,table{overflow:auto}main{min-height:calc(100vh - var(--ifm-navbar-height) - var(--content-padding))}svg *{fill:rgb(var(--lsd-text-primary))}.grid{display:grid;gap:16px}.table-of-contents__link--active,.table-of-contents__link:hover{color:rgb(var(--lsd-text-primary))!important}h1{font-size:var(--lsd-h1-fontSize);font-weight:var(--lsd-h1-fontWeight);line-height:var(--lsd-h1-lineHeight)}h1,h2,h3,h4{font-family:var(--lsd-typography-generic-font-family)}h2{font-size:var(--lsd-h2-fontSize);font-weight:var(--lsd-h2-fontWeight);line-height:var(--lsd-h2-lineHeight)}h3{font-size:var(--lsd-h3-fontSize);font-weight:var(--lsd-h3-fontWeight);line-height:var(--lsd-h3-lineHeight)}h4{font-size:var(--lsd-h4-fontSize);font-weight:var(--lsd-h4-fontWeight);line-height:var(--lsd-h4-lineHeight)}code{background:#ffffff26;border-radius:0;color:rgb(var(--lsd-text-primary));background:rgba(var(--lsd-theme-primary),.1)}pre code{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-body1-fontSize)!important;font-weight:var(--lsd-body1-fontWeight)!important;line-height:var(--lsd-body1-lineHeight)!important}.theme-doc-footer-edit-meta-row div[class*=lastUpdated_] b,table td strong,table th{font-weight:400!important}.alert,.card,.theme-code-block{box-shadow:none!important}.menu__list-item-collapsible>a,.navbar{box-shadow:none}.sans{font-family:var(--lgs-font-family-secondary);line-height:var(--ifm-line-height-secondary)}.blog-wrapper--index [itemprop=blogPost] .markdown,.external-link,.footer__bottom,.footer__copyright,.menu__list-item .menu__link,.theme-doc-footer-edit-meta-row div[class*=lastUpdated_],table{font-size:var(--lsd-body2-fontSize)!important;font-weight:var(--lsd-body2-fontWeight)!important;line-height:var(--lsd-body2-lineHeight)!important;font-family:var(--lsd-typography-generic-font-family)!important}.markdown 
a,.markdown a:hover,article a,article a:hover{text-decoration:underline;text-decoration-color:inherit;text-decoration-color:rgba(var(--lsd-text-primary),.6);text-underline-offset:.3rem}.card{padding:var(--ifm-spacing-vertical) var(--ifm-spacing-horizontal);padding:var(--ifm-alert-padding-vertical) var(--ifm-alert-padding-horizontal)}.table-of-contents__left-border{border:none!important}.menu__link{align-items:center;display:flex;justify-content:space-between}.menu__link span{color:rgba(var(--lsd-text-primary),.6)}.theme-edit-this-page{align-items:center;display:flex;gap:12px}.menuExternalLink_NmtK,.navbar__inner,.navbar__item{align-items:center}.dropdown__link:hover{color:rgb(var(--lsd-text-secondary))}.blog-wrapper>.container>.row>aside>nav>ul li a:hover,.breadcrumbs__item--active,.footerLogoLink_BH7S:hover,.hash-link:focus,.theme-code-block:hover .buttonGroup_Qu4e button,:hover>.hash-link{opacity:1}.breadcrumbs__item:not(:last-child):after{background:none;content:"/";opacity:1}.breadcrumbs__link{background:#0000!important;padding:0}.navbar{height:var(--ifm-navbar-height)}.menu__link--active,.menu__list-item-collapsible--active>:not(a[href="#"]){box-shadow:inset 1px 0 0 rgb(var(--lsd-border-primary))}.navbar__logo,a[class^=sidebarLogo_] img{height:40px}.header-github-link:hover{opacity:.6}.header-github-link:before{background:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 
1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat;content:"";display:flex;height:16px;width:16px}html[data-theme=dark] .header-github-link:before{background:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='%23fff' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") no-repeat}.dropdown__link--active{color:rgb(var(--lsd-text-secondary))!important;background-color:#0000}.row{margin:0}.theme-doc-version-banner *,.theme-doc-version-banner:hover *{color:#fff;text-decoration-color:#fff!important}.blog-wrapper main>section article ul li .lsd-tag>span,.blog-wrapper main>section article ul li .lsd-tag>span>span,.menu__link--active 
span,.theme-doc-version-badge{color:rgb(var(--lsd-text-primary))}.navbar__left-items>a:not(:last-child){margin-right:1.25rem}.navbar__left-items>a:not(:last-child)[href^="/"]{margin-right:1.75rem}.menu__link--sublist-caret:after{background:none;content:url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTYiIGhlaWdodD0iMTYiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggZD0ibTQgOS41My45NC45NEw4IDcuNDE3bDMuMDYgMy4wNTMuOTQtLjk0LTQtNC00IDRaIiBmaWxsPSIjZmZmIi8+PC9zdmc+);filter:unset;height:unset;min-width:unset;width:unset}.menu__list-item--collapsed>.menu__link--sublist-caret:after{transform:rotate(0)!important}.menu__link--active{border-radius:0}.dropdown__menu,.pagination-nav__link{border:1px solid rgb(var(--lsd-border-primary))}.table-of-contents a{padding-left:16px}.table-of-contents__link--active{border-left:1px solid rgb(var(--lsd-border-primary));padding-left:16px}.navbar__item{display:inline-flex;padding:0}.mdx-accordion-item,.navbar__link--active{border-bottom:1px solid rgb(var(--lsd-border-primary))}.dropdown__menu{background:rgb(var(--lsd-surface-primary));border-radius:0;margin-right:-1px;margin-top:6px;padding:0;transition-duration:0ms}.dropdown__link{background-color:#0000;border-radius:0;margin:0;padding:5px 11px;text-decoration:none}.dropdown__link *{text-decoration:inherit!important}.dropdown__link:hover{background-color:#0000}.margin-left-8,.mdx-jpd__external-link-icon{margin-left:8px}.pagination-nav{margin-top:var(--pagination-nav-margin-top)!important}.pagination-nav__link{align-items:center;border-radius:0;display:flex;gap:18px;padding:10px 14px}.theme-doc-version-badge,table td,table th,table thead>tr{border:1px solid rgb(var(--lsd-border-primary))}.pagination-nav__link--next{grid-column:2/3;text-align:right;justify-content:flex-end}.footer__item a svg,.menu__caret,.pagination-nav__label:after,.pagination-nav__label:before,.theme-doc-footer-edit-meta-row div[class*=lastUpdated_] 
small{display:none}.theme-back-to-top-button{display:none!important}.authors_dZ4g,.container_PkUo>div,.footer__link-item,.tag_NdBD{display:inline-block}.lsd-collapse__content{padding:24px}.theme-doc-version-badge{background:#0000;margin-bottom:16px;padding:4px 8px}.navbar__right-items{align-items:center;display:flex}table{border-collapse:collapse;margin-bottom:var(--ifm-spacing-vertical);display:table;table-layout:fixed;width:100%}table th{text-align:left}table td{color:var(--ifm-table-cell-color)}.theme-doc-footer{margin-bottom:96px}.theme-doc-footer-edit-meta-row div[class*=lastUpdated_]{font-style:normal;opacity:.7}.navbar-sidebar__item{padding-block:0;padding-inline:var(--content-padding)}.navbar-sidebar__back{align-items:center;display:flex;gap:16px;margin:24px 0!important;padding:0;width:100%!important}.theme-doc-sidebar-menu .menu__link{padding-left:var(--content-padding);padding-right:0}.menu__list-item-collapsible>a[aria-expanded=true]>svg{transform:rotate(180deg)}main[itemtype*=Blog]{margin-left:0}.mdx-ghc-subheader-text,main[itemtype*=Blog] .theme-doc-breadcrumbs{margin-bottom:40px}main[itemtype*=Blog] .blog-divider{background:rgb(var(--lsd-theme-primary));margin-block:40px}.container{padding-inline:var(--content-padding)}.main-wrapper>.container{margin-bottom:80px!important;margin-top:0!important}.main-wrapper>main,.main-wrapper>main>.row,.main-wrapper>main>.row>.col{--ifm-spacing-horizontal:var(--content-padding);flex-basis:100%!important;flex:unset;margin:0;max-width:unset;width:100%!important}.footer{margin-bottom:18px;width:100%!important}.footer>div{display:grid;gap:1rem;margin:unset;max-width:unset;padding:16px 0}.footer__bottom{grid-column:1/12;grid-row:1/1;text-align:unset}#__docusaurus-base-url-issue-banner-container,.blog-archive-page .main-wrapper header .container p,.blog-wrapper main>section article ul li a:after,.blog-wrapper main>section article ul li a:before,.docSidebarContainer_b6E3,.footer__bottom>div:first-of-type,.groups_p1lF 
.divider_eGUz:last-child,.mdx-accordion-item>input,.modal_kLVz .closeButton_Rr0e,.sidebarLogo_Ydl9,.themedImage_ToTc,.wrapper_SWrM .fullscreenButton_Bocn,[data-hidden-doc-sidebar=true] .theme-doc-sidebar-container,[data-theme=dark] .lightToggleIcon_K4TL,[data-theme=light] .darkToggleIcon_lKkA,div[class^=blogHeader] span,html[data-announcement-bar-initially-dismissed=true] .announcementBar_mb4j{display:none}.footer__title{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-label1-fontSize)!important;font-weight:var(--lsd-label1-fontWeight)!important;font-weight:var(--ifm-font-weight-normal);line-height:var(--lsd-label1-lineHeight)!important;margin-bottom:4px}.blog-wrapper main>section article ul li .lsd-tag>span,.blog-wrapper main>section article ul li .lsd-tag>span>span,.footer__item,.footer__item a{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-label1-fontSize)!important;font-weight:var(--lsd-label1-fontWeight)!important;line-height:var(--lsd-label1-lineHeight)!important}.footer>div:first-child>.footer__links{display:grid;gap:12px;grid-column:13/25;grid-row:1/1;margin-bottom:0}.footer>div:nth-child(2)>.footer__links{display:grid;gap:2rem 1rem;grid-column:13/25;grid-row:1/1;grid-template-columns:repeat(2,1fr);margin-bottom:0}.footer__col{grid-template-columns:span 1}.blog-wrapper main>section article ul li a,.codeBlockStandalone_BRZX,.codeBlockStandalone_MEMb,.col.footer__col,.modal_kLVz>div>div:first-child,.noResults_mD2O,.root_N57j 
ul,.root_ONDG.empty_a7qb{padding:0}.footer__items{display:flex;flex-direction:row;flex-wrap:wrap;gap:.5rem}.footer__item{display:inline-flex}.footer__item:not(:last-child):after{content:"•";display:inline-block;margin-left:8px;text-decoration:none}.footer__logo{max-width:var(--ifm-footer-logo-max-width);margin:0!important}.navbar__inner{flex-direction:row}.hero--primary{--ifm-hero-background-color:rgb(var(--lsd-surface-primary));--ifm-hero-text-color:rgb(var(--lsd-text-primary))}.docusaurus-mermaid-container{margin-block:40px}.overflow-hidden{overflow:hidden}.hidden-scrollbar{-ms-overflow-style:none;scrollbar-width:none}.hidden-scrollbar::-webkit-scrollbar{display:none;width:0}[class^=mdx-]{--lsd-h1-fontWeight:100;--lsd-h2-fontWeight:300;--lsd-h3-fontWeight:300;--lsd-h4-fontWeight:300;--lsd-h5-fontWeight:300;--lsd-h6-fontWeight:300;--lsd-subtitle1-fontWeight:300}.hidden,.mdx-hero-video .mdx-hero-video__placeholder{opacity:0;visibility:hidden}.blog-wrapper h1[itemprop=headline]{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-h1-fontSize)!important;font-weight:var(--lsd-h1-fontWeight)!important;line-height:var(--lsd-h1-lineHeight)!important}.blog-wrapper>.container{display:flex;margin:0;max-width:unset;padding-inline:0;width:100%}.blog-wrapper>.container>.row,.mdx-roadmap,.mdx-scroll-buttons{width:100%}.blog-wrapper main>header h1{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-h3-fontSize)!important;font-weight:var(--lsd-h3-fontWeight)!important;line-height:var(--lsd-h3-lineHeight)!important;margin-bottom:.5rem}.blog-wrapper>.container>.row>aside{max-width:unset}.blog-wrapper>.container>.row>aside>nav{padding-top:16px;top:var(--ifm-navbar-height)}.blog-wrapper>.container>.row>aside>nav>div:first-child{font-family:var(--lsd-typography-generic-font-family)!important;margin-bottom:0!important;padding:4px 0 4px 15px}.blog-wrapper>.container>.row>aside>nav>ul li 
a{box-sizing:border-box;display:block;font-family:var(--lsd-typography-generic-font-family)!important;opacity:.6;padding:4px 0 4px 2rem}.blog-wrapper--index>.container>.row>aside>nav>div:first-child,.blog-wrapper>.container>.row>aside>nav>ul li a[class*=sidebarItemLinkActive_]{border-color:rgb(var(--lsd-border-primary))}.blog-wrapper main article,.mdx-asset-card:not(.mdx-asset-card--downloadable) .mdx-asset-card__inner,.mdx-erc--with-preview .mdx-erc__preview-image{border-bottom:1px solid rgb(var(--lsd-border-primary))}.blog-wrapper--index h2[itemprop=headline]{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-h4-fontSize)!important;font-weight:var(--lsd-h4-fontWeight)!important;line-height:var(--lsd-h4-lineHeight)!important}.blog-wrapper .breadcrumbs__item,.blog-wrapper--index h2[itemprop=headline]>a,.sidebarItemLink_mo7H:hover{text-decoration:none}.blog-wrapper>.container>.row>main{margin-top:16px;max-width:unset}.blog-archive-page .main-wrapper header .container h1,.blog-wrapper article header h2,.blog-wrapper main>section article h2{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-h3-fontSize)!important;font-weight:var(--lsd-h3-fontWeight)!important;line-height:var(--lsd-h3-lineHeight)!important}.blog-wrapper article footer{align-items:center;display:flex;flex-direction:row;justify-content:space-between;margin-top:24px!important}.blog-wrapper main article{margin-bottom:34px!important;padding-bottom:34px}.blog-wrapper main article:last-of-type{border-bottom:none!important}.blog-archive-page .main-wrapper{padding:2rem var(--content-padding)}.blog-archive-page .main-wrapper header{border-bottom:1px solid var(--ifm-hero-text-color);margin:0;padding:0 0 1rem}.blog-archive-page .main-wrapper header .container{margin:0;padding:0!important}.blog-archive-page .main-wrapper main>section .container{margin:0;max-width:unset;padding:0}.blog-archive-page .main-wrapper main>section 
.container>.row{margin-top:64px}.blog-archive-page .main-wrapper main>section .container>.row>.col{margin:0!important;max-width:unset;padding:0}.blog-archive-page .main-wrapper main>section .container>.row>.col h3{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-h5-fontSize)!important;font-weight:var(--lsd-h5-fontWeight)!important;line-height:var(--lsd-h5-lineHeight)!important}.blog-archive-page .main-wrapper main>section .container>.row>.col a{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-body1-fontSize)!important;font-weight:var(--lsd-body1-fontWeight)!important;line-height:var(--lsd-body1-lineHeight)!important}.blog-archive-page .main-wrapper main>section .container>.row>.col ul{margin:1rem 0 0;padding:0 0 0 1.5rem}.blog-wrapper main>section article{border-bottom:none;padding:0}.blog-wrapper main>section article ul,.mdx-app-card__name{margin-top:1rem}.nav_cMpg,.root_QACb{margin:0 auto;max-width:var(--container-max-width)}.blog-wrapper main>section article ul li{margin:0 .5rem 0 0!important;padding:0!important}.blog-wrapper main>section article ul li .lsd-tag{height:28px;padding:3px 11px}.errorBoundaryError_a6uf{color:red;white-space:pre-wrap}body:not(.navigation-with-keyboard) :not(input):focus{outline:0}[data-theme=dark] .themedImage--dark_i4oU,[data-theme=light] .themedImage--light_HNdA{display:initial}.icon_S7Kx.s_AZDZ svg{width:10px}.icon_S7Kx.m_thRi svg{width:20px}.icon_S7Kx.l_WHPt svg{width:40px}[data-theme=dark] .icon_S7Kx.fill_hNhN,[data-theme=dark] .icon_S7Kx.fill_hNhN *{fill:var(--ifm-color-white)}[data-theme=dark] .icon_S7Kx.stroke_N8dm svg,[data-theme=dark] .icon_S7Kx.stroke_N8dm svg *{stroke:var(--ifm-color-white)}[data-theme=light] .icon_S7Kx.fill_hNhN,[data-theme=light] .icon_S7Kx.fill_hNhN *{fill:var(--ifm-color-black)}[data-theme=light] .icon_S7Kx.stroke_N8dm svg,[data-theme=light] .icon_S7Kx.stroke_N8dm svg 
*{stroke:var(--ifm-color-black)}.backdrop_Ifvc{background:rgb(var(--lsd-surface-primary));z-index:201}.backdrop_Ifvc,.navWrapper_ybYI{height:100%;left:0;opacity:0;position:fixed;top:0;visibility:hidden;width:100%}.navWrapper_ybYI{background:#0000;z-index:203}.navWrapper_ybYI>*{pointer-events:auto}.nav_cMpg{align-items:center;background:rgb(var(--lsd-surface-primary));display:flex;flex-direction:row;height:var(--ifm-navbar-height);justify-content:space-between;padding:var(--ifm-navbar-padding-vertical) var(--ifm-navbar-padding-horizontal)}.visible_ynAX{opacity:1!important;visibility:visible!important}.codeBlockContent_ugSV,.wrapper_SWrM{position:relative}.wrapper_SWrM:not(.active_qZD5) .fullscreenButton_Bocn{background:rgb(var(--lsd-surface-primary));bottom:8px;position:absolute;right:8px;transition:.3s}.root_QACb{padding:0;width:100%}.skipToContent_fXgn{background-color:var(--ifm-background-surface-color);color:var(--ifm-color-emphasis-900);left:100%;padding:calc(var(--ifm-global-spacing)/2) var(--ifm-global-spacing);position:fixed;top:1rem;z-index:calc(var(--ifm-z-index-fixed) + 1)}.skipToContent_fXgn:focus{box-shadow:var(--ifm-global-shadow-md);left:1rem}.closeButton_CVFx{line-height:0;padding:0}.content_knG7{font-size:85%;padding:5px 0;text-align:center}.content_knG7 a{color:inherit}.announcementBar_mb4j{align-items:center;background-color:var(--ifm-color-white);border-bottom:1px solid var(--ifm-color-emphasis-100);color:var(--ifm-color-black);display:flex;height:var(--docusaurus-announcement-bar-height)}.announcementBarPlaceholder_vyr4{flex:0 0 10px}.announcementBarClose_gvF7{align-self:stretch;flex:0 0 30px}.iconExternalLink_nPIU{margin-left:.3rem}.footerLogoLink_BH7S{opacity:.5;transition:opacity var(--ifm-transition-fast) var(--ifm-transition-timing-default)}.firstRow_ar1q{border-top:1px solid rgb(var(--lsd-text-primary));margin-bottom:96px!important}.mdx-cta-section,.mdx-cta-section--list 
.mdx-cta-section__list>div:not(:first-child),.mdx-feature-list__feature,.mdx-showcase-card,.secondRow__ww3{border-top:1px solid rgb(var(--lsd-border-primary))}.firstRow_ar1q,.secondRow__ww3{display:flex;flex-direction:row}.backToTop_wDfN{bottom:16px;left:16px;position:absolute;width:-moz-fit-content;width:fit-content}.l-modal,.l-modal__backdrop{position:fixed;top:0;left:0}.toggle_K23S{border:1px solid rgb(var(--lsd-border-primary));height:32px;width:32px}.toggle_K23S svg{height:14px;width:14px}.toggleButton_dl49{align-items:center;border-radius:50%;display:flex;height:100%;justify-content:center;transition:background var(--ifm-transition-fast);width:100%}.dropdownNavbarItem_o23I,.l-modal__content{border:1px solid rgb(var(--lsd-border-primary))}.toggleButton_dl49:hover{background:var(--ifm-color-emphasis-200)}.toggleButtonDisabled_AAS_{cursor:not-allowed}.darkNavbarColorModeToggle_X3D1:hover{background:var(--ifm-color-gray-800)}.l-modal{height:100vh;opacity:0;transition:.3s;visibility:hidden;width:100%;z-index:9999}.l-modal__container{display:grid;gap:1rem;grid-template-columns:repeat(24,1fr);margin:0 auto;max-width:1376px;overflow:auto}.l-modal--open{opacity:1;visibility:visible}.l-modal__content{background:rgb(var(--lsd-surface-primary));grid-column:8/19;height:-moz-fit-content;height:fit-content;margin:64px 0;max-height:80vh;padding:7px;z-index:9998}.groups_p1lF,.root_ONDG{max-height:60vh;overflow-y:auto}.l-modal__backdrop{background-color:rgba(var(--lsd-surface-primary),.6);height:100%;width:100%;z-index:9997}.modal_kLVz .header_QwCa{align-items:center;display:flex;flex-direction:row;gap:.5rem}.root_EFVO{padding:32px;text-align:center}.root_ONDG{padding:24px}.title_uwS_{font-size:.75rem!important}.item_RHYF{align-items:center;display:flex;flex-direction:row;margin-top:1rem}.item_RHYF .itemTitle_jtAv{flex-grow:1;overflow:hidden;padding-left:1rem;text-overflow:ellipsis;white-space:nowrap}.linkContent__x3v,.root_JWD1{align-items:center;display:flex}.item_RHYF 
.itemTitle_jtAv mark,.root_JWD1 mark{background:none;color:inherit}.root_xZfc{padding:8px 8px 0;position:relative;width:100%}.collapse_LWBd button,.textField_af43{width:100%!important}.textField_af43 button svg{height:auto;max-height:100%!important;width:14px}.root_N57j>div:first-child{color:#828285;padding:0 24px}.root_N57j>div:first-child,.root_N57j>div:first-child *{font-size:.75rem}.root_N57j ul li{margin-top:1rem}.root_JWD1{padding:0 24px}.root_JWD1>div{margin-left:1rem}.mdx-cta-button:hover,.mdx-cta-button:hover *,.root_Ooa0{text-decoration:none!important}.root_Ooa0.level1_fpUf{display:block;padding-left:32px}.icon_sZn2{height:auto;width:16px}.icon_sZn2.fill_fkan,.icon_sZn2.fill_fkan *{fill:currentColor}.icon_sZn2.stroke_j3wH,.icon_sZn2.stroke_j3wH *{stroke:currentColor}[data-theme=light] .icon_sZn2{color:#000}[data-theme=dark] .icon_sZn2{color:#fff}.mdx-feature-list__title,.root_ltHz{padding:24px 0}.groups_p1lF{display:flex;flex-direction:column;gap:16px}[data-theme=dark] .divider_eGUz>div{opacity:.1}[data-theme=dark] .topBar_Dtew button{background-color:var(--ifm-button-color)}.linkContent__x3v{gap:6px;text-decoration:none}.linkContent__x3v:hover{text-decoration:underline;text-decoration-color:rgba(var(--lsd-text-primary),.6);text-underline-offset:.3rem}.externalLinkIcon_Qfue{width:8px}.externalLinkIcon_Qfue svg,.root_TTsz.fullWidth_HBtH{height:auto;width:100%}.dropdownNavbarItem_o23I{height:28px;padding:6px 12px}.navbarHideable_OoEf{transition:transform var(--ifm-transition-fast) ease}.navbarHidden_zoxl{transform:translate3d(0,calc(-100% - 2px),0)}.rightSection_CLeF{align-items:center;display:flex;justify-content:flex-end;white-space:nowrap}.iconButtonGroup_ktNv button:not(:last-child){border-right:1px solid!important;margin-right:-1px!important}.mainWrapper_z2l0{display:flex;flex:1 0 
auto;flex-direction:column}.docusaurus-mt-lg{margin-top:3rem}#__docusaurus{display:flex;flex-direction:column;min-height:100%}.backToTopButton_sjWU{background-color:var(--ifm-color-emphasis-200);border-radius:50%;bottom:1.3rem;box-shadow:var(--ifm-global-shadow-lw);height:3rem;opacity:0;position:fixed;right:1.3rem;transform:scale(0);transition:all var(--ifm-transition-fast) var(--ifm-transition-timing-default);visibility:hidden;width:3rem;z-index:calc(var(--ifm-z-index-fixed) - 1)}.backToTopButton_sjWU:after{background-color:var(--ifm-color-emphasis-1000);content:" ";display:inline-block;height:100%;-webkit-mask:var(--ifm-menu-link-sublist-icon) 50%/2rem 2rem no-repeat;mask:var(--ifm-menu-link-sublist-icon) 50%/2rem 2rem no-repeat;width:100%}.backToTopButtonShow_xfvO{opacity:1;transform:scale(1);visibility:visible}.docMainContainer_gTbr,.docPage__5DB{display:flex;width:100%}.docPage__5DB{flex:1 0}.docsWrapper_BCFX{display:flex;flex:1 0 auto}.root_jG9K{align-items:center;display:flex;flex-direction:column;padding:1rem!important;width:100%}.root_jG9K>div{max-width:796px!important;width:100%}.root_jG9K>div>div{align-items:flex-start;display:flex;flex-direction:column;gap:1rem;padding:0!important}.root_jG9K a{margin-top:2.5rem}.mdx-accordion-item__header{align-items:center;display:flex;flex-direction:row;gap:0 16px;justify-content:space-between;min-height:88px;padding:16px 0}.mdx-asset-card,.mdx-erc{min-height:144px;text-decoration:none!important}.mdx-accordion-item__header,.mdx-accordion-item__header *{cursor:pointer}.mdx-accordion-item__content-wrapper{height:auto;overflow:hidden}.mdx-accordion-item__content{height:0}.mdx-accordion-item--open .mdx-accordion-item__content{height:auto;padding-bottom:1.875rem;padding-top:6px}.mdx-app-card{align-items:flex-start;display:flex;flex-direction:column;padding:1.5rem 0}.mdx-app-card__logo{height:40px;width:auto}.mdx-app-card__description{margin-top:1.5rem}.mdx-app-card__link{margin-top:1.5rem;width:100%}.mdx-app-card__link 
button{padding:10px 16px;width:100%}.mdx-app-card__link button>span{align-items:center;display:block;display:flex;flex-direction:row;justify-content:space-between;width:100%}.root_TTsz{display:inline-block;font-size:0!important;position:relative}.root_TTsz.fullHeight_ls1u{height:100%;width:auto}.root_TTsz .content_feMR{height:100%;left:0;position:absolute;top:0;width:100%}.root_TTsz:not(.keep_Y9Ea) .root_TTsz{height:auto;width:auto}.root_TTsz:not(.keep_Y9Ea) .content_feMR{height:100%;left:unset;position:relative;top:unset;width:100%}.mdx-asset-card{align-items:flex-start;border:1px solid rgb(var(--lsd-border-primary));border-bottom:none;display:flex;flex-direction:column;width:216px}.mdx-asset-card__inner{padding:16px 16px 32px;width:100%}.mdx-asset-card__title,.mdx-ghc__challenge-labels,.mdx-ghc__issue-title{margin-bottom:16px}.mdx-asset-card__image img{height:100%;object-fit:contain;object-position:center center;width:100%}.mdx-asset-card__downloadables{align-items:flex-start;display:flex;flex-direction:row;justify-content:stretch;width:100%}.mdx-asset-card__downloadables>*{flex-grow:1;font-size:12px;text-decoration:none!important;text-underline-offset:unset!important}.mdx-asset-card__downloadables>* button{justify-content:space-between;text-decoration:none;width:100%}.mdx-asset-card__downloadables>:first-child button{border-left:none}.mdx-asset-card__downloadables>:last-child button,.mdx-asset-card__downloadables>:not(:last-child) button{border-right:none}.mdx-box{padding-bottom:var(--mdx-box-bottom);padding-top:var(--mdx-box-top)}.mdx-cta-button,.mdx-section-header--with-description .mdx-section-header__extra{margin-top:2rem}.mdx-cta-button button,.mdx-cta-button button span{font-size:inherit;font-weight:inherit;line-height:inherit}.mdx-cta-button .lsd-button--large{padding:9px 39px}.mdx-cta-button .lsd-button--small{padding:5px 11px}.mdx-cta-section{display:grid;padding:24px 0 0}.mdx-cta-section 
.mdx-cta-section__link,.mdx-jpd__job-link{display:block;width:-moz-fit-content;width:fit-content}.mdx-cta-section--title-only .mdx-cta-section__title{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h5-fontSize);font-weight:var(--lsd-h5-fontWeight);font-weight:300;line-height:var(--lsd-h5-lineHeight);max-width:886px}.mdx-cta-section--title-button .mdx-cta-section__title{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-display4-fontSize);font-weight:var(--lsd-display4-fontWeight);font-weight:300;line-height:var(--lsd-display4-lineHeight);max-width:886px}.mdx-cta-section--full-width .mdx-cta-section__container{display:grid;gap:0 1rem;grid-template-columns:repeat(2,1fr)}.mdx-cta-section--full-width .mdx-cta-section__title{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h5-fontSize);font-weight:var(--lsd-h5-fontWeight);line-height:var(--lsd-h5-lineHeight)}.mdx-cta-section--full-width .mdx-cta-section__description,.mdx-ns__description{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h3-fontSize);font-weight:var(--lsd-h3-fontWeight);line-height:var(--lsd-h3-lineHeight)}.mdx-cta-section--full-width .mdx-cta-section__link{grid-area:2/2/3/3}.mdx-cta-section--simple .mdx-cta-section__title{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h5-fontSize);font-weight:var(--lsd-h5-fontWeight);line-height:var(--lsd-h5-lineHeight)}.mdx-cta-section--simple .mdx-cta-section__description{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h2-fontSize);font-weight:var(--lsd-h2-fontWeight);line-height:var(--lsd-h2-lineHeight);margin-top:2rem}.mdx-cta-section--list{display:grid;gap:0 1rem;grid-template-columns:1fr 1fr}.mdx-cta-section--list .mdx-cta-section__container{grid-column:1/2}.mdx-cta-section--list 
.mdx-cta-section__title{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h5-fontSize);font-weight:var(--lsd-h5-fontWeight);line-height:var(--lsd-h5-lineHeight)}.mdx-cta-section--list .mdx-cta-section__description{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h1-fontSize);font-weight:var(--lsd-h1-fontWeight);line-height:var(--lsd-h1-lineHeight);margin-top:2rem}.mdx-cta-section--list .mdx-cta-section__list{display:flex;flex-direction:column;gap:1.5rem 0;grid-column:2/3}.docItemContainer_hrrU .tocMobile_imaF:not(:first-of-type),.mdx-erc--with-preview .mdx-erc__icon,.mdx-ns__inner .mdx-ns__toast .lsd-toast__close-button,.mdx-roadmap__timeline .mdx-grid-item:last-child .mdx-timeline-item__border,.mdx-timeline-item__item:last-child .mdx-timeline-item__border{display:none}.mdx-cta-section--list .mdx-cta-section__list>div>div{background-color:rgb(var(--lsd-surface-secondary));border-radius:1rem;color:rgb(var(--lsd-text-secondary));display:inline-block;margin-top:1.5rem;padding:3px 11px}.mdx-cta-section--list .mdx-cta-section__list>div>p{margin-top:1rem}.mdx-doc-metadata{display:flex;flex-direction:row;line-height:0;margin-bottom:2.5rem}.mdx-doc-metadata>span:not(:last-child):after{content:"•";display:inline-block;margin-inline:.75rem}.markdown h1+.mdx-doc-metadata,.markdown header+.mdx-doc-metadata{margin-top:calc(var(--ifm-h1-vertical-rhythm-bottom)*-1*var(--ifm-leading) + 1rem)}.mdx-erc{align-items:flex-start;border:1px solid rgb(var(--lsd-border-primary));display:flex;flex-direction:row}.mdx-erc__icon{padding:16px 16px 16px 8px}.mdx-erc__inner{flex-grow:1;padding:16px}.mdx-erc:hover .mdx-erc__title{text-decoration:underline!important}.mdx-erc__logo,.mdx-erc__logo 
svg{height:32px!important;width:32px!important}.mdx-erc__title{-webkit-line-clamp:2;-webkit-box-orient:vertical;display:-webkit-box;margin-top:32px;max-height:calc(var(--lsd-body1-lineHeight)*2);overflow:hidden}.mdx-erc__description{margin-top:8px}.mdx-erc--with-preview{display:flex;flex-direction:column}.mdx-erc--with-preview .mdx-erc__preview-image{height:100%;object-fit:cover;width:100%}.mdx-erc--with-preview .mdx-erc__inner{display:grid;gap:0 16px;grid-template-columns:32px auto;grid-template-rows:auto auto;padding:16px}.mdx-erc--with-preview .mdx-erc__logo{align-self:center;grid-column:1;grid-row:1/span 2}.mdx-erc--with-preview .mdx-erc__title{-webkit-line-clamp:1;-webkit-box-orient:vertical;margin-top:0;max-height:calc(var(--lsd-body1-lineHeight)*1)}.mdx-erc--with-preview .mdx-erc__description{margin-top:4px}.mdx-feature-list{border-top:1px solid rgb(var(--lsd-surface-secondary));position:relative}.mdx-feature-list__feature-index{align-items:center;background:rgb(var(--lsd-surface-secondary));border-radius:100%;color:rgb(var(--lsd-text-secondary))!important;display:inline-flex;height:1.875rem;justify-content:center;padding:.5rem;width:1.875rem}.mdx-feature-list__list{display:grid;grid-template-columns:repeat(2,1fr)}.mdx-feature-list__feature-inner{display:flex;flex-direction:column}.mdx-feature-list__feature-title{margin-top:1rem}.mdx-feature-list__header{align-items:center;display:flex;flex-direction:row;gap:2rem}.mdx-feature-list--cta-bottom .mdx-feature-list__extra{margin-top:3.5rem;padding:0 1rem}.mdx-ghc__container{border-top:1px solid rgb(var(--lsd-border-primary));padding-top:24px}.mdx-ghc__issue-title-link,.mdx-hero-action button:hover,.mdx-hero-action button:hover *,.mdx-hero-action:hover,.mdx-hero-action:hover *{text-decoration:none!important}.mdx-ghc__issue-title-link:hover,.mdx-ghc__view-on-github-link:hover{text-decoration:underline!important}.mdx-ghc__header,.mdx-jpd__header{margin-bottom:40px;margin-top:16px}.mdx-ghc__challenge-label{border:1px 
solid rgb(var(--lsd-border-primary));border-radius:20px;margin-right:8px;padding:4px 8px}.mdx-ghc__view-on-github-link{display:block;margin-bottom:56px;margin-top:40px;text-decoration:none!important;width:-moz-fit-content;width:fit-content}.mdx-ghc__participant-photo{border:1px solid rgb(var(--lsd-border-secondary));border-radius:100%;margin-left:-4px;width:24px}.mdx-ghc__participant-photo-container{align-items:center;direction:rtl;display:flex;justify-content:flex-end;padding-left:4px}.mdx-ghc__comment-count{margin-left:4px}.mdx-ghc__issue-content-grid{align-items:center;display:grid;grid-template-columns:82px 135px 82px 1fr;margin-top:24px;row-gap:12px}.mdx-scroll-buttons>div{display:flex;flex-direction:row;justify-content:flex-end;width:100%}.mdx-hero{--hero-max-height:1080px;display:flex;flex-direction:column;height:calc(100vh - var(--ifm-navbar-height) - 8px);max-height:var(--hero-max-height);position:relative}.sidebar_re4s,.tableOfContents_bqdL{max-height:calc(100vh - var(--ifm-navbar-height) - 2rem);overflow-y:auto}.mdx-hero-action--large button{padding:10px 64px}.mdx-hero-actions{align-items:center;display:flex;flex-direction:row;gap:0 1rem;margin-top:2rem;position:relative;z-index:98}.mdx-hero-description{margin-top:18px;max-width:1260px;position:relative;text-shadow:1px 1px 6px rgba(var(--lsd-surface-primary),1);z-index:-2}.mdx-hero-info{border-bottom:1px solid rgb(var(--lsd-surface-secondary));display:flex;flex-direction:column;justify-content:end;padding:32px 0;transition:margin-bottom .05s ease-in-out}.mdx-profile-card,.mdx-profile-card__avatar{border:1px solid rgb(var(--lsd-border-primary))}.mdx-timeline-item,.mdx-timeline-item__border{border-bottom:1px solid rgb(var(--lsd-border-primary))}.mdx-hero-model{--mdx-hero-model-wrapper-scale:1;position:absolute;transform:translateX(calc(-50vw - var(--container-max-width)*-1/2 + var(--content-padding)*4*-1/2));width:100vw;z-index:0}.mdx-hero-model 
.mdx-hero-model--inner{left:0;position:relative;top:0;width:100%;z-index:0}.mdx-hero-model .mdx-hero-model--inner canvas{pointer-events:unset!important}.mdx-hero-model--top{top:300px}.mdx-hero-model--bottom{top:calc(var(--logos-hero-info-height) - 10px)}.mdx-hero-model--bottom .mdx-hero-model--shade{bottom:10vh;height:40vh}.mdx-hero-model--ascii{filter:grayscale(100%);opacity:.65}.mdx-hero-model--ascii table,.mdx-hero-model--ascii table *{border:none!important;color:rgb(var(--lsd-text-primary));font-family:monospace}.mdx-hero-model--shade{background:linear-gradient(0deg,rgb(var(--lsd-surface-primary)) 37%,rgb(var(--lsd-surface-primary)) 0,rgba(var(--lsd-surface-primary),0) 100%);bottom:0;height:30vh;left:0;position:absolute;width:100%}.mdx-hero-title{font-size:70px!important;line-height:100%!important}.mdx-hero-video{--hero-video-height:120%;--hero-video-min-height:100vh;--hero-video-offset-y:-150px;--hero-video-scale:1.70951586;--hero-video-height-mobile:120%;--hero-video-min-height-mobile:100vh;--hero-video-offset-y-mobile:-50px;--hero-video-scale-mobile:1.70951586;display:grid;flex-grow:1;grid-template-columns:1fr;grid-template-rows:1fr;pointer-events:none;position:relative}.mdx-hero-video>*{align-items:center;display:flex;flex-direction:column;grid-column:1/1;grid-row:1/1;height:var(--hero-video-height);justify-content:center;left:0;min-height:var(--hero-video-min-height);position:absolute;top:0;transform:translateY(var(--hero-video-offset-y));width:100%}.mdx-hero-video img{object-fit:contain}.mdx-hero-video img,.mdx-hero-video video{height:100%;max-width:calc(var(--container-max-width)*var(--hero-video-scale));object-position:top;transform-origin:top center;width:auto}.copyButtonCopied_YovB .copyButtonIcon_ZL7v,.mdx-hero-video--loading .mdx-hero-video__video{opacity:0}.mdx-hero-video--loading .mdx-hero-video__placeholder{opacity:1;visibility:visible}.mdx-jpd__single-job-department-container{border-top:1px solid 
rgb(var(--lsd-border-primary));padding-top:8px}.mdx-jpd__department-title{font-size:12px!important;line-height:16px!important}.mdx-jpd__job-title-container{align-items:center;display:flex;padding-bottom:8px}.mdx-jpd__job-list{list-style-type:none;margin:0;padding:8px 0 24px}.mdx-jpd__job-list-item{padding:14px 0}.mdx-jpd__job-link,.mdx-jpd__job-link:hover{text-decoration:none!important}.mdx-jpd__job-link:hover .mdx-jpd__job-title,.mdx-social-card:hover{text-decoration:underline!important}.mdx-logo-carousel .mdx-section-header__title{flex-basis:100%}.mdx-logo-carousel .mdx-section-header__extra{margin-left:auto}.mdx-logo-carousel .mdx-logo-carousel__inner{margin-top:100px}.mdx-logo-carousel .mdx-logo-carousel__item{flex-shrink:0}.mdx-logo-carousel .mdx-logo-carousel__logo{height:56px;width:auto}.mdx-ns__inner form{margin-top:40px;width:100%}.mdx-ns__inner .mdx-ns__inputs{align-items:center;display:flex;flex-direction:row;gap:16px;width:100%}.mdx-ns__inner .mdx-ns__inputs>*{flex-grow:1}.mdx-ns__inner .mdx-ns__submit-button{margin-top:40px}.mdx-ns__inner .mdx-ns__toast{margin-bottom:24px;margin-top:-16px;width:100%}.mdx-profile-card{align-items:flex-start;display:flex;flex-direction:column;height:var(--card-height);justify-content:space-between;padding:1rem}.mdx-profile-card__profile{display:flex;flex-direction:column;gap:20px}.mdx-profile-card__avatar{border-radius:50%}.mdx-profile-card__avatar,.mdx-profile-card__avatar svg,.mdx-social-card__logo,.mdx-social-card__logo svg{height:40px!important;width:40px!important}.mdx-profile-card__avatar svg rect{fill:unset!important}.mdx-profile-card__buttons{display:flex;flex-direction:row;gap:8px;width:100%}.mdx-profile-card__link{height:-moz-fit-content;height:fit-content;max-width:calc(50% - 4px);position:relative;text-decoration:none}.mdx-timeline-item{display:flex;flex:0 0 
auto;flex-direction:column;gap:1rem;justify-content:space-between;min-height:306px;padding-bottom:23px;scroll-snap-align:start!important;width:236px}.mdx-timeline-item__header{align-items:flex-start;display:flex;flex-direction:column;gap:1rem}.mdx-timeline-item__period-container{position:relative;width:100%}.mdx-timeline-item__border{height:50%;left:0;position:absolute;top:0;width:calc(100% + 1rem);z-index:-1}.mdx-timeline-item--border-dashed .mdx-timeline-item__border{border-bottom-style:dashed}.mdx-timeline-item__period{border-radius:10rem;display:inline-block;padding:3px 12px}.mdx-timeline-item__period--filled{background-color:rgb(var(--lsd-surface-secondary));border:1px solid rgb(var(--lsd-border-secondary));color:rgb(var(--lsd-text-secondary))!important}.mdx-timeline-item__period--transparent{background-color:rgb(var(--lsd-surface-primary));border:1px solid rgb(var(--lsd-border-primary));color:rgb(var(--lsd-text-primary))!important}.mdx-roadmap__timeline{margin-top:6.25rem}.mdx-roadmap__timeline-item{height:100%}.scrollToBottom_NE5w{--offset-y:-2rem;background:rgb(var(--lsd-surface-primary))!important;opacity:1;position:absolute;top:min(100*var(--vh) - var(--ifm-navbar-height),var(--maxTop) - var(--ifm-navbar-height));transform:translateY(-2rem);transform:translateY(calc(-100% + var(--offset-y)));transition:.2s ease-in-out;z-index:100}.scrollToBottom_NE5w.hide_q_kn{opacity:0;transform:translateY(calc(-100% + var(--offset-y) - .625rem))}.mdx-section-header{border-top:1px solid 
rgb(var(--lsd-border-primary));display:flex;flex-direction:row;gap:1rem;padding-top:24px;width:100%}.mdx-section-header>*{flex-basis:50%}.mdx-section-header__title{display:flex;flex-direction:row;gap:2rem}.mdx-showcase-card__inner{display:flex;flex-direction:column;gap:1.5rem;padding-top:1.5rem}.mdx-showcase-card__logo{height:auto;width:58px}.mdx-showcase-card__index{align-items:center;background-color:rgb(var(--lsd-surface-secondary));border-radius:50%;color:rgb(var(--lsd-text-secondary))!important;display:flex;flex-direction:row;height:30px;justify-content:center;width:30px}.mdx-showcase-card__description{margin-top:-.125rem;padding-top:.5rem}.mdx-showcase-card--large .mdx-showcase-card__logo{width:58px}.mdx-showcase-card--small .mdx-showcase-card__logo{width:34px}.mdx-showcase-card--border-solid .mdx-showcase-card__description{border-top:1px solid rgb(var(--lsd-border-primary));padding-top:1rem}.mdx-social-card{align-items:flex-start;border:1px solid rgb(var(--lsd-border-primary));display:flex;flex-direction:column;justify-content:space-between;min-height:144px;padding:1rem;text-decoration:none!important}.mdx-social-card__row{display:flex;justify-content:space-between;width:100%}.sidebar_re4s{position:sticky;top:calc(var(--ifm-navbar-height) + 2rem)}.sidebarItemTitle_pO2u{font-size:var(--ifm-h3-font-size);font-weight:var(--ifm-font-weight-bold)}.sidebarItemList_Yudw{font-size:.9rem}.sidebarItem__DBe{margin-top:.7rem}.sidebarItemLink_mo7H{color:var(--ifm-font-color-base);display:block}.sidebarItemLinkActive_I1ZP{color:var(--ifm-color-primary)!important}.blogHeader_QORd{border-bottom:1px solid rgb(var(--lsd-border-primary));display:flex;flex-direction:column;gap:8px!important;left:0;margin-bottom:40px;margin-inline:var(--content-padding);padding-bottom:24px;padding-top:16px}.blogDescription_dds8,.blogTitle_KnCm{grid-column:span 24}.authorCol_y4tx{flex-grow:1!important;max-width:inherit!important}.imageOnlyAuthorRow_n8pT{display:flex;flex-flow:row 
wrap}.imageOnlyAuthorCol_OswQ{margin-left:.3rem;margin-right:.3rem}.container_PkUo{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-body2-fontSize)!important;font-weight:var(--lsd-body2-fontWeight)!important;line-height:var(--lsd-body2-lineHeight)!important;margin-bottom:0!important;margin-top:32px!important}.blogContainer_wIfk{margin-bottom:16px!important;margin-top:8px!important}.container_PkUo>:not(:last-child):after{content:"•";display:inline-block;font-size:10px;margin-inline:.75rem}.authors_dZ4g>div{align-items:flex-end;display:inline-flex;flex-direction:row;max-width:unset;padding:0;width:auto}.authors_dZ4g>div .avatar{margin:0!important}.authors_dZ4g>div:not(:last-child):after{content:",";display:inline-block;margin-right:4px}.authors_dZ4g .avatar__name{line-height:0}.breadcrumbsContainer_RLvU{--ifm-breadcrumb-size-multiplier:0.8;margin-bottom:24px}.blogPostSubtitle_ysb0{margin-top:12px}.codeBlockContainer_Ckt0{background:var(--prism-background-color);border-radius:var(--ifm-code-border-radius);box-shadow:var(--ifm-global-shadow-lw)}.codeBlockContainer_Ckt0,.codeBlockContainer_EB2s{color:var(--prism-color);margin-bottom:var(--ifm-leading)}.codeBlockContainer_EB2s{background:#0000;border-radius:0}.codeBlockContent_biex{border-radius:inherit;direction:ltr;position:relative}.codeBlockTitle_Ktv7{border-bottom:1px solid var(--ifm-color-emphasis-300);border-top-left-radius:inherit;border-top-right-radius:inherit;font-size:var(--ifm-code-font-size);font-weight:500;padding:.75rem var(--ifm-pre-padding)}.codeBlock_TWhw,.codeBlock_bY9V{--ifm-pre-background:var(--prism-background-color);margin:0;padding:0}.codeBlockTitle_Ktv7+.codeBlockContent_biex .codeBlock_bY9V,.codeBlockTitle_sjMo+.codeBlockContent_ugSV .codeBlock_TWhw{border-top-left-radius:0;border-top-right-radius:0}.codeBlockLines_LDrR,.codeBlockLines_e6Vv{float:left;font:inherit;min-width:100%;padding:var(--ifm-pre-padding)}.buttonGroup_Qu4e button,.buttonGroup__atx 
button{color:var(--prism-color);line-height:0;transition:opacity var(--ifm-transition-fast) ease-in-out}.codeBlockLinesWithNumbering_bsRF,.codeBlockLinesWithNumbering_o6Pm{display:table;padding:var(--ifm-pre-padding) 0}.buttonGroup_Qu4e,.buttonGroup__atx{column-gap:.2rem;display:flex;position:absolute;right:calc(var(--ifm-pre-padding)/2);top:calc(var(--ifm-pre-padding)/2)}.buttonGroup__atx button{align-items:center;background:var(--prism-background-color);border:1px solid var(--ifm-color-emphasis-300);border-radius:var(--ifm-global-radius);display:flex;opacity:0;padding:.4rem}.buttonGroup__atx button:focus-visible,.buttonGroup__atx button:hover{opacity:1!important}.theme-code-block:hover .buttonGroup__atx button{opacity:.4}:where(:root){--docusaurus-highlighted-code-line-bg:#484d5b}:where([data-theme=dark]){--docusaurus-highlighted-code-line-bg:#646464}.theme-code-block-highlighted-line{background-color:var(--docusaurus-highlighted-code-line-bg);display:block;margin:0 calc(var(--ifm-pre-padding)*-1);padding:0 var(--ifm-pre-padding)}.codeLine_lJS_{counter-increment:a;display:table-row}.codeLineNumber_Tfdd{background:var(--ifm-pre-background);display:table-cell;left:0;overflow-wrap:normal;padding:0 var(--ifm-pre-padding);position:sticky;text-align:right;width:1%}.codeLineNumber_Tfdd:before{content:counter(a);opacity:.4}.codeLineContent_feaV{padding-right:var(--ifm-pre-padding)}.theme-code-block:hover .copyButtonCopied_YovB{opacity:1!important}.copyButtonIcons_an20{align-items:center;display:flex;justify-content:center;position:relative}.copyButtonIcon_ZL7v,.copyButtonSuccessIcon_P2h8{fill:currentColor;height:14px;opacity:inherit;transition:all var(--ifm-transition-fast) ease;width:14px}.copyButtonSuccessIcon_P2h8{color:#00d600;opacity:0}.copyButtonCopied_YovB .copyButtonSuccessIcon_P2h8{opacity:1;transition-delay:75ms}.wordWrapButtonIcon_Bwma{height:1.2rem;width:1.2rem}.codeBlockTitle_sjMo{background:var(--prism-background-color);border-bottom:1px solid 
var(--ifm-color-primary);border-top-left-radius:inherit;border-top-right-radius:inherit;font-size:var(--ifm-code-font-size);font-weight:500;padding:.75rem var(--ifm-pre-padding)}.buttonGroup_Qu4e button{align-items:center;background:var(--prism-background-color);border:1px solid rgb(var(--lsd-border-primary));display:flex;height:28px;justify-content:center;opacity:0;width:28px}.buttonGroup_Qu4e button:focus-visible,.buttonGroup_Qu4e button:hover{opacity:1!important}.anchorWithStickyNavbar_LWe7{scroll-margin-top:calc(var(--ifm-navbar-height) + .5rem)}.anchorWithHideOnScrollNavbar_WYt5{scroll-margin-top:.5rem}.hash-link{opacity:0;padding-left:.5rem;transition:opacity var(--ifm-transition-fast);-webkit-user-select:none;user-select:none}.hash-link:before{content:"#"}.img_ev3q{height:auto}.admonition_ntHH{border:1px solid var(--ifm-alert-border-color);display:flex;gap:18px;margin-bottom:24px;padding:18px}.admonitionHeading_JPfy{font-size:1.25rem!important;font-weight:400;line-height:1.5rem!important;margin-bottom:.3rem;text-transform:capitalize}.admonitionHeading_JPfy code{text-transform:none}.admonitionIcon_BGV6{display:inline-block}.admonitionIcon_BGV6 svg{display:inline-block;height:16px;width:16px}.admonitionIcon_BGV6 svg *{stroke:none!important;fill:var(--ifm-alert-border-color)}.admonitionContent_iozl{display:block;margin-top:12px}.tag_dtsN{border:1px solid var(--docusaurus-tag-list-border);transition:border var(--ifm-transition-fast)}.tag_dtsN:hover{--docusaurus-tag-list-border:var(--ifm-link-color);text-decoration:none}.tagRegular_SGIC{border-radius:var(--ifm-global-radius);font-size:90%;padding:.2rem .5rem .3rem}.tagWithCount_gIvh{align-items:center;border-left:0;display:flex;padding:0 .5rem 0 1rem;position:relative}.tagWithCount_gIvh:after,.tagWithCount_gIvh:before{border:1px solid 
var(--docusaurus-tag-list-border);content:"";position:absolute;top:50%;transition:inherit}.tagWithCount_gIvh:before{border-bottom:0;border-right:0;height:1.18rem;right:100%;transform:translate(50%,-50%) rotate(-45deg);width:1.18rem}.tagWithCount_gIvh:after{border-radius:50%;height:.5rem;left:0;transform:translateY(-50%);width:.5rem}.tagWithCount_gIvh span{background:var(--ifm-color-secondary);border-radius:var(--ifm-global-radius);color:var(--ifm-color-black);font-size:.7rem;line-height:1.2;margin-left:.3rem;padding:.1rem .4rem}.root_UH70{align-items:center;display:flex;flex-direction:row}.tags_LT8x{display:inline;margin:0 0 0 1rem!important;padding:0!important}.tag_NdBD:not(:first-child){margin-left:.5rem}.tag_NdBD a{margin:0!important;padding:0!important}.lastUpdated_vwxv{font-size:smaller;font-style:italic;margin-top:.2rem}.blogPostFooterDetailsFull_FF8l{flex-direction:column}.readMoreLink_cNfS span{color:rgb(var(--lsd-text-secondary))!important}.tableOfContents_bqdL{position:sticky;top:calc(var(--ifm-navbar-height) + 1rem)}.tocCollapsibleButton_dxRj{align-items:center;border:1px solid rgb(var(--lsd-border-primary));display:flex;font-size:inherit;justify-content:space-between;padding:.4rem .8rem;width:100%}.tocCollapsibleButtonExpanded_TSyC:after,.tocCollapsibleExpanded_zTjk{transform:none}.tocCollapsible_ROek{background-color:var(--ifm-menu-color-background-active);border-radius:var(--ifm-global-radius);margin:1rem 0}.tocCollapsibleContent_Qsjj>ul{border-left:none;border:1px solid rgb(var(--lsd-border-primary))!important;border-top:none!important;font-size:15px;padding:.2rem 0}.tocCollapsibleContent_Qsjj ul li{margin:.4rem .8rem}.tocCollapsibleContent_Qsjj a{display:block;-webkit-text-decoration:var(--ifm-link-text-decoration);text-decoration:var(--ifm-link-text-decoration)}.badge_AsjZ{background:#0000}.docItemGrid_SzoZ{display:grid;gap:16px;grid-template-columns:repeat(14,1fr)}.gap1_XuuQ{grid-column:span 1}.toc_pP_5{grid-column:span 
4}.tocMobile_imaF{margin-bottom:2rem;margin-top:-.5rem}.tocMobile_imaF>div{display:block!important;margin:0}.docItemContainer_hrrU h1{margin-bottom:40px!important}.docItemContainer_hrrU h2,.docItemContainer_hrrU h3{margin-top:32px!important}@media (min-width:0px){.mdx-box{--mdx-box-top:var(--mdx-box-top-xs);--mdx-box-bottom:var(--mdx-box-bottom-xs)}}@media (min-width:576px){.mdx-box{--mdx-box-top:var(--mdx-box-top-sm);--mdx-box-bottom:var(--mdx-box-bottom-sm)}}@media (min-width:768px){.blog-archive-page .main-wrapper main>section .container>.row{display:grid;gap:64px 24px;grid-template-columns:repeat(24,1fr)}.blog-archive-page .main-wrapper main>section .container>.row>.col{grid-column:span 12;margin:0!important;max-width:unset;padding:0 8.3333333333% 0 0}.mdx-box{--mdx-box-top:var(--mdx-box-top-md);--mdx-box-bottom:var(--mdx-box-bottom-md)}.mdx-scroll-buttons__button--with-label{gap:12px;min-width:83px;padding:5px 11px 5px 9px!important;width:auto!important}.mdx-scroll-buttons__button--with-label:first-of-type,.mdx-scroll-buttons__button--with-label:last-of-type{justify-content:flex-start}.mdx-scroll-buttons--spaced>div{gap:0 1rem;justify-content:space-between}.mdx-scroll-buttons--spaced>div>button:not(:last-child){border-right:1px solid rgb(var(--lsd-border-primary))!important}}@media (min-width:997px){.main-wrapper .container{padding-bottom:calc(var(--desktop-footer-gap) - var(--ifm-footer-padding-vertical))!important}.main-wrapper>div{display:grid;gap:1rem;grid-template-columns:repeat(16,42px)}.main-wrapper>div aside{border:none;grid-column:1/5;padding-left:var(--content-padding);width:auto}.main-wrapper>div main{grid-column:6/17;max-width:none}.navbar__inner{display:flex;flex-direction:row;justify-content:flex-start}.navbar__left{flex-basis:8.3333333333%;flex-grow:0}.navbar__left-items{flex:1 0;margin:auto}nav.menu{padding-top:16px}.navbar 
.lsd-icon-button--medium{height:28px!important;width:28px!important}.blog-wrapper>.container>.row{display:grid;gap:16px;grid-template-columns:repeat(24,1fr)}.blog-wrapper>.container>.row>aside{grid-column:1/5}.blog-wrapper>.container>.row>main{grid-column:6/20}.blog-wrapper:not(.blog-wrapper--index)>.container>.row>div:last-child{grid-column:21/25;max-width:unset}.wrapper_SWrM:not(.active_qZD5) .fullscreenButton_Bocn{background:rgb(var(--lsd-surface-primary));bottom:8px;display:flex;opacity:0;position:absolute;right:8px;transition:.3s;visibility:hidden}.wrapper_SWrM:not(.active_qZD5):hover .fullscreenButton_Bocn{opacity:1;visibility:visible}:root{--docusaurus-announcement-bar-height:30px}.announcementBarClose_gvF7,.announcementBarPlaceholder_vyr4{flex-basis:50px}.searchBox_ZlJk{padding:var(--ifm-navbar-item-padding-vertical) var(--ifm-navbar-item-padding-horizontal)}.menuHtmlItem_M9Kj{padding:var(--ifm-menu-link-padding-vertical) var(--ifm-menu-link-padding-horizontal)}.menu_NjKY{flex-grow:1;padding-top:16px}@supports (scrollbar-gutter:stable){.menu_NjKY{scrollbar-gutter:stable}}.menuWithAnnouncementBar_U5pk{margin-bottom:var(--docusaurus-announcement-bar-height)}.sidebar_UEyd{display:flex;flex-direction:column;height:100%;padding-top:var(--ifm-navbar-height)}.sidebarWithHideableNavbar__00c{padding-top:0!important}.sidebarHidden_F1ZE{opacity:0;visibility:hidden}.sidebarLogo_Ydl9{align-items:center;color:inherit!important;display:flex!important;margin:0 var(--ifm-navbar-padding-horizontal);max-height:var(--ifm-navbar-height);min-height:var(--ifm-navbar-height);text-decoration:none!important}.sidebarLogo_Ydl9 img{height:2rem;margin-right:.5rem}.expandButton_m80_{align-items:center;background-color:var(--docusaurus-collapse-button-bg);display:flex;height:100%;justify-content:center;position:absolute;right:0;top:0;transition:background-color var(--ifm-transition-fast) 
ease;width:100%}.expandButton_m80_:focus,.expandButton_m80_:hover{background-color:var(--docusaurus-collapse-button-bg-hover)}.expandButtonIcon_BlDH{transform:rotate(0)}[dir=rtl] .expandButtonIcon_BlDH{transform:rotate(180deg)}.docSidebarContainer_b6E3{border-right:1px solid var(--ifm-toc-border-color);-webkit-clip-path:inset(0);clip-path:inset(0);display:block;margin-top:calc(var(--ifm-navbar-height)*-1);transition:width var(--ifm-transition-fast) ease;width:var(--doc-sidebar-width);will-change:width}.docSidebarContainerHidden_b3ry{cursor:pointer;width:var(--doc-sidebar-hidden-width)}.sidebarViewport_Xe31{height:100%;max-height:100vh;position:sticky;top:0}.docMainContainer_gTbr{flex-grow:1;max-width:calc(100% - var(--doc-sidebar-width))}.docMainContainerEnhanced_Uz_u{max-width:calc(100% - var(--doc-sidebar-hidden-width))}.docItemWrapperEnhanced_czyv{max-width:calc(var(--ifm-container-width) + var(--doc-sidebar-width))!important}.mdx-box{--mdx-box-top:var(--mdx-box-top-lg);--mdx-box-bottom:var(--mdx-box-bottom-lg)}.mdx-feature-list--bottom-aligned .mdx-feature-list__feature{min-height:332px}.mdx-feature-list--bottom-aligned .mdx-feature-list__feature-description{display:flex;flex:1 0;flex-direction:column;justify-content:flex-end}.mdx-feature-list--top-aligned .mdx-feature-list__feature-description{margin-top:136px}.mdx-feature-list__feature{padding:1rem;position:relative}.mdx-feature-list__feature-inner{height:100%;padding:.5rem}.mdx-feature-list__feature-border{background:rgb(var(--lsd-border-primary));height:calc(100% - 2rem);position:absolute;right:0;top:1rem;width:1px}.mdx-feature-list__feature:nth-child(odd){padding-left:0}.mdx-feature-list__feature:nth-child(2n){padding-right:0}.mdx-feature-list__feature:nth-child(2n) .mdx-feature-list__feature-border,.tocMobile_ITEo{display:none}.lastUpdated_vwxv{text-align:right}.docItemGrid_SzoZ{display:grid;gap:var(--doc-grid-gap);grid-template-columns:repeat(11,1fr)}.docItemCol_F52z{grid-column:span 11}}@media 
(min-width:1200px){.main-wrapper>div,.navbar__inner{display:grid;grid-template-columns:repeat(24,1fr);grid-template-columns:repeat(24,1fr)}.main-wrapper>div{gap:1rem}.main-wrapper>div aside{border:none;grid-column:1/5;width:auto}.main-wrapper>div main{grid-column:6/25;max-width:none}.theme-doc-toc-desktop{top:calc(var(--ifm-navbar-height))!important}.navbar__inner{gap:16px;padding:0}.navbar__left{align-items:center;display:flex;flex-direction:row;grid-column:1/6}.navbar__left-items{grid-column:6/20}.navbar__right-items{flex-direction:row;gap:16px;grid-column:20/25;justify-content:flex-end}.blog-archive-page .main-wrapper main>section .container>.row{display:grid;gap:64px 24px;grid-template-columns:repeat(24,1fr)}.blog-archive-page .main-wrapper main>section .container>.row>.col{grid-column:span 8;margin:0!important;max-width:unset;padding:0 12.5% 0 0}.mdx-box{--mdx-box-top:var(--mdx-box-top-xl);--mdx-box-bottom:var(--mdx-box-bottom-xl)}.docItemGrid_SzoZ{display:grid;gap:var(--doc-grid-gap);grid-template-columns:repeat(19,1fr)}.docItemCol_F52z{grid-column:span 14}.tocMobile_imaF{display:none}}@media (min-width:1440px){.container{max-width:var(--ifm-container-width-xl)}}@media (max-width:1440px)and (min-width:992px){.mdx-hero-model{transform:translateX(calc((var(--content-padding) + 0)*-3));width:calc(100vw + var(--content-padding))}}@media (max-width:1199px){:root{--container-max-width:912px}.main-wrapper main>.container{padding-bottom:calc(var(--desktop-footer-gap) - var(--ifm-footer-padding-vertical) - var(--doc-grid-gap))!important}.header-github-link:before{margin-right:16px}.toc_pP_5{display:none}}@media (max-width:997px);@media (max-width:996px){.navbar-sidebar__close,.navbar__toggle{border:1px solid rgb(var(--lsd-border-primary))}.mdx-cta-section__title,.mdx-section-header__title,.navbar-sidebar:not(.navbar-sidebar--show-secondary) 
.menu__link>div{font-size:var(--lsd-subtitle2-fontSize)!important;font-weight:var(--lsd-subtitle2-fontWeight)!important;line-height:var(--lsd-subtitle2-lineHeight)!important}.blog-wrapper h1[itemprop=headline],.blogPostSubtitle_ysb0,.mdx-cta-button .lsd-button--large,.mdx-cta-section--list .mdx-cta-section__list>div>p,.mdx-cta-section__description,.mdx-cta-section__title,.mdx-hero-description,.mdx-section-header__description,.mdx-section-header__title{font-family:var(--lsd-typography-generic-font-family)!important}.col{--ifm-col-width:100%;flex-basis:var(--ifm-col-width);margin-left:0}.footer{--ifm-footer-padding-horizontal:0;padding-bottom:0!important;--ifm-footer-padding-horizontal:var(--content-padding)}.desktop,.footer__link-separator,.navbar__item,.navbar__left-items{display:none}.footer__col{margin-bottom:calc(var(--ifm-spacing-vertical)*3)}.footer__link-item,.main-wrapper>div,.mdx-cta-section--full-width .mdx-cta-section__container,.mdx-cta-section--list,.tocMobile_imaF{display:block}.hero{padding-left:0;padding-right:0}.navbar>.container,.navbar>.container-fluid,.root_xZfc{padding:0}.navbar__toggle{display:inherit}.navbar__search-input{width:9rem}.mdx-section-header,.pills--block,.tabs--block{flex-direction:column}:root{--ifm-background-color:rgb(var(--lsd-surface-primary))}.navbar__color-mode-toggle{display:none!important}.main-wrapper main>.container{padding-bottom:calc(var(--mobile-footer-gap) - var(--ifm-footer-padding-vertical) - var(--doc-grid-gap))!important}.grid{grid-template-columns:repeat(2,1fr)}.footer-grid,.l-modal__container,.mdx-feature-list__list,.mdx-profile-card{grid-template-columns:1fr}.navbar{display:flex;height:56px}.navbar__toggle{align-items:center;box-sizing:border-box;display:flex;height:32px;justify-content:center;margin:0;width:32px}.navbar__toggle svg{height:14px;width:14px}.theme-admonition{display:block!important}.navbar-sidebar:not(.navbar-sidebar--show-secondary) 
.menu{padding-inline:0}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .menu__list{display:flex;flex-direction:column;gap:8px;padding:var(--content-padding)}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .menu__link{border-left:none;padding-left:0}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .menu__link>div{font-family:var(--lsd-typography-generic-font-family)!important;text-decoration:none}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .footer{margin-top:3rem;padding:0!important}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .footer>div{display:flex!important;flex-direction:column!important;gap:32px!important}.colorModeToggle_GSaI,.dropdownNavbarItem_o23I,.l-modal__backdrop,.navbar-sidebar:not(.navbar-sidebar--show-secondary) .footer>button,.sidebar_re4s,.tableOfContents_bqdL{display:none}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .footer .container.container-fluid{margin-bottom:16px!important;padding-inline:var(--content-padding)}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .footer .container.container-fluid>.footer__links{gap:0!important}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .footer>div[class*=secondRow]{margin:var(--content-padding)}.navbar-sidebar:not(.navbar-sidebar--show-secondary) .footer .footer__links{gap:16px!important}.navbar-sidebar__brand{height:60px;padding-block:0}.navbar-sidebar__brand>button{height:32px;width:32px}.navbar-sidebar__brand>button:first-of-type{margin-left:auto;margin-right:0!important}.navbar-sidebar__brand>button:not(:last-of-type){border-right:none}.navbar-sidebar__close{align-items:center;display:flex;justify-content:center;margin-left:unset}.blog-divider{margin-block:32px!important}.blog-wrapper main,.blogHeader_QORd{padding-inline:var(--content-padding)}.blog-wrapper article footer{align-items:flex-start;flex-direction:column}.blog-wrapper article footer .read-more-link{margin-top:24px}.blog-wrapper article footer .read-more-link,.blog-wrapper article footer 
.read-more-link button{flex-basis:100%;width:100%}.blog-wrapper h1[itemprop=headline]{font-size:var(--lsd-h2-fontSize)!important;font-weight:var(--lsd-h2-fontWeight)!important;line-height:var(--lsd-h2-lineHeight)!important}.wrapper_SWrM>div{overflow:visible!important}.nav_cMpg{height:56px}.backToTop_wDfN{bottom:16px;left:16px;margin-top:76px!important;position:relative}.l-modal{overflow:hidden;width:100vw}.l-modal__container{display:grid;height:100vh;max-width:unset;min-height:-webkit-fill-available;width:100%}.docItemGrid_SzoZ,.mdx-showcase{grid-template-columns:repeat(2,1fr)}.root_ONDG,.root_ltHz{height:100%;overflow:auto}.l-modal__content{border:none;grid-column:1/2;grid-row:1/2;height:100%;margin:0;max-height:100vh;overflow:hidden}.modal_kLVz .header_QwCa{padding:12px 16px}.modal_kLVz .closeButton_Rr0e{display:block;height:2rem;width:2rem}.root_EFVO{padding:40px 24px}.root_ONDG{max-height:100%;padding:24px 24px 64px}.groups_p1lF{max-height:unset;overflow:auto}.root_ltHz{padding:24px 0 64px}.searchBox_ZlJk{position:absolute;right:var(--ifm-navbar-padding-horizontal)}.mdx-cta-button .lsd-button--large{font-size:var(--lsd-label2-fontSize)!important;font-weight:var(--lsd-label2-fontWeight)!important;line-height:var(--lsd-label2-lineHeight)!important;padding:5px 11px}.mdx-cta-section{padding:16px 0 0}.mdx-cta-section--title-button .mdx-cta-section__title{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h4-fontSize);font-weight:var(--lsd-h4-fontWeight);line-height:var(--lsd-h4-lineHeight)}.mdx-feature-list .mdx-feature-list__title,.mdx-feature-list__feature .mdx-feature-list__feature-index{font-size:var(--lsd-subtitle2-fontSize);font-weight:var(--lsd-subtitle2-fontWeight);line-height:var(--lsd-subtitle2-lineHeight);font-family:var(--lsd-typography-generic-font-family)}.mdx-cta-section--full-width .mdx-cta-section__description,.mdx-cta-section--simple .mdx-cta-section__description{margin-top:1.5rem}.mdx-cta-section--list 
.mdx-cta-section__list,.mdx-roadmap__timeline{margin-top:4rem}.mdx-cta-section--list .mdx-cta-section__list>div{border-top:1px solid rgb(var(--lsd-border-primary))}.mdx-cta-section--list .mdx-cta-section__list>div>div{font-size:.875rem!important;line-height:1.25rem!important}.mdx-cta-section--list .mdx-cta-section__list>div>p{font-size:var(--lsd-h4-fontSize)!important;font-weight:var(--lsd-h4-fontWeight)!important;line-height:var(--lsd-h4-lineHeight)!important}.mdx-cta-section__description{font-size:var(--lsd-h4-fontSize)!important;font-weight:var(--lsd-h4-fontWeight)!important;line-height:var(--lsd-h4-lineHeight)!important}.mdx-feature-list .mdx-feature-list__title{padding:1rem 0}.mdx-feature-list--cta-bottom .mdx-feature-list__extra{margin-top:2rem;padding:0}.mdx-feature-list__feature{border:none;border-top:1px solid rgb(var(--lsd-border-primary));padding:1.5rem 0}.mdx-feature-list__feature .mdx-feature-list__feature-index{height:1.625rem;width:1.625rem}.mdx-feature-list__feature .mdx-feature-list__feature-title{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h4-fontSize);font-weight:var(--lsd-h4-fontWeight);line-height:var(--lsd-h4-lineHeight);margin-top:1rem}.mdx-feature-list__feature .mdx-feature-list__feature-description{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-body1-fontSize);font-weight:var(--lsd-body1-fontWeight);line-height:var(--lsd-body1-lineHeight);margin-top:3rem}.mdx-feature-list__feature:nth-child(odd) .mdx-feature-list__feature-inner{border:none}.mdx-hero-action{text-decoration:none!important}.mdx-hero-action button{padding:6px 
12px!important}.mdx-hero-actions{align-items:center;display:flex;flex-direction:row;justify-content:stretch;margin-top:1.5rem;width:100%;z-index:0}.mdx-hero-actions>*{flex-basis:50%}.mdx-hero-actions>a,.mdx-hero-actions>a>button{display:block;width:100%}.mdx-hero-description{font-size:var(--lsd-subtitle1-fontSize)!important;font-weight:var(--lsd-subtitle1-fontWeight)!important;line-height:var(--lsd-subtitle1-lineHeight)!important;margin-top:12px}.mdx-hero-info{padding:24px 0}.mdx-hero-model--top{top:calc(var(--logos-hero-info-height)/ 1 - 10px)}.mdx-hero-model{transform:translate(-12px);width:calc(100vw - 20px)}.mdx-hero-model--shade{bottom:0;height:90vh!important}.mdx-hero-title{font-size:28px!important}.mdx-hero-video>*{height:var(--hero-video-height-mobile);min-height:var(--hero-video-min-height-mobile);transform:translateY(var(--hero-video-offset-y-mobile))}.mdx-hero-video img,.mdx-hero-video video{max-width:calc((100vw - 32px)*var(--hero-video-scale-mobile))}.mdx-logo-carousel .mdx-logo-carousel__inner{margin-top:64px}.mdx-timeline-item{width:204px}.scrollToBottom_NE5w{--offset-y:-1rem}.mdx-section-header__description{font-size:var(--lsd-h4-fontSize)!important;font-weight:var(--lsd-h4-fontWeight)!important;line-height:var(--lsd-h4-lineHeight)!important}.mdx-showcase{gap:1.5rem 1rem}.mdx-showcase-card__name{font-size:1.375rem!important;line-height:1.75rem!important}.mdx-showcase-card__description{margin-top:1rem}.mdx-showcase-card__logo{height:auto;width:34px}.container_PkUo{margin-top:24px!important}.blogContainer_wIfk{margin-top:8px!important}.blogPostSubtitle_ysb0{font-size:var(--lsd-subtitle2-fontSize)!important;font-weight:var(--lsd-subtitle2-fontWeight)!important;line-height:var(--lsd-subtitle2-lineHeight)!important;margin-top:4px}.docItemContainer_F8PC{padding:0 .3rem}.docItemGrid_SzoZ{display:grid;padding-left:0}.docItemCol_F52z{grid-column:span 2}}@media (max-width:767px){.blog-archive-page .main-wrapper main>section 
.container>.row{margin-top:0}.blog-archive-page .main-wrapper main>section .container>.row .col{margin-top:40px!important}.mdx-scroll-buttons>div{justify-content:flex-end}.mdx-scroll-buttons>div>button:not(:last-child){border-right:none!important}.mdx-scroll-buttons .mdx-scroll-buttons__label{display:none}.mdx-ns__description{font-family:var(--lsd-typography-generic-font-family);font-size:var(--lsd-h4-fontSize);font-weight:var(--lsd-h4-fontWeight);line-height:var(--lsd-h4-lineHeight)}.mdx-ns__inner form{margin-top:40px}.mdx-ns__inner .mdx-ns__inputs{flex-direction:column;gap:24px}.mdx-ns__inner .mdx-ns__inputs>*{width:100%}.mdx-ns__inner .mdx-ns__submit-button{margin-top:24px}.mdx-profile-card{flex:0 0 var(--mobile-width);height:var(--mobile-height);scroll-snap-align:start!important;width:var(--mobile-width)}.mdx-profile-card__profile{gap:16px}.mdx-profile-card__name{font-family:var(--lsd-typography-generic-font-family)!important;font-size:var(--lsd-h5-fontSize)!important;font-weight:var(--lsd-h5-fontWeight)!important;line-height:var(--lsd-h5-lineHeight)!important}}@media (max-width:576px){.markdown h1:first-child{--ifm-h1-font-size:2rem}.markdown>h2{--ifm-h2-font-size:1.5rem}.markdown>h3{--ifm-h3-font-size:1.25rem}.title_f1Hy{font-size:2rem}}@media (max-width:575px){:root{--ifm-spacing-vertical:var(--ifm-global-spacing);--ifm-spacing-horizontal:var(--ifm-global-spacing);--ifm-button-size-multiplier:1.25;--ifm-button-padding-horizontal:calc(var(--ifm-button-size-multiplier)*var(--ifm-spacing-horizontal));--ifm-button-padding-vertical:calc(var(--ifm-button-size-multiplier)*var(--ifm-spacing-vertical));--ifm-alert-padding-horizontal:calc(var(--ifm-spacing-horizontal)*2);--ifm-alert-padding-vertical:calc(var(--ifm-spacing-vertical)*2)}.markdown>h2{--ifm-h2-font-size:1.75rem}.markdown 
h4{--ifm-h4-font-size:1.25rem!important}.footer>div{display:flex!important;flex-direction:column;gap:72px!important}.footer__links{display:flex!important;flex-direction:column!important;gap:24px!important}.col.footer__col{margin-bottom:0!important}.navbar__left{display:flex}.footer{--ifm-footer-padding-horizontal:0}.firstRow_ar1q{padding-inline:var(--content-padding)!important}.secondRow__ww3{margin-inline:var(--content-padding)!important}}@media (max-width:574px){.mdx-ghc__issue-content-grid{grid-template-columns:82px 1fr}}@media (hover:hover){.backToTopButton_sjWU:hover{background-color:var(--ifm-color-emphasis-300)}}@media (pointer:fine){.thin-scrollbar{scrollbar-width:thin}.thin-scrollbar::-webkit-scrollbar{height:var(--ifm-scrollbar-size);width:var(--ifm-scrollbar-size)}.thin-scrollbar::-webkit-scrollbar-track{background:var(--ifm-scrollbar-track-background-color);border-radius:10px}.thin-scrollbar::-webkit-scrollbar-thumb{background:var(--ifm-scrollbar-thumb-background-color);border-radius:10px}.thin-scrollbar::-webkit-scrollbar-thumb:hover{background:var(--ifm-scrollbar-thumb-hover-background-color)}}@media (prefers-reduced-motion:reduce){:root{--ifm-transition-fast:0ms;--ifm-transition-slow:0ms}}@media print{.announcementBar_mb4j,.footer,.menu,.navbar,.pagination-nav,.table-of-contents,.tocMobile_ITEo{display:none}.tabs{page-break-inside:avoid}.codeBlockLines_LDrR,.codeBlockLines_e6Vv{white-space:pre-wrap}} \ No newline at end of file diff --git a/assets/images/N11M-87240745a0c2c63e625cab407b6f0439.png b/assets/images/N11M-87240745a0c2c63e625cab407b6f0439.png new file mode 100644 index 00000000..bc28d58e Binary files /dev/null and b/assets/images/N11M-87240745a0c2c63e625cab407b6f0439.png differ diff --git a/assets/images/NM-81c4d4cfe2ecb8a75dbdbfa450ebafa2.png b/assets/images/NM-81c4d4cfe2ecb8a75dbdbfa450ebafa2.png new file mode 100644 index 00000000..b445d3d7 Binary files /dev/null and b/assets/images/NM-81c4d4cfe2ecb8a75dbdbfa450ebafa2.png differ diff 
--git a/assets/images/building_private_infra_adaptive-69974a7e087e209572e1c2faf162e5d5.png b/assets/images/building_private_infra_adaptive-69974a7e087e209572e1c2faf162e5d5.png new file mode 100644 index 00000000..257363f5 Binary files /dev/null and b/assets/images/building_private_infra_adaptive-69974a7e087e209572e1c2faf162e5d5.png differ diff --git a/assets/images/building_private_infra_interactions-8e9eee38a9a67973c36d11d111bdc384.png b/assets/images/building_private_infra_interactions-8e9eee38a9a67973c36d11d111bdc384.png new file mode 100644 index 00000000..a26b159d Binary files /dev/null and b/assets/images/building_private_infra_interactions-8e9eee38a9a67973c36d11d111bdc384.png differ diff --git a/assets/images/building_private_infra_misc-16721ea7c68873dbb0276ae7fe665ae5.png b/assets/images/building_private_infra_misc-16721ea7c68873dbb0276ae7fe665ae5.png new file mode 100644 index 00000000..a4af2cf3 Binary files /dev/null and b/assets/images/building_private_infra_misc-16721ea7c68873dbb0276ae7fe665ae5.png differ diff --git a/assets/images/building_private_infra_network-43aa536967aee45b44a1e2a6673b6941.png b/assets/images/building_private_infra_network-43aa536967aee45b44a1e2a6673b6941.png new file mode 100644 index 00000000..2683252c Binary files /dev/null and b/assets/images/building_private_infra_network-43aa536967aee45b44a1e2a6673b6941.png differ diff --git a/assets/images/building_private_infra_principles-699c52e62e0e4de0843ddb23ffbed365.png b/assets/images/building_private_infra_principles-699c52e62e0e4de0843ddb23ffbed365.png new file mode 100644 index 00000000..6e9ba745 Binary files /dev/null and b/assets/images/building_private_infra_principles-699c52e62e0e4de0843ddb23ffbed365.png differ diff --git a/assets/images/building_private_infra_rlnrelay-4823f37fce52d9d44d72ca73028fa9b8.png b/assets/images/building_private_infra_rlnrelay-4823f37fce52d9d44d72ca73028fa9b8.png new file mode 100644 index 00000000..0a3d7415 Binary files /dev/null and 
b/assets/images/building_private_infra_rlnrelay-4823f37fce52d9d44d72ca73028fa9b8.png differ diff --git a/assets/images/building_private_infra_servicecred-b022d763d66e89fb610d8d4552355e3c.png b/assets/images/building_private_infra_servicecred-b022d763d66e89fb610d8d4552355e3c.png new file mode 100644 index 00000000..9c79d145 Binary files /dev/null and b/assets/images/building_private_infra_servicecred-b022d763d66e89fb610d8d4552355e3c.png differ diff --git a/assets/images/building_private_infra_shamir-8f4c8e31d2eaa86b62392514a411b999.png b/assets/images/building_private_infra_shamir-8f4c8e31d2eaa86b62392514a411b999.png new file mode 100644 index 00000000..a56ec0df Binary files /dev/null and b/assets/images/building_private_infra_shamir-8f4c8e31d2eaa86b62392514a411b999.png differ diff --git a/assets/images/building_private_infra_vote-a5992b54f4076642acc8e20ac716c750.png b/assets/images/building_private_infra_vote-a5992b54f4076642acc8e20ac716c750.png new file mode 100644 index 00000000..30859ed8 Binary files /dev/null and b/assets/images/building_private_infra_vote-a5992b54f4076642acc8e20ac716c750.png differ diff --git a/assets/images/building_private_infra_zk-61dc3331f70705c672242b894bc35ab8.png b/assets/images/building_private_infra_zk-61dc3331f70705c672242b894bc35ab8.png new file mode 100644 index 00000000..f5944ed6 Binary files /dev/null and b/assets/images/building_private_infra_zk-61dc3331f70705c672242b894bc35ab8.png differ diff --git a/assets/images/huilong-8b4f76f3e4117a3d34d3e90c0baef066.jpg b/assets/images/huilong-8b4f76f3e4117a3d34d3e90c0baef066.jpg new file mode 100644 index 00000000..4d93bd6e Binary files /dev/null and b/assets/images/huilong-8b4f76f3e4117a3d34d3e90c0baef066.jpg differ diff --git a/assets/images/libp2p_gossipsub_types_of_peering-d0772153a5d11dea7b24c0bdc307a93d.png b/assets/images/libp2p_gossipsub_types_of_peering-d0772153a5d11dea7b24c0bdc307a93d.png new file mode 100644 index 00000000..f977d04f Binary files /dev/null and 
b/assets/images/libp2p_gossipsub_types_of_peering-d0772153a5d11dea7b24c0bdc307a93d.png differ diff --git a/assets/images/light-rln-verifiers-f801999160884be6a1223ee7d76cebcf.png b/assets/images/light-rln-verifiers-f801999160884be6a1223ee7d76cebcf.png new file mode 100644 index 00000000..51959a1f Binary files /dev/null and b/assets/images/light-rln-verifiers-f801999160884be6a1223ee7d76cebcf.png differ diff --git a/assets/images/mvds_batch-8bb753ee771b1f96610ba432fa7fcec3.png b/assets/images/mvds_batch-8bb753ee771b1f96610ba432fa7fcec3.png new file mode 100644 index 00000000..aa3449be Binary files /dev/null and b/assets/images/mvds_batch-8bb753ee771b1f96610ba432fa7fcec3.png differ diff --git a/assets/images/mvds_interactive-b04b5377d67c337013e72abbbd40ec69.png b/assets/images/mvds_interactive-b04b5377d67c337013e72abbbd40ec69.png new file mode 100644 index 00000000..84e24501 Binary files /dev/null and b/assets/images/mvds_interactive-b04b5377d67c337013e72abbbd40ec69.png differ diff --git a/assets/images/proof_generation_time-195632e4864fa4c5f883895f2ea9e9e3.png b/assets/images/proof_generation_time-195632e4864fa4c5f883895f2ea9e9e3.png new file mode 100644 index 00000000..d74d5f60 Binary files /dev/null and b/assets/images/proof_generation_time-195632e4864fa4c5f883895f2ea9e9e3.png differ diff --git a/assets/images/proof_verification_time-c95708ef2a4fc0470114fbceebc6bc30.png b/assets/images/proof_verification_time-c95708ef2a4fc0470114fbceebc6bc30.png new file mode 100644 index 00000000..0863024c Binary files /dev/null and b/assets/images/proof_verification_time-c95708ef2a4fc0470114fbceebc6bc30.png differ diff --git a/assets/images/remote-log-5781aa7290ab5e4b5dbf652f5792ef2e.png b/assets/images/remote-log-5781aa7290ab5e4b5dbf652f5792ef2e.png new file mode 100644 index 00000000..89266024 Binary files /dev/null and b/assets/images/remote-log-5781aa7290ab5e4b5dbf652f5792ef2e.png differ diff --git a/assets/images/rln-message-verification-9fece24cdde2c518766ede67364c958f.png 
b/assets/images/rln-message-verification-9fece24cdde2c518766ede67364c958f.png new file mode 100644 index 00000000..5a56274b Binary files /dev/null and b/assets/images/rln-message-verification-9fece24cdde2c518766ede67364c958f.png differ diff --git a/assets/images/rln-relay-704966b8b4e9245d45ff49dee43f1c05.png b/assets/images/rln-relay-704966b8b4e9245d45ff49dee43f1c05.png new file mode 100644 index 00000000..2ed43f01 Binary files /dev/null and b/assets/images/rln-relay-704966b8b4e9245d45ff49dee43f1c05.png differ diff --git a/assets/images/rln-relay-overview-7a73a646ea1b6b9bdfc62d96d7de296e.png b/assets/images/rln-relay-overview-7a73a646ea1b6b9bdfc62d96d7de296e.png new file mode 100644 index 00000000..d8edfd76 Binary files /dev/null and b/assets/images/rln-relay-overview-7a73a646ea1b6b9bdfc62d96d7de296e.png differ diff --git a/assets/images/rln_dep_tree-0bf1837513daecde1a3de4deb9a8855f.jpg b/assets/images/rln_dep_tree-0bf1837513daecde1a3de4deb9a8855f.jpg new file mode 100644 index 00000000..bdd15bc4 Binary files /dev/null and b/assets/images/rln_dep_tree-0bf1837513daecde1a3de4deb9a8855f.jpg differ diff --git a/assets/images/spam_prevention_in_action-50221f227e3d94be5aeae45193cc04ea.png b/assets/images/spam_prevention_in_action-50221f227e3d94be5aeae45193cc04ea.png new file mode 100644 index 00000000..70977f16 Binary files /dev/null and b/assets/images/spam_prevention_in_action-50221f227e3d94be5aeae45193cc04ea.png differ diff --git a/assets/images/status_scaling_model_fig1-64444f5b246c9fd1014093b193fc3453.png b/assets/images/status_scaling_model_fig1-64444f5b246c9fd1014093b193fc3453.png new file mode 100644 index 00000000..75eff106 Binary files /dev/null and b/assets/images/status_scaling_model_fig1-64444f5b246c9fd1014093b193fc3453.png differ diff --git a/assets/images/status_scaling_model_fig10-d1535b8a2d4061f75fe94d85002c9d31.png b/assets/images/status_scaling_model_fig10-d1535b8a2d4061f75fe94d85002c9d31.png new file mode 100644 index 00000000..12801973 Binary files 
/dev/null and b/assets/images/status_scaling_model_fig10-d1535b8a2d4061f75fe94d85002c9d31.png differ diff --git a/assets/images/status_scaling_model_fig11-1b7a924152d9305d69fe266f18812a9f.png b/assets/images/status_scaling_model_fig11-1b7a924152d9305d69fe266f18812a9f.png new file mode 100644 index 00000000..8a98c8ba Binary files /dev/null and b/assets/images/status_scaling_model_fig11-1b7a924152d9305d69fe266f18812a9f.png differ diff --git a/assets/images/status_scaling_model_fig12-e590a87c5895c90a527a9d21ac1e7fcf.png b/assets/images/status_scaling_model_fig12-e590a87c5895c90a527a9d21ac1e7fcf.png new file mode 100644 index 00000000..30c4b53d Binary files /dev/null and b/assets/images/status_scaling_model_fig12-e590a87c5895c90a527a9d21ac1e7fcf.png differ diff --git a/assets/images/status_scaling_model_fig13-3eacca1ad71d153bfa9f55c665946e60.png b/assets/images/status_scaling_model_fig13-3eacca1ad71d153bfa9f55c665946e60.png new file mode 100644 index 00000000..ddd6b966 Binary files /dev/null and b/assets/images/status_scaling_model_fig13-3eacca1ad71d153bfa9f55c665946e60.png differ diff --git a/assets/images/status_scaling_model_fig2-774c26bf57c92c1d1a30cc6eddc95180.png b/assets/images/status_scaling_model_fig2-774c26bf57c92c1d1a30cc6eddc95180.png new file mode 100644 index 00000000..8b9b900e Binary files /dev/null and b/assets/images/status_scaling_model_fig2-774c26bf57c92c1d1a30cc6eddc95180.png differ diff --git a/assets/images/status_scaling_model_fig3-47788396aa8aa03dcb9fda2dd4ac306f.png b/assets/images/status_scaling_model_fig3-47788396aa8aa03dcb9fda2dd4ac306f.png new file mode 100644 index 00000000..288e3dd0 Binary files /dev/null and b/assets/images/status_scaling_model_fig3-47788396aa8aa03dcb9fda2dd4ac306f.png differ diff --git a/assets/images/status_scaling_model_fig4-f8aaace8b1a824bee26cc451699faa6b.png b/assets/images/status_scaling_model_fig4-f8aaace8b1a824bee26cc451699faa6b.png new file mode 100644 index 00000000..9912d0ae Binary files /dev/null and 
b/assets/images/status_scaling_model_fig4-f8aaace8b1a824bee26cc451699faa6b.png differ diff --git a/assets/images/status_scaling_model_fig5-ffdec86c00f77ab94306c7ae27e9c8c7.png b/assets/images/status_scaling_model_fig5-ffdec86c00f77ab94306c7ae27e9c8c7.png new file mode 100644 index 00000000..6a6cf411 Binary files /dev/null and b/assets/images/status_scaling_model_fig5-ffdec86c00f77ab94306c7ae27e9c8c7.png differ diff --git a/assets/images/status_scaling_model_fig8-0ebc4f49d1fcf101e3bb712af814a2b0.png b/assets/images/status_scaling_model_fig8-0ebc4f49d1fcf101e3bb712af814a2b0.png new file mode 100644 index 00000000..5e659776 Binary files /dev/null and b/assets/images/status_scaling_model_fig8-0ebc4f49d1fcf101e3bb712af814a2b0.png differ diff --git a/assets/images/status_scaling_model_fig9-7b81a7e5843abd908d28b9e5915b6deb.png b/assets/images/status_scaling_model_fig9-7b81a7e5843abd908d28b9e5915b6deb.png new file mode 100644 index 00000000..a1ce2d91 Binary files /dev/null and b/assets/images/status_scaling_model_fig9-7b81a7e5843abd908d28b9e5915b6deb.png differ diff --git a/assets/images/waku1-vs-waku2-10-nodes-fcc807080c17463099e65069a7580532.png b/assets/images/waku1-vs-waku2-10-nodes-fcc807080c17463099e65069a7580532.png new file mode 100644 index 00000000..d37302f9 Binary files /dev/null and b/assets/images/waku1-vs-waku2-10-nodes-fcc807080c17463099e65069a7580532.png differ diff --git a/assets/images/waku1-vs-waku2-150-nodes-aee6ff3d7b339b78fd56cc52eb86268e.png b/assets/images/waku1-vs-waku2-150-nodes-aee6ff3d7b339b78fd56cc52eb86268e.png new file mode 100644 index 00000000..b3d134db Binary files /dev/null and b/assets/images/waku1-vs-waku2-150-nodes-aee6ff3d7b339b78fd56cc52eb86268e.png differ diff --git a/assets/images/waku1-vs-waku2-30-nodes-dc614f343b395e41c450f67e7e753881.png b/assets/images/waku1-vs-waku2-30-nodes-dc614f343b395e41c450f67e7e753881.png new file mode 100644 index 00000000..3b6c7c9e Binary files /dev/null and 
b/assets/images/waku1-vs-waku2-30-nodes-dc614f343b395e41c450f67e7e753881.png differ diff --git a/assets/images/waku1-vs-waku2-50-nodes-7facf8c03b87e2ebb2dd3967fac6e6a0.png b/assets/images/waku1-vs-waku2-50-nodes-7facf8c03b87e2ebb2dd3967fac6e6a0.png new file mode 100644 index 00000000..f00d9b3a Binary files /dev/null and b/assets/images/waku1-vs-waku2-50-nodes-7facf8c03b87e2ebb2dd3967fac6e6a0.png differ diff --git a/assets/images/waku1-vs-waku2-85-nodes-a4c56b478f03d471ae25bc36d6087bbf.png b/assets/images/waku1-vs-waku2-85-nodes-a4c56b478f03d471ae25bc36d6087bbf.png new file mode 100644 index 00000000..d05390a5 Binary files /dev/null and b/assets/images/waku1-vs-waku2-85-nodes-a4c56b478f03d471ae25bc36d6087bbf.png differ diff --git a/assets/images/waku1-vs-waku2-overall-message-rate-aa6f7884f804b62ba14a6b4e369eaee3.png b/assets/images/waku1-vs-waku2-overall-message-rate-aa6f7884f804b62ba14a6b4e369eaee3.png new file mode 100644 index 00000000..2e2f150c Binary files /dev/null and b/assets/images/waku1-vs-waku2-overall-message-rate-aa6f7884f804b62ba14a6b4e369eaee3.png differ diff --git a/assets/images/waku1-vs-waku2-overall-network-size-42e3912f3895367f3458c92e3e46ea47.png b/assets/images/waku1-vs-waku2-overall-network-size-42e3912f3895367f3458c92e3e46ea47.png new file mode 100644 index 00000000..94fe0cc5 Binary files /dev/null and b/assets/images/waku1-vs-waku2-overall-network-size-42e3912f3895367f3458c92e3e46ea47.png differ diff --git a/assets/images/waku_simulation-3923bb91799b72a73608687149068b40.jpeg b/assets/images/waku_simulation-3923bb91799b72a73608687149068b40.jpeg new file mode 100644 index 00000000..0300bafd Binary files /dev/null and b/assets/images/waku_simulation-3923bb91799b72a73608687149068b40.jpeg differ diff --git a/assets/images/waku_v1_routing_small-65bf881ec98bcded566accbbc4f2262d.png b/assets/images/waku_v1_routing_small-65bf881ec98bcded566accbbc4f2262d.png new file mode 100644 index 00000000..f1927bec Binary files /dev/null and 
b/assets/images/waku_v1_routing_small-65bf881ec98bcded566accbbc4f2262d.png differ diff --git a/assets/images/waku_v2_discv5_random_walk_estimation-671ba3d44404d97d719de8853a3cbbbb.svg b/assets/images/waku_v2_discv5_random_walk_estimation-671ba3d44404d97d719de8853a3cbbbb.svg new file mode 100644 index 00000000..8e69b10b --- /dev/null +++ b/assets/images/waku_v2_discv5_random_walk_estimation-671ba3d44404d97d719de8853a3cbbbb.svg @@ -0,0 +1,230 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/assets/images/waku_v2_routing_flood_small-1b9dc4447d094efc9f29656c534ca3b9.png b/assets/images/waku_v2_routing_flood_small-1b9dc4447d094efc9f29656c534ca3b9.png new file mode 100644 index 00000000..e9bc6a9b Binary files /dev/null and b/assets/images/waku_v2_routing_flood_small-1b9dc4447d094efc9f29656c534ca3b9.png differ diff --git a/assets/images/waku_v2_routing_gossip_small-f8d8b32be158ac22c72dc2199b2b8a5a.png b/assets/images/waku_v2_routing_gossip_small-f8d8b32be158ac22c72dc2199b2b8a5a.png new file mode 100644 index 00000000..32fe3c1d Binary files /dev/null and b/assets/images/waku_v2_routing_gossip_small-f8d8b32be158ac22c72dc2199b2b8a5a.png differ diff --git a/assets/images/waku_v2_routing_sharding_small-b830b07d0e5c1ba984dda0e21481328b.png b/assets/images/waku_v2_routing_sharding_small-b830b07d0e5c1ba984dda0e21481328b.png new file mode 100644 index 00000000..4179b50f Binary files /dev/null and b/assets/images/waku_v2_routing_sharding_small-b830b07d0e5c1ba984dda0e21481328b.png differ diff --git a/assets/images/walletconnect-62fa376651fb831e64e9a247d495bf53.png 
b/assets/images/walletconnect-62fa376651fb831e64e9a247d495bf53.png new file mode 100644 index 00000000..54d84902 Binary files /dev/null and b/assets/images/walletconnect-62fa376651fb831e64e9a247d495bf53.png differ diff --git a/assets/images/web3_holy_trinity-fd2023ba2271927950dc70bb56f3c615.png b/assets/images/web3_holy_trinity-fd2023ba2271927950dc70bb56f3c615.png new file mode 100644 index 00000000..da958b4a Binary files /dev/null and b/assets/images/web3_holy_trinity-fd2023ba2271927950dc70bb56f3c615.png differ diff --git a/assets/images/whisper_scalability-4450d316341c4305ffc6f580a776fc2d.png b/assets/images/whisper_scalability-4450d316341c4305ffc6f580a776fc2d.png new file mode 100644 index 00000000..9db87bd8 Binary files /dev/null and b/assets/images/whisper_scalability-4450d316341c4305ffc6f580a776fc2d.png differ diff --git a/assets/js/0b46e8e7.64cdc00f.js b/assets/js/0b46e8e7.64cdc00f.js new file mode 100644 index 00000000..a09a1d0e --- /dev/null +++ b/assets/js/0b46e8e7.64cdc00f.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkvac_dev=self.webpackChunkvac_dev||[]).push([[5387],{94644:(e,t,r)=>{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>u,frontMatter:()=>o,metadata:()=>p,toc:()=>l});var n=r(87462),a=(r(67294),r(3905));const o={hide_table_of_contents:!0},i="Publications",p={unversionedId:"publications",id:"publications",title:"Publications",description:"Papers",source:"@site/docs/publications.md",sourceDirName:".",slug:"/publications",permalink:"/publications",draft:!1,tags:[],version:"current",frontMatter:{hide_table_of_contents:!0}},c={},l=[{value:"Papers",id:"papers",level:2},{value:"Write-ups",id:"write-ups",level:2}],s={toc:l};function 
u(e){let{components:t,...r}=e;return(0,a.kt)("wrapper",(0,n.Z)({},s,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"publications"},"Publications"),(0,a.kt)("h2",{id:"papers"},"Papers"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("a",{parentName:"p",href:"https://arxiv.org/abs/2207.00038"},"Waku: A Family of Modular P2P Protocols For Secure & Censorship-Resistant Communication")," (demo)")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("a",{parentName:"p",href:"https://arxiv.org/pdf/2207.00117.pdf"},"WAKU-RLN-RELAY: Privacy-Preserving Peer-to-Peer Economic Spam Protection")," (full)")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("a",{parentName:"p",href:"https://arxiv.org/abs/2207.00116"},"Privacy-Preserving Spam-Protected Gossip-Based Routing")," (poster)"))),(0,a.kt)("h2",{id:"write-ups"},"Write-ups"),(0,a.kt)("p",null,"See ",(0,a.kt)("a",{parentName:"p",href:"/rlog"},"write-ups"),"."))}u.isMDXComponent=!0},3905:(e,t,r)=>{r.d(t,{Zo:()=>s,kt:()=>m});var n=r(67294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var c=n.createContext({}),l=function(e){var t=n.useContext(c),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},s=function(e){var t=l(e.components);return n.createElement(c.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},f=n.forwardRef((function(e,t){var 
r=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,s=p(e,["components","mdxType","originalType","parentName"]),f=l(r),m=a,d=f["".concat(c,".").concat(m)]||f[m]||u[m]||o;return r?n.createElement(d,i(i({ref:t},s),{},{components:r})):n.createElement(d,i({ref:t},s))}));function m(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=f;var p={};for(var c in t)hasOwnProperty.call(t,c)&&(p[c]=t[c]);p.originalType=e,p.mdxType="string"==typeof e?e:a,i[1]=p;for(var l=2;l{n.r(t),n.d(t,{assets:()=>u,contentTitle:()=>i,default:()=>p,frontMatter:()=>o,metadata:()=>c,toc:()=>l});var r=n(87462),a=(n(67294),n(3905));const o={layout:"post",name:"Introducing nwaku",title:"Introducing nwaku",date:new Date("2022-04-12T10:00:00.000Z"),authors:"hanno",published:!0,slug:"introducing-nwaku",categories:"research",discuss:"https://forum.vac.dev/",toc_min_heading_level:2,toc_max_heading_level:5},i=void 0,c={permalink:"/rlog/introducing-nwaku",source:"@site/rlog/2022-04-12-introducing-nwaku.mdx",title:"Introducing nwaku",description:"Introducing nwaku, a Nim-based Waku v2 client, including a summary of recent developments and preview of current and future focus areas.",date:"2022-04-12T10:00:00.000Z",formattedDate:"April 12, 2022",tags:[],readingTime:10.765,hasTruncateMarker:!0,authors:[{name:"Hanno Cornelius",twitter:"4aelius",github:"jm-clius",key:"hanno"}],frontMatter:{layout:"post",name:"Introducing nwaku",title:"Introducing nwaku",date:"2022-04-12T10:00:00.000Z",authors:"hanno",published:!0,slug:"introducing-nwaku",categories:"research",discuss:"https://forum.vac.dev/",toc_min_heading_level:2,toc_max_heading_level:5},prevItem:{title:"Waku v2 Ambient Peer Discovery",permalink:"/rlog/wakuv2-apd"},nextItem:{title:"Opinion: Pseudo-ethics in the Surveillance Tech Industry",permalink:"/rlog/ethics-surveillance-tech"}},u={authorsImageUrls:[void 0]},l=[],s={toc:l};function 
p(e){let{components:t,...n}=e;return(0,a.kt)("wrapper",(0,r.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("p",null,"Introducing nwaku, a Nim-based Waku v2 client, including a summary of recent developments and preview of current and future focus areas."))}p.isMDXComponent=!0},3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>m});var r=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var u=r.createContext({}),l=function(e){var t=r.useContext(u),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=l(e.components);return r.createElement(u.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,u=e.parentName,s=c(e,["components","mdxType","originalType","parentName"]),d=l(n),m=a,f=d["".concat(u,".").concat(m)]||d[m]||p[m]||o;return n?r.createElement(f,i(i({ref:t},s),{},{components:n})):r.createElement(f,i({ref:t},s))}));function m(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=d;var c={};for(var u in t)hasOwnProperty.call(t,u)&&(c[u]=t[u]);c.originalType=e,c.mdxType="string"==typeof e?e:a,i[1]=c;for(var l=2;l{e.exports=JSON.parse('{"permalink":"/rlog","page":1,"postsPerPage":10,"totalPages":4,"totalCount":31,"nextPage":"/rlog/page/2","blogDescription":"Blog","blogTitle":"Research Blog"}')}}]); \ No newline at end of file diff --git 
a/assets/js/12da0170.40efcc2d.js b/assets/js/12da0170.40efcc2d.js new file mode 100644 index 00000000..d08d769d --- /dev/null +++ b/assets/js/12da0170.40efcc2d.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkvac_dev=self.webpackChunkvac_dev||[]).push([[8056],{16486:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>s,default:()=>c,frontMatter:()=>o,metadata:()=>i,toc:()=>u});var n=a(87462),r=(a(67294),a(3905));const o={layout:"post",name:"Presenting JS-Waku: Waku v2 in the Browser",title:"Presenting JS-Waku: Waku v2 in the Browser",date:new Date("2021-06-04T12:00:00.000Z"),authors:"franck",published:!0,slug:"presenting-js-waku",categories:"platform",image:"/img/js-waku-gist.png",discuss:"https://forum.vac.dev/t/discussion-presenting-js-waku-waku-v2-in-the-browser/81"},s=void 0,i={permalink:"/rlog/presenting-js-waku",source:"@site/rlog/2021-06-04-presenting-js-waku.mdx",title:"Presenting JS-Waku: Waku v2 in the Browser",description:"JS-Waku is bringing Waku v2 to the browser. Learn what we achieved so far and what is next in our pipeline!",date:"2021-06-04T12:00:00.000Z",formattedDate:"June 4, 2021",tags:[],readingTime:6.84,hasTruncateMarker:!0,authors:[{name:"Franck",twitter:"fryorcraken",github:"fryorcraken",key:"franck"}],frontMatter:{layout:"post",name:"Presenting JS-Waku: Waku v2 in the Browser",title:"Presenting JS-Waku: Waku v2 in the Browser",date:"2021-06-04T12:00:00.000Z",authors:"franck",published:!0,slug:"presenting-js-waku",categories:"platform",image:"/img/js-waku-gist.png",discuss:"https://forum.vac.dev/t/discussion-presenting-js-waku-waku-v2-in-the-browser/81"},prevItem:{title:"[Talk at COSCUP] Vac, Waku v2 and Ethereum Messaging",permalink:"/rlog/waku-v2-ethereum-coscup"},nextItem:{title:"Privacy-preserving p2p economic spam protection in Waku v2",permalink:"/rlog/rln-relay"}},p={authorsImageUrls:[void 0]},u=[{value:"Waku v2",id:"waku-v2",level:2},{value:"Waku v2 in the browser",id:"waku-v2-in-the-browser",level:2},{value:"Achievements so 
far",id:"achievements-so-far",level:2},{value:"What's next?",id:"whats-next",level:2}],l={toc:u};function c(e){let{components:t,...a}=e;return(0,r.kt)("wrapper",(0,n.Z)({},l,a,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("p",null,"JS-Waku is bringing Waku v2 to the browser. Learn what we achieved so far and what is next in our pipeline!"),(0,r.kt)("p",null,"For the past 3 months, we have been working on bringing Waku v2 to the browser.\nOur aim is to empower dApps with Waku v2, and it led to the creation of a new library.\nWe believe now is good time to introduce it!"),(0,r.kt)("h2",{id:"waku-v2"},"Waku v2"),(0,r.kt)("p",null,"First, let's review what Waku v2 is and what problem it is trying to solve."),(0,r.kt)("p",null,"Waku v2 comes from a need to have a more scalable, better optimised solution for the Status app to achieve decentralised\ncommunications on resource restricted devices (i.e., mobile phones)."),(0,r.kt)("p",null,"The Status chat feature was initially built over Whisper.\nHowever, Whisper has a number of caveats which makes it inefficient for mobile phones.\nFor example, with Whisper, all devices are receiving all messages which is not ideal for limited data plans."),(0,r.kt)("p",null,"To remediate this, a Waku mode (then Waku v1), based on devp2p, was introduced.\nTo further enable web and restricted resource environments, Waku v2 was created based on libp2p.\nThe migration of the Status chat feature to Waku v2 is currently in progress."),(0,r.kt)("p",null,"We see the need of such solution in the broader Ethereum ecosystem, beyond Status.\nThis is why we are building Waku v2 as a decentralised communication platform for all to use and build on.\nIf you want to read more about Waku v2 and what it aims to achieve,\ncheckout ",(0,r.kt)("a",{parentName:"p",href:"/waku-v2-plan"},"What's the Plan for Waku v2?"),"."),(0,r.kt)("p",null,"Since last year, we have been busy defining and implementing Waku v2 protocols in 
",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku"},"nim-waku"),",\nfrom which you can build ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku#wakunode"},"wakunode2"),".\nWakunode2 is an adaptive and modular Waku v2 node,\nit allows users to run their own node and use the Waku v2 protocols they need.\nThe nim-waku project doubles as a library, that can be used to add Waku v2 support to native applications."),(0,r.kt)("h2",{id:"waku-v2-in-the-browser"},"Waku v2 in the browser"),(0,r.kt)("p",null,"We believe that dApps and wallets can benefit from the Waku network in several ways.\nFor some dApps, it makes sense to enable peer-to-peer communications.\nFor others, machine-to-machine communications would be a great asset.\nFor example, in the case of a DAO,\nWaku could be used for gas-less voting.\nEnabling the DAO to notify their users of a new vote,\nand users to vote without interacting with the blockchain and spending gas."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/murmur"},"Murmur")," was the first attempt to bring Whisper to the browser,\nacting as a bridge between devp2p and libp2p.\nOnce Waku v2 was started and there was a native implementation on top of libp2p,\na ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/vacp2p/waku-web-chat"},"chat POC")," was created to demonstrate the potential of Waku v2\nin web environment.\nIt showed how using js-libp2p with few modifications enabled access to the Waku v2 network.\nThere was still some unresolved challenges.\nFor example, nim-waku only support TCP connections which are not supported by browser applications.\nHence, to connect to other node, the POC was connecting to a NodeJS proxy application using websockets,\nwhich in turn could connect to wakunode2 via TCP."),(0,r.kt)("p",null,"However, to enable dApp and Wallet developers to easily integrate Waku in their product,\nwe need to give them a library that is easy to 
use and works out of the box:\nintroducing ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/js-waku"},"JS-Waku"),"."),(0,r.kt)("p",null,"JS-Waku is a JavaScript library that allows your dApp, wallet or other web app to interact with the Waku v2 network.\nIt is available right now on ",(0,r.kt)("a",{parentName:"p",href:"https://www.npmjs.com/package/js-waku"},"npm"),":"),(0,r.kt)("p",null,(0,r.kt)("inlineCode",{parentName:"p"},"npm install js-waku"),"."),(0,r.kt)("p",null,"As it is written in TypeScript, types are included in the npm package to allow easy integration with TypeScript, ClojureScript and other typed languages that compile to JavaScript."),(0,r.kt)("p",null,"Key Waku v2 protocols are already available:\n",(0,r.kt)("a",{parentName:"p",href:"https://rfc.vac.dev/spec/14/"},"message"),", ",(0,r.kt)("a",{parentName:"p",href:"https://rfc.vac.dev/spec/13/"},"store"),", ",(0,r.kt)("a",{parentName:"p",href:"https://rfc.vac.dev/spec/11/"},"relay")," and ",(0,r.kt)("a",{parentName:"p",href:"https://rfc.vac.dev/spec/19/"},"light push"),",\nenabling your dApp to:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Send and receive near-instant messages on the Waku network (relay),"),(0,r.kt)("li",{parentName:"ul"},"Query nodes for messages that may have been missed, e.g. 
due to poor cellular network (store),"),(0,r.kt)("li",{parentName:"ul"},"Send messages with confirmations (light push).")),(0,r.kt)("p",null,"JS-Waku needs to operate in the same context from which Waku v2 was born:\na restricted environment were connectivity or uptime are not guaranteed;\nJS-Waku brings Waku v2 to the browser."),(0,r.kt)("h2",{id:"achievements-so-far"},"Achievements so far"),(0,r.kt)("p",null,"We focused the past month on developing a ",(0,r.kt)("a",{parentName:"p",href:"https://status-im.github.io/js-waku/"},"ReactJS Chat App"),".\nThe aim was to create enough building blocks in JS-Waku to enable this showcase web app that\nwe now ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku/issues/399"},"use for dogfooding")," purposes."),(0,r.kt)("p",null,"Most of the effort was on getting familiar with the ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/libp2p/js-libp2p"},"js-libp2p")," library\nthat we heavily rely on.\nJS-Waku is the second implementation of Waku v2 protocol,\nso a lot of effort on interoperability was needed.\nFor example, to ensure compatibility with the nim-waku reference implementation,\nwe run our ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/js-waku/blob/90c90dea11dfd1277f530cf5d683fb92992fe141/src/lib/waku_relay/index.spec.ts#L137"},"tests against wakunode2")," as part of the CI."),(0,r.kt)("p",null,"This interoperability effort helped solidify the current Waku v2 specifications:\nBy clarifying the usage of topics\n(",(0,r.kt)("a",{parentName:"p",href:"https://github.com/vacp2p/rfc/issues/327"},"#327"),", ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/vacp2p/rfc/pull/383"},"#383"),"),\nfix discrepancies between specs and nim-waku\n(",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku/issues/418"},"#418"),", ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku/issues/419"},"#419"),")\nand fix small nim-waku & nim-libp2p 
bugs\n(",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku/issues/411"},"#411"),", ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku/issues/439"},"#439"),")."),(0,r.kt)("p",null,"To fully access the waku network, JS-Waku needs to enable web apps to connect to nim-waku nodes.\nA standard way to do so is using secure websockets as it is not possible to connect directly to a TCP port from the browser.\nUnfortunately websocket support is not yet available in ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-libp2p/issues/407"},"nim-libp2p")," so\nwe ended up deploying ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/novnc/websockify"},"websockify")," alongside wakunode2 instances."),(0,r.kt)("p",null,"As we built the ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/js-waku/tree/main/examples/web-chat"},"web chat app"),",\nwe were able to fine tune the API to provide a simple and succinct interface.\nYou can start a node, connect to other nodes and send a message in less than ten lines of code:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-javascript"},"import { Waku } from 'js-waku'\n\nconst waku = await Waku.create({})\n\nconst nodes = await getStatusFleetNodes()\nawait Promise.all(nodes.map((addr) => waku.dial(addr)))\n\nconst msg = WakuMessage.fromUtf8String(\n 'Here is a message!',\n '/my-cool-app/1/my-use-case/proto',\n)\nawait waku.relay.send(msg)\n")),(0,r.kt)("p",null,"We have also put a bounty at ",(0,r.kt)("a",{parentName:"p",href:"https://0xhack.dev/"},"0xHack")," for using JS-Waku\nand running a ",(0,r.kt)("a",{parentName:"p",href:"https://www.youtube.com/watch?v=l77j0VX75QE"},"workshop"),".\nWe were thrilled to have a couple of hackers create new software using our libraries.\nOne of the projects aimed to create a decentralised, end-to-end encrypted messenger app,\nsimilar to what the 
",(0,r.kt)("a",{parentName:"p",href:"https://rfc.vac.dev/spec/20/"},"ETH-DM")," protocol aims to achieve.\nAnother project was a decentralised Twitter platform.\nSuch projects allow us to prioritize the work on JS-Waku and understand how DevEx can be improved."),(0,r.kt)("p",null,"As more developers use JS-Waku, we will evolve the API to allow for more custom and fine-tune usage of the network\nwhile preserving this out of the box experience."),(0,r.kt)("h2",{id:"whats-next"},"What's next?"),(0,r.kt)("p",null,"Next, we are directing our attention towards ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/js-waku/issues/68"},"Developer Experience"),".\nWe already have ",(0,r.kt)("a",{parentName:"p",href:"https://www.npmjs.com/package/js-waku"},"documentation")," available but we want to provide more:\n",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/js-waku/issues/56"},"Tutorials"),", various examples\nand showing how ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/js-waku/issues/72"},"JS-Waku can be used with Web3"),"."),(0,r.kt)("p",null,"By prioritizing DevEx we aim to enable JS-Waku integration in dApps and wallets.\nWe think JS-Waku builds a strong case for machine-to-machine (M2M) communications.\nThe first use cases we are looking into are dApp notifications:\nEnabling dApp to notify their user directly in their wallets!\nLeveraging Waku as a decentralised infrastructure and standard so that users do not have to open their dApp to be notified\nof events such as DAO voting."),(0,r.kt)("p",null,"We already have some POC in the pipeline to enable voting and polling on the Waku network,\nallowing users to save gas by ",(0,r.kt)("strong",{parentName:"p"},"not")," broadcasting each individual vote on the blockchain."),(0,r.kt)("p",null,"To facilitate said applications, we are looking at improving integration with Web3 providers by providing examples\nof signing, validating, encrypting and decrypting messages using 
Web3.\nWaku is privacy conscious, so we will also provide signature and encryption examples decoupled from users' Ethereum identity."),(0,r.kt)("p",null,"As you can read, we have grand plans for JS-Waku and Waku v2.\nThere is a lot to do, and we would love some help so feel free to\ncheck out the new role in our team:\n",(0,r.kt)("a",{parentName:"p",href:"https://status.im/our_team/jobs.html?gh_jid=3157894"},"js-waku: Wallet & Dapp Integration Developer"),".\nWe also have a number of ",(0,r.kt)("a",{parentName:"p",href:"https://status.im/our_team/jobs.html"},"positions")," open to work on Waku protocol and nim-waku."),(0,r.kt)("p",null,"If you are as excited as us by JS-Waku, why not build a dApp with it?\nYou can find documentation on the ",(0,r.kt)("a",{parentName:"p",href:"https://www.npmjs.com/package/js-waku"},"npmjs page"),"."),(0,r.kt)("p",null,"Whether you are a developer, you can come chat with us using ",(0,r.kt)("a",{parentName:"p",href:"https://status-im.github.io/js-waku/"},"WakuJS Web Chat"),"\nor ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/status-im/nim-waku/blob/master/docs/tutorial/chat2.md"},"chat2"),".\nYou can get support in #dappconnect-support on ",(0,r.kt)("a",{parentName:"p",href:"https://discord.gg/j5pGbn7MHZ"},"Vac Discord")," or ",(0,r.kt)("a",{parentName:"p",href:"https://t.me/dappconnectsupport"},"Telegram"),".\nIf you have any ideas on how Waku could enable a specific dapp or use case, do share, we are always keen to hear it."))}c.isMDXComponent=!0},3905:(e,t,a)=>{a.d(t,{Zo:()=>l,kt:()=>m});var n=a(67294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function s(e){for(var t=1;t=0||(r[a]=e[a]);return 
r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),u=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):s(s({},t),e)),a},l=function(e){var t=u(e.components);return n.createElement(p.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,p=e.parentName,l=i(e,["components","mdxType","originalType","parentName"]),h=u(a),m=r,d=h["".concat(p,".").concat(m)]||h[m]||c[m]||o;return a?n.createElement(d,s(s({ref:t},l),{},{components:a})):n.createElement(d,s({ref:t},l))}));function m(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,s=new Array(o);s[0]=h;var i={};for(var p in t)hasOwnProperty.call(t,p)&&(i[p]=t[p]);i.originalType=e,i.mdxType="string"==typeof e?e:r,s[1]=i;for(var u=2;u{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>m,frontMatter:()=>o,metadata:()=>i,toc:()=>u});var r=n(87462),a=(n(67294),n(3905));const o={layout:"post",name:"Noise handshakes as key-exchange mechanism for Waku",title:"Noise handshakes as key-exchange mechanism for Waku",date:new Date("2022-05-17T10:00:00.000Z"),authors:"s1fr0",published:!0,slug:"wakuv2-noise",categories:"research",summary:null,image:"/img/noise/NM.png",discuss:"https://forum.vac.dev/t/discussion-noise-handshakes-as-key-exchange-mechanism-for-waku/137",_includes:["math"]},s=void 0,i={permalink:"/rlog/wakuv2-noise",source:"@site/rlog/2022-05-17-noise.mdx",title:"Noise handshakes as key-exchange mechanism for Waku",description:"We provide an overview of the Noise Protocol Framework as a tool to design efficient and secure key-exchange mechanisms in Waku2.",date:"2022-05-17T10:00:00.000Z",formattedDate:"May 17, 
2022",tags:[],readingTime:21.115,hasTruncateMarker:!0,authors:[{name:"s1fr0",github:"s1fr0",key:"s1fr0"}],frontMatter:{layout:"post",name:"Noise handshakes as key-exchange mechanism for Waku",title:"Noise handshakes as key-exchange mechanism for Waku",date:"2022-05-17T10:00:00.000Z",authors:"s1fr0",published:!0,slug:"wakuv2-noise",categories:"research",summary:null,image:"/img/noise/NM.png",discuss:"https://forum.vac.dev/t/discussion-noise-handshakes-as-key-exchange-mechanism-for-waku/137",_includes:["math"]},prevItem:{title:"Waku Privacy and Anonymity Analysis Part I: Definitions and Waku Relay",permalink:"/rlog/wakuv2-relay-anon"},nextItem:{title:"Waku v2 Ambient Peer Discovery",permalink:"/rlog/wakuv2-apd"}},c={authorsImageUrls:[void 0]},u=[],l={toc:u};function m(e){let{components:t,...n}=e;return(0,a.kt)("wrapper",(0,r.Z)({},l,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("p",null,"We provide an overview of the Noise Protocol Framework as a tool to design efficient and secure key-exchange mechanisms in Waku2."))}m.isMDXComponent=!0},3905:(e,t,n)=>{n.d(t,{Zo:()=>l,kt:()=>f});var r=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function s(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var c=r.createContext({}),u=function(e){var t=r.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):s(s({},t),e)),n},l=function(e){var t=u(e.components);return r.createElement(c.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return 
r.createElement(r.Fragment,{},t)}},p=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,l=i(e,["components","mdxType","originalType","parentName"]),p=u(n),f=a,h=p["".concat(c,".").concat(f)]||p[f]||m[f]||o;return n?r.createElement(h,s(s({ref:t},l),{},{components:n})):r.createElement(h,s({ref:t},l))}));function f(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,s=new Array(o);s[0]=p;var i={};for(var c in t)hasOwnProperty.call(t,c)&&(i[c]=t[c]);i.originalType=e,i.mdxType="string"==typeof e?e:a,s[1]=i;for(var u=2;u{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>u,frontMatter:()=>o,metadata:()=>s,toc:()=>l});var n=r(87462),a=(r(67294),r(3905));const o={layout:"post",name:"DNS Based Discovery",title:"DNS Based Discovery",date:new Date("2020-02-07T12:00:00.000Z"),authors:"dean",published:!0,slug:"dns-based-discovery",categories:"research"},i=void 0,s={permalink:"/rlog/dns-based-discovery",source:"@site/rlog/2020-02-7-dns-based-discovery.mdx",title:"DNS Based Discovery",description:"A look at EIP-1459 and the benefits of DNS based discovery.",date:"2020-02-07T12:00:00.000Z",formattedDate:"February 7, 2020",tags:[],readingTime:5.635,hasTruncateMarker:!0,authors:[{name:"Dean",twitter:"DeanEigenmann",github:"decanus",website:"https://dean.eigenmann.me",key:"dean"}],frontMatter:{layout:"post",name:"DNS Based Discovery",title:"DNS Based Discovery",date:"2020-02-07T12:00:00.000Z",authors:"dean",published:!0,slug:"dns-based-discovery",categories:"research"},prevItem:{title:"Waku Update",permalink:"/rlog/waku-update"},nextItem:{title:"Fixing Whisper with Waku",permalink:"/rlog/fixing-whisper-with-waku"}},c={authorsImageUrls:[void 0]},l=[],p={toc:l};function u(e){let{components:t,...r}=e;return(0,a.kt)("wrapper",(0,n.Z)({},p,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("p",null,"A look at EIP-1459 and the benefits of DNS based 
discovery."))}u.isMDXComponent=!0},3905:(e,t,r)=>{r.d(t,{Zo:()=>p,kt:()=>f});var n=r(67294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var c=n.createContext({}),l=function(e){var t=n.useContext(c),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},p=function(e){var t=l(e.components);return n.createElement(c.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),d=l(r),f=a,y=d["".concat(c,".").concat(f)]||d[f]||u[f]||o;return r?n.createElement(y,i(i({ref:t},p),{},{components:r})):n.createElement(y,i({ref:t},p))}));function f(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var c in t)hasOwnProperty.call(t,c)&&(s[c]=t[c]);s.originalType=e,s.mdxType="string"==typeof e?e:a,i[1]=s;for(var l=2;l{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>i,default:()=>c,frontMatter:()=>o,metadata:()=>s,toc:()=>h});var n=r(87462),a=(r(67294),r(3905));const o={layout:"post",name:"DNS Based Discovery",title:"DNS Based Discovery",date:new Date("2020-02-07T12:00:00.000Z"),authors:"dean",published:!0,slug:"dns-based-discovery",categories:"research"},i=void 0,s={permalink:"/rlog/dns-based-discovery",source:"@site/rlog/2020-02-7-dns-based-discovery.mdx",title:"DNS Based Discovery",description:"A 
look at EIP-1459 and the benefits of DNS based discovery.",date:"2020-02-07T12:00:00.000Z",formattedDate:"February 7, 2020",tags:[],readingTime:5.635,hasTruncateMarker:!0,authors:[{name:"Dean",twitter:"DeanEigenmann",github:"decanus",website:"https://dean.eigenmann.me",key:"dean"}],frontMatter:{layout:"post",name:"DNS Based Discovery",title:"DNS Based Discovery",date:"2020-02-07T12:00:00.000Z",authors:"dean",published:!0,slug:"dns-based-discovery",categories:"research"},prevItem:{title:"Waku Update",permalink:"/rlog/waku-update"},nextItem:{title:"Fixing Whisper with Waku",permalink:"/rlog/fixing-whisper-with-waku"}},l={authorsImageUrls:[void 0]},h=[],p={toc:h};function c(e){let{components:t,...r}=e;return(0,a.kt)("wrapper",(0,n.Z)({},p,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("p",null,"A look at EIP-1459 and the benefits of DNS based discovery."),(0,a.kt)("p",null,"Discovery in p2p networks is the process of how nodes find each other and specific resources they are looking for. Popular discovery protocols, such as ",(0,a.kt)("a",{parentName:"p",href:"https://pdos.csail.mit.edu/~petar/papers/maymounkov-kademlia-lncs.pdf"},"Kademlia")," which utilizes a ",(0,a.kt)("a",{parentName:"p",href:"https://en.wikipedia.org/wiki/Distributed_hash_table"},"distributed hash table")," or DHT, are highly inefficient for resource restricted devices. These methods use short connection windows, and it is quite battery intensive to keep establishing connections. Additionally, we cannot expect a mobile phone for example to synchronize an entire DHT using cellular data."),(0,a.kt)("p",null,"Another issue is how we do the initial bootstrapping. In other words, how does a client find its first node to then discover the rest of the network? 
In most applications, including Status right now, this is done with a ",(0,a.kt)("a",{parentName:"p",href:"https://specs.status.im/spec/1#bootstrapping"},"static list of nodes")," that a client can connect to."),(0,a.kt)("p",null,"In summary, we have a static list that provides us with nodes we can connect to which then allows us to discover the rest of the network using something like Kademlia. But what we need is something that can easily be mutated, guarantees a certain amount of security, and is efficient for resource restricted devices. Ideally our solution would also be robust and scalable."),(0,a.kt)("p",null,"How do we do this?"),(0,a.kt)("p",null,(0,a.kt)("a",{parentName:"p",href:"https://eips.ethereum.org/EIPS/eip-1459"},"EIP 1459: Node Discovery via DNS"),", which is one of the strategies we are using for discovering waku nodes. ",(0,a.kt)("a",{parentName:"p",href:"https://eips.ethereum.org/EIPS/eip-1459"},"EIP-1459")," is a DNS-based discovery protocol that stores ",(0,a.kt)("a",{parentName:"p",href:"https://en.wikipedia.org/wiki/Merkle_tree"},"merkle trees")," in DNS records which contain connection information for nodes."),(0,a.kt)("p",null,(0,a.kt)("em",{parentName:"p"},"Waku is our fork of Whisper. Oskar recently wrote an ",(0,a.kt)("a",{parentName:"em",href:"https://vac.dev/fixing-whisper-with-waku"},"entire post")," explaining it. In short, Waku is our method of fixing the shortcomings of Whisper in a more iterative fashion. You can find the specification ",(0,a.kt)("a",{parentName:"em",href:"https://rfc.vac.dev/spec/6/"},"here"))),(0,a.kt)("p",null,"DNS-based methods for bootstrapping p2p networks are quite popular. Even Bitcoin uses it, but it uses a concept called DNS seeds, which are just DNS servers that are configured to return a list of randomly selected nodes from the network upon being queried. 
This means that although these seeds are hardcoded in the client, the IP addresses of actual nodes do not have to be."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-console"},"> dig dnsseed.bluematt.me +short\n129.226.73.12\n107.180.78.111\n169.255.56.123\n91.216.149.28\n85.209.240.91\n66.232.124.232\n207.55.53.96\n86.149.241.168\n193.219.38.57\n190.198.210.139\n74.213.232.234\n158.181.226.33\n176.99.2.207\n202.55.87.45\n37.205.10.3\n90.133.4.73\n176.191.182.3\n109.207.166.232\n45.5.117.59\n178.211.170.2\n160.16.0.30\n")),(0,a.kt)("p",null,"The above displays the result of querying on of these DNS seeds. All the nodes are stored as ",(0,a.kt)("a",{parentName:"p",href:"https://simpledns.plus/help/a-records"},(0,a.kt)("inlineCode",{parentName:"a"},"A")," records")," for the given domain name. This is quite a simple solution which Bitcoin almost soley relies on since removing the ",(0,a.kt)("a",{parentName:"p",href:"https://en.bitcoin.it/wiki/Network#IRC"},"IRC bootstrapping method in v0.8.2"),"."),(0,a.kt)("p",null,"What makes this DNS based discovery useful? It allows us to have a mutable list of bootstrap nodes without needing to ship a new version of the client every time a list is mutated. It also allows for a more lightweight method of discovering nodes, something very important for resource restricted devices."),(0,a.kt)("p",null,"Additionally, DNS provides us with a robust and scalable infrastructure. This is due to its hierarchical architecture. This hierarchical architecture also already makes it distributed such that the failure of one DNS server does not result in us no longer being able to resolve our name."),(0,a.kt)("p",null,"As with every solution though, there is a trade-off. By storing the list in DNS name an adversary would simply need to censor the DNS records for a specific name. 
This would prevent any new client trying to join the network from being able to do so."),(0,a.kt)("p",null,"One thing you notice when looking at ",(0,a.kt)("a",{parentName:"p",href:"https://eips.ethereum.org/EIPS/eip-1459"},"EIP-1459")," is that it is a lot more technically complex than Bitcoin's way of doing this. So if Bitcoin uses this simple method and has proven that it works, why did we need a new method?"),(0,a.kt)("p",null,"There are multiple reasons, but the main one is ",(0,a.kt)("strong",{parentName:"p"},"security"),". In the Bitcoin example, an attacker could create a new list and no one querying would be able to tell. This is however mitigated in ",(0,a.kt)("a",{parentName:"p",href:"https://eips.ethereum.org/EIPS/eip-1459"},"EIP-1459")," where we can verify the integrity of the entire returned list by storing an entire merkle tree in the DNS records."),(0,a.kt)("p",null,"Let's dive into this. Firstly, a client that is using these DNS records for discovery must know the public key corresponding to the private key controlled by the entity creating the list. This is because the entire list is signed using a secp256k1 private key, giving the client the ability to authenticate the list and know that it has not been tampered with by some external party."),(0,a.kt)("p",null,"So that already makes this a lot safer than the method Bitcoin uses. But how are these lists even stored? 
As previously stated they are stored using ",(0,a.kt)("strong",{parentName:"p"},"merkle trees")," as follows:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"The root of the tree is stored in a ",(0,a.kt)("a",{parentName:"p",href:"https://simpledns.plus/help/txt-records"},(0,a.kt)("inlineCode",{parentName:"a"},"TXT")," record"),", this record contains the tree's root hash, a sequence number which is incremented every time the tree is updated and a signature as stated above."),(0,a.kt)("p",{parentName:"li"},"Additionally, there is also a root hash to a second tree called a ",(0,a.kt)("strong",{parentName:"p"},"link tree"),", it contains the information to different lists. This link tree allows us to delegate trust and build a graph of multiple merkle trees stored across multiple DNS names."),(0,a.kt)("p",{parentName:"li"},"The sequence number ensures that an attacker cannot replace a tree with an older version because when a client reads the tree, they should ensure that the sequence number is greater than the last synchronized version.")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"Using the root hash for the tree, we can find the merkle tree's first branch, the branch is also stored in a ",(0,a.kt)("inlineCode",{parentName:"p"},"TXT")," record. The branch record contains all the hashes of the branch's leafs.")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"Once a client starts reading all the leafs, they can find one of two things: either a new branch record leading them further down the tree or an Ethereum Name Records (ENR) which means they now have the address of a node to connect to! 
To learn more about ethereum node records you can have a look at ",(0,a.kt)("a",{parentName:"p",href:"https://eips.ethereum.org/EIPS/eip-778"},"EIP-778"),", or read a short blog post I wrote explaining them ",(0,a.kt)("a",{parentName:"p",href:"https://dean.eigenmann.me/blog/2020/01/21/network-addresses-in-ethereum/#enr"},"here"),"."))),(0,a.kt)("p",null,"Below is the zone file taken from the ",(0,a.kt)("a",{parentName:"p",href:"https://eips.ethereum.org/EIPS/eip-1459"},"EIP-1459"),", displaying how this looks in practice."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"; name ttl class type content\n@ 60 IN TXT enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA\nC7HRFPF3BLGF3YR4DY5KX3SMBE 86900 IN TXT enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org\nJWXYDBPXYWG6FX3GMDIBFA6CJ4 86900 IN TXT enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24\n2XS2367YHAXJFGLZHVAWLQD4ZY 86900 IN TXT enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA\nH4FHT4B454P6UXFD7JCYQ5PWDY 86900 IN TXT enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI\nMHTDO6TMUBRIA2XWG5LUDACK24 86900 IN TXT enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o\n")),(0,a.kt)("p",null,"All of this has already been introduced into go-ethereum with the pull request ",(0,a.kt)("a",{parentName:"p",href:"https://github.com/ethereum/go-ethereum/pull/20094"},"#20094"),", created by Felix Lange. There's a lot of tooling around it that already exists too which is really cool. 
So if your project is written in Golang and wants to use this, it's relatively simple! Additionally, here's a proof of concept that shows what this might look like with libp2p on ",(0,a.kt)("a",{parentName:"p",href:"https://github.com/decanus/dns-discovery"},"github"),"."),(0,a.kt)("p",null,"I hope this was a helpful explainer into DNS based discovery, and shows ",(0,a.kt)("a",{parentName:"p",href:"https://eips.ethereum.org/EIPS/eip-1459"},"EIP-1459"),"'s benefits over more traditional DNS-based discovery schemes."))}c.isMDXComponent=!0},3905:(e,t,r)=>{r.d(t,{Zo:()=>p,kt:()=>u});var n=r(67294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var l=n.createContext({}),h=function(e){var t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},p=function(e){var t=h(e.components);return n.createElement(l.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,l=e.parentName,p=s(e,["components","mdxType","originalType","parentName"]),d=h(r),u=a,m=d["".concat(l,".").concat(u)]||d[u]||c[u]||o;return r?n.createElement(m,i(i({ref:t},p),{},{components:r})):n.createElement(m,i({ref:t},p))}));function u(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s.mdxType="string"==typeof 
e?e:a,i[1]=s;for(var h=2;h extends ICstVisitor {\n ").concat((0,s.default)(n,(function(e){return function(e){var t=d(e.name);return"".concat(e.name,"(children: ").concat(t,", param?: IN): OUT;")}(e)})).join("\n "),"\n}")))),c.join("\n\n")+"\n"}},62222:function(e,t,r){"use strict";var n,i=this&&this.__extends||(n=function(e,t){return n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r])},n(e,t)},function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");function r(){this.constructor=e}n(e,t),e.prototype=null===t?Object.create(t):(r.prototype=t.prototype,new r)}),a=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0}),t.buildModel=void 0;var s=r(93062),o=a(r(35161)),l=a(r(85564)),c=a(r(52628)),u=a(r(59704)),h=a(r(7739)),d=a(r(28583));t.buildModel=function(e){var t=new p,r=(0,c.default)(e);return(0,o.default)(r,(function(e){return t.visitRule(e)}))};var p=function(e){function t(){return null!==e&&e.apply(this,arguments)||this}return i(t,e),t.prototype.visitRule=function(e){var t=this.visitEach(e.definition),r=(0,h.default)(t,(function(e){return e.propertyName})),n=(0,o.default)(r,(function(e,t){var r=!(0,u.default)(e,(function(e){return!e.canBeNull})),n=e[0].type;return e.length>1&&(n=(0,o.default)(e,(function(e){return e.type}))),{name:t,type:n,optional:r}}));return{name:e.name,properties:n}},t.prototype.visitAlternative=function(e){return this.visitEachAndOverrideWith(e.definition,{canBeNull:!0})},t.prototype.visitOption=function(e){return this.visitEachAndOverrideWith(e.definition,{canBeNull:!0})},t.prototype.visitRepetition=function(e){return this.visitEachAndOverrideWith(e.definition,{canBeNull:!0})},t.prototype.visitRepetitionMandatory=function(e){return 
this.visitEach(e.definition)},t.prototype.visitRepetitionMandatoryWithSeparator=function(e){return this.visitEach(e.definition).concat({propertyName:e.separator.name,canBeNull:!0,type:f(e.separator)})},t.prototype.visitRepetitionWithSeparator=function(e){return this.visitEachAndOverrideWith(e.definition,{canBeNull:!0}).concat({propertyName:e.separator.name,canBeNull:!0,type:f(e.separator)})},t.prototype.visitAlternation=function(e){return this.visitEachAndOverrideWith(e.definition,{canBeNull:!0})},t.prototype.visitTerminal=function(e){return[{propertyName:e.label||e.terminalType.name,canBeNull:!1,type:f(e)}]},t.prototype.visitNonTerminal=function(e){return[{propertyName:e.label||e.nonTerminalName,canBeNull:!1,type:f(e)}]},t.prototype.visitEachAndOverrideWith=function(e,t){return(0,o.default)(this.visitEach(e),(function(e){return(0,d.default)({},e,t)}))},t.prototype.visitEach=function(e){var t=this;return(0,l.default)((0,o.default)(e,(function(e){return t.visit(e)})))},t}(s.GAstVisitor);function f(e){return e instanceof s.NonTerminal?{kind:"rule",name:e.referencedRule.name}:{kind:"token"}}},93062:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.isSequenceProd=t.isBranchingProd=t.isOptionalProd=t.getProductionDslName=t.GAstVisitor=t.serializeProduction=t.serializeGrammar=t.Alternative=t.Alternation=t.RepetitionWithSeparator=t.RepetitionMandatoryWithSeparator=t.RepetitionMandatory=t.Repetition=t.Option=t.NonTerminal=t.Terminal=t.Rule=void 0;var n=r(94490);Object.defineProperty(t,"Rule",{enumerable:!0,get:function(){return n.Rule}}),Object.defineProperty(t,"Terminal",{enumerable:!0,get:function(){return n.Terminal}}),Object.defineProperty(t,"NonTerminal",{enumerable:!0,get:function(){return n.NonTerminal}}),Object.defineProperty(t,"Option",{enumerable:!0,get:function(){return n.Option}}),Object.defineProperty(t,"Repetition",{enumerable:!0,get:function(){return 
n.Repetition}}),Object.defineProperty(t,"RepetitionMandatory",{enumerable:!0,get:function(){return n.RepetitionMandatory}}),Object.defineProperty(t,"RepetitionMandatoryWithSeparator",{enumerable:!0,get:function(){return n.RepetitionMandatoryWithSeparator}}),Object.defineProperty(t,"RepetitionWithSeparator",{enumerable:!0,get:function(){return n.RepetitionWithSeparator}}),Object.defineProperty(t,"Alternation",{enumerable:!0,get:function(){return n.Alternation}}),Object.defineProperty(t,"Alternative",{enumerable:!0,get:function(){return n.Alternative}}),Object.defineProperty(t,"serializeGrammar",{enumerable:!0,get:function(){return n.serializeGrammar}}),Object.defineProperty(t,"serializeProduction",{enumerable:!0,get:function(){return n.serializeProduction}});var i=r(62156);Object.defineProperty(t,"GAstVisitor",{enumerable:!0,get:function(){return i.GAstVisitor}});var a=r(53559);Object.defineProperty(t,"getProductionDslName",{enumerable:!0,get:function(){return a.getProductionDslName}}),Object.defineProperty(t,"isOptionalProd",{enumerable:!0,get:function(){return a.isOptionalProd}}),Object.defineProperty(t,"isBranchingProd",{enumerable:!0,get:function(){return a.isBranchingProd}}),Object.defineProperty(t,"isSequenceProd",{enumerable:!0,get:function(){return a.isSequenceProd}})},53559:function(e,t,r){"use strict";var n=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0}),t.getProductionDslName=t.isBranchingProd=t.isOptionalProd=t.isSequenceProd=void 0;var i=n(r(59704)),a=n(r(711)),s=n(r(64721)),o=r(94490);t.isSequenceProd=function(e){return e instanceof o.Alternative||e instanceof o.Option||e instanceof o.Repetition||e instanceof o.RepetitionMandatory||e instanceof o.RepetitionMandatoryWithSeparator||e instanceof o.RepetitionWithSeparator||e instanceof o.Terminal||e instanceof o.Rule},t.isOptionalProd=function e(t,r){return void 0===r&&(r=[]),!!(t instanceof o.Option||t instanceof 
o.Repetition||t instanceof o.RepetitionWithSeparator)||(t instanceof o.Alternation?(0,i.default)(t.definition,(function(t){return e(t,r)})):!(t instanceof o.NonTerminal&&(0,s.default)(r,t))&&(t instanceof o.AbstractProduction&&(t instanceof o.NonTerminal&&r.push(t),(0,a.default)(t.definition,(function(t){return e(t,r)})))))},t.isBranchingProd=function(e){return e instanceof o.Alternation},t.getProductionDslName=function(e){if(e instanceof o.NonTerminal)return"SUBRULE";if(e instanceof o.Option)return"OPTION";if(e instanceof o.Alternation)return"OR";if(e instanceof o.RepetitionMandatory)return"AT_LEAST_ONE";if(e instanceof o.RepetitionMandatoryWithSeparator)return"AT_LEAST_ONE_SEP";if(e instanceof o.RepetitionWithSeparator)return"MANY_SEP";if(e instanceof o.Repetition)return"MANY";if(e instanceof o.Terminal)return"CONSUME";throw Error("non exhaustive match")}},94490:function(e,t,r){"use strict";var n,i=this&&this.__extends||(n=function(e,t){return n=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r])},n(e,t)},function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");function r(){this.constructor=e}n(e,t),e.prototype=null===t?Object.create(t):(r.prototype=t.prototype,new r)}),a=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0}),t.serializeProduction=t.serializeGrammar=t.Terminal=t.Alternation=t.RepetitionWithSeparator=t.Repetition=t.RepetitionMandatoryWithSeparator=t.RepetitionMandatory=t.Option=t.Alternative=t.Rule=t.NonTerminal=t.AbstractProduction=void 0;var s=a(r(35161)),o=a(r(84486)),l=a(r(47037)),c=a(r(96347)),u=a(r(35937)),h=a(r(28583));function d(e){return t=e,(0,l.default)(t.LABEL)&&""!==t.LABEL?e.LABEL:e.name;var t}var p=function(){function e(e){this._definition=e}return 
Object.defineProperty(e.prototype,"definition",{get:function(){return this._definition},set:function(e){this._definition=e},enumerable:!1,configurable:!0}),e.prototype.accept=function(e){e.visit(this),(0,o.default)(this.definition,(function(t){t.accept(e)}))},e}();t.AbstractProduction=p;var f=function(e){function t(t){var r=e.call(this,[])||this;return r.idx=1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),Object.defineProperty(t.prototype,"definition",{get:function(){return void 0!==this.referencedRule?this.referencedRule.definition:[]},set:function(e){},enumerable:!1,configurable:!0}),t.prototype.accept=function(e){e.visit(this)},t}(p);t.NonTerminal=f;var m=function(e){function t(t){var r=e.call(this,t.definition)||this;return r.orgText="",(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),t}(p);t.Rule=m;var g=function(e){function t(t){var r=e.call(this,t.definition)||this;return r.ignoreAmbiguities=!1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),t}(p);t.Alternative=g;var v=function(e){function t(t){var r=e.call(this,t.definition)||this;return r.idx=1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),t}(p);t.Option=v;var A=function(e){function t(t){var r=e.call(this,t.definition)||this;return r.idx=1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),t}(p);t.RepetitionMandatory=A;var y=function(e){function t(t){var r=e.call(this,t.definition)||this;return r.idx=1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),t}(p);t.RepetitionMandatoryWithSeparator=y;var x=function(e){function t(t){var r=e.call(this,t.definition)||this;return r.idx=1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),t}(p);t.Repetition=x;var b=function(e){function t(t){var r=e.call(this,t.definition)||this;return 
r.idx=1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),t}(p);t.RepetitionWithSeparator=b;var w=function(e){function t(t){var r=e.call(this,t.definition)||this;return r.idx=1,r.ignoreAmbiguities=!1,r.hasPredicates=!1,(0,h.default)(r,(0,u.default)(t,(function(e){return void 0!==e}))),r}return i(t,e),Object.defineProperty(t.prototype,"definition",{get:function(){return this._definition},set:function(e){this._definition=e},enumerable:!1,configurable:!0}),t}(p);t.Alternation=w;var E=function(){function e(e){this.idx=1,(0,h.default)(this,(0,u.default)(e,(function(e){return void 0!==e})))}return e.prototype.accept=function(e){e.visit(this)},e}();function _(e){function t(e){return(0,s.default)(e,_)}if(e instanceof f){var r={type:"NonTerminal",name:e.nonTerminalName,idx:e.idx};return(0,l.default)(e.label)&&(r.label=e.label),r}if(e instanceof g)return{type:"Alternative",definition:t(e.definition)};if(e instanceof v)return{type:"Option",idx:e.idx,definition:t(e.definition)};if(e instanceof A)return{type:"RepetitionMandatory",idx:e.idx,definition:t(e.definition)};if(e instanceof y)return{type:"RepetitionMandatoryWithSeparator",idx:e.idx,separator:_(new E({terminalType:e.separator})),definition:t(e.definition)};if(e instanceof b)return{type:"RepetitionWithSeparator",idx:e.idx,separator:_(new E({terminalType:e.separator})),definition:t(e.definition)};if(e instanceof x)return{type:"Repetition",idx:e.idx,definition:t(e.definition)};if(e instanceof w)return{type:"Alternation",idx:e.idx,definition:t(e.definition)};if(e instanceof E){var n={type:"Terminal",name:e.terminalType.name,label:d(e.terminalType),idx:e.idx};(0,l.default)(e.label)&&(n.terminalLabel=e.label);var i=e.terminalType.PATTERN;return e.terminalType.PATTERN&&(n.pattern=(0,c.default)(i)?i.source:i),n}if(e instanceof m)return{type:"Rule",name:e.name,orgText:e.orgText,definition:t(e.definition)};throw Error("non exhaustive 
match")}t.Terminal=E,t.serializeGrammar=function(e){return(0,s.default)(e,_)},t.serializeProduction=_},62156:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.GAstVisitor=void 0;var n=r(94490),i=function(){function e(){}return e.prototype.visit=function(e){var t=e;switch(t.constructor){case n.NonTerminal:return this.visitNonTerminal(t);case n.Alternative:return this.visitAlternative(t);case n.Option:return this.visitOption(t);case n.RepetitionMandatory:return this.visitRepetitionMandatory(t);case n.RepetitionMandatoryWithSeparator:return this.visitRepetitionMandatoryWithSeparator(t);case n.RepetitionWithSeparator:return this.visitRepetitionWithSeparator(t);case n.Repetition:return this.visitRepetition(t);case n.Alternation:return this.visitAlternation(t);case n.Terminal:return this.visitTerminal(t);case n.Rule:return this.visitRule(t);default:throw Error("non exhaustive match")}},e.prototype.visitNonTerminal=function(e){},e.prototype.visitAlternative=function(e){},e.prototype.visitOption=function(e){},e.prototype.visitRepetition=function(e){},e.prototype.visitRepetitionMandatory=function(e){},e.prototype.visitRepetitionMandatoryWithSeparator=function(e){},e.prototype.visitRepetitionWithSeparator=function(e){},e.prototype.visitAlternation=function(e){},e.prototype.visitTerminal=function(e){},e.prototype.visitRule=function(e){},e}();t.GAstVisitor=i},68877:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.toFastProperties=t.timer=t.PRINT_ERROR=t.PRINT_WARNING=void 0;var n=r(73017);Object.defineProperty(t,"PRINT_WARNING",{enumerable:!0,get:function(){return n.PRINT_WARNING}}),Object.defineProperty(t,"PRINT_ERROR",{enumerable:!0,get:function(){return n.PRINT_ERROR}});var i=r(99870);Object.defineProperty(t,"timer",{enumerable:!0,get:function(){return i.timer}});var a=r(55591);Object.defineProperty(t,"toFastProperties",{enumerable:!0,get:function(){return a.toFastProperties}})},73017:(e,t)=>{"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.PRINT_WARNING=t.PRINT_ERROR=void 0,t.PRINT_ERROR=function(e){console&&console.error&&console.error("Error: ".concat(e))},t.PRINT_WARNING=function(e){console&&console.warn&&console.warn("Warning: ".concat(e))}},99870:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.timer=void 0,t.timer=function(e){var t=(new Date).getTime(),r=e();return{time:(new Date).getTime()-t,value:r}}},55591:(e,t)=>{"use strict";function r(e){function t(){}t.prototype=e;var r=new t;function n(){return typeof r.bar}return n(),n(),e}Object.defineProperty(t,"__esModule",{value:!0}),t.toFastProperties=void 0,t.toFastProperties=r},23558:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.KeepRatio=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=n.__importDefault(r(58147));t.KeepRatio=e=>{let{children:t,width:r,height:n,fullHeight:o=!1,fullWidth:l=!0,rootProps:c,contentProps:u,containerWidth:h,containerHeight:d,keep:p=!0}=e;const f=!o&&l,m=!f,g=100*(m?n/r:r/n);return a.default.createElement("div",{...c??{},className:(0,i.default)(s.default.root,f&&s.default.fullWidth,m&&s.default.fullHeight,p&&s.default.keep,null==c?void 0:c.className)},a.default.createElement("div",{...u??{},className:(0,i.default)(s.default.content,null==u?void 0:u.className)},t),p&&a.default.createElement("svg",{style:{pointerEvents:"none",height:m?"100%":d?`calc(${n} / ${r} * ${h})`:"auto",width:f?"100%":d?`calc(${r} / ${n} * ${d})`:"auto"},viewBox:`0 0 ${f?g:100} ${m?g:100}`}))}},26534:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(23558),t)},22226:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.AccordionItem=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importStar(r(67294));r(11403);const 
s=r(31665),o=r(6980),l=n.__importDefault(r(73955));t.AccordionItem=e=>{let{open:t,onToggle:r,title:n,className:c,children:u,...h}=e;const d=(0,l.default)("accordion-item-"),[p,f]=(0,a.useState)(t??!1);void 0!==t&&t!==p&&f(t);return a.default.createElement("div",{className:(0,i.default)(c,"mdx-accordion-item",p&&"mdx-accordion-item--open"),...h},a.default.createElement("input",{type:"checkbox",id:d,checked:p}),a.default.createElement("div",{role:"button",className:"mdx-accordion-item__header",onClick:()=>{void 0!==t?r&&r(!p):f((e=>!e))}},a.default.createElement(s.Typography,{className:"mdx-accordion-item__title",variant:"h5",component:"label",htmlFor:d},n),a.default.createElement("div",{className:"mdx-accordion-item__icon"},p?a.default.createElement(o.IconRemove,null):a.default.createElement(o.IconAdd,null))),a.default.createElement("div",{className:"mdx-accordion-item__content-wrapper"},a.default.createElement("div",{className:"mdx-accordion-item__content"},u)))}},66169:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(22226),t)},3309:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.AppCard=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(47002)),s=n.__importDefault(r(86010)),o=n.__importDefault(r(67294));r(93255);t.AppCard=e=>{let{logoSrc:t,logoSrcDark:r,name:n,description:l,link:c,linkLabel:u,...h}=e;return o.default.createElement("div",{...h,className:(0,s.default)(h.className,"mdx-app-card")},(t||r)&&o.default.createElement(a.default,{sources:{dark:r??t??"",light:t??r??""},alt:"string"==typeof 
n?n:"",className:"mdx-app-card__logo"}),o.default.createElement(i.Typography,{component:"span",variant:"h5",className:"mdx-app-card__name"},n),o.default.createElement(i.Typography,{variant:"subtitle1",className:"mdx-app-card__description"},l),c&&o.default.createElement("a",{href:c,target:"_blank",className:"mdx-app-card__link"},o.default.createElement(i.Button,{size:"large",variant:"outlined"},o.default.createElement(i.Typography,{variant:"label1",component:"span"},u??o.default.createElement(o.default.Fragment,null,"Visit ",n)),o.default.createElement("span",null,o.default.createElement(i.PickIcon,{color:"primary"})))))}},84614:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(3309),t)},12077:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.AssetCard=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294)),o=r(6980),l=r(26534);r(77877);t.AssetCard=e=>{let{title:t,previewSrc:r,downloadable:n,forceDownload:c=!1,...u}=e;const h=n&&n.length>0;return s.default.createElement("div",{...u,className:(0,a.default)(u.className,"mdx-asset-card",h&&"mdx-asset-card--downloadable")},s.default.createElement("div",{className:"mdx-asset-card__inner"},t&&s.default.createElement(i.Typography,{component:"div",variant:"subtitle2",className:"mdx-asset-card__title"},t),s.default.createElement(l.KeepRatio,{width:16,height:9,fullWidth:!0,rootProps:{className:"mdx-asset-card__image"}},s.default.createElement("img",{src:r,alt:"string"==typeof t&&t||"asset image"}))),h&&s.default.createElement("div",{className:"mdx-asset-card__downloadables"},n.map(((e,r)=>s.default.createElement("a",{href:e.src,target:"_blank",download:!0,onClick:r=>((e,r)=>{if(!c)return;e.preventDefault();const{src:n,filename:i}=r,a=document.createElement("a");a.download=i||"string"==typeof t&&t||"",fetch(n).then((e=>e.blob())).then((e=>{const 
t=window.URL.createObjectURL(e);a.href=t,window.document.body.appendChild(a),a.click(),window.document.body.removeChild(a)})).catch((e=>{console.error("failed to download asset:"+e),a.href=n,a.target="_blank",window.document.body.appendChild(a),a.click(),window.document.body.removeChild(a)}))})(r,e)},s.default.createElement(i.Button,{key:r,variant:"outlined",size:"small",icon:s.default.createElement(o.IconDownload,null)},e.title))))))}},64314:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(12077),t)},93152:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Box=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=r(9259);function o(e,t,r,n){const i="string"==typeof t||"number"==typeof t?{xs:t}:t,a=[],s=e=>"number"==typeof e&&n?`${e}${n}`:`${e}`;return["xs","sm","md","lg","xl"].forEach(((t,n)=>{const o=i[t];if(o)a.push([`${e}-${t}`,s(o)]);else{var l;const i=null==(l=a[n-1])?void 0:l[1];a.push([`${e}-${t}`,i||s(r)])}})),Object.fromEntries(a)}r(56216);t.Box=e=>{let{top:t=0,bottom:r=0,className:n,style:l={},children:c,...u}=e;return a.default.createElement("div",{className:(0,i.default)(n,"mdx-box"),style:(0,s.makeStyle)({...l},{...o("mdx-box-top",t,0,"px"),...o("mdx-box-bottom",r,0,"px")}),...u},c)}},32765:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(93152),t)},40299:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.CallToActionButton=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(88746)),s=n.__importDefault(r(86010)),o=n.__importDefault(r(67294));r(51928);t.CallToActionButton=e=>{let{className:t,variant:r="filled",size:n="large",children:l,...c}=e;return 
o.default.createElement(a.default,{className:(0,s.default)("mdx-cta-button",t),...c},o.default.createElement(i.Typography,{component:"span",variant:"large"===n?"label1":"label2"},o.default.createElement(i.Button,{size:n,variant:r},l)))}},54442:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(40299),t)},40824:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.CallToActionSection=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294)),o=r(65701);r(8981);t.CallToActionSection=e=>{let{label:t,href:r,title:n,columns:l=1,description:c,list:u=[],target:h,className:d,children:p,variant:f="outlined",...m}=e;const g=!!c,v=u.length>0,A=!!r,y=!n||g||A||v?n&&!c&&!v&&A?"title-button":n&&c&&2===l?"full-width":n&&c&&u.length>0?"list":"simple":"title-only";return s.default.createElement("div",{className:(0,a.default)(d,"mdx-cta-section",`mdx-cta-section--${y}`),...m},s.default.createElement("div",{className:"mdx-cta-section__container"},s.default.createElement(i.Typography,{component:"h2",className:"mdx-cta-section__title"},n),s.default.createElement(i.Typography,{component:"h3",className:"mdx-cta-section__description"},c),r&&s.default.createElement(o.CallToActionButton,{target:h,href:r,className:"mdx-cta-section__link",variant:f},t)),u.length>0&&s.default.createElement("div",{className:"mdx-cta-section__list"},u.map(((e,t)=>s.default.createElement("div",{key:t},s.default.createElement(i.Typography,{variant:"subtitle2",component:"div"},e.title),s.default.createElement(i.Typography,{variant:"h2",component:"p"},e.description))))))}},78256:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(40824),t)},63906:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.DocMetadata=void 0;const 
n=r(70655),i=r(31665),a=n.__importDefault(r(88746)),s=n.__importDefault(r(86010)),o=n.__importDefault(r(67294)),l=r(38840);r(46774);const c=r(98906);t.DocMetadata=e=>{let{className:t,children:r,...n}=e;const{date:u,authors:h}=(0,c.useDocMetadata)(),{content:{authorPage:d}={}}=(0,l.useDocThemeOptions)();return o.default.createElement("div",{className:(0,s.default)(t,"mdx-doc-metadata"),...n},u&&o.default.createElement(i.Typography,{variant:"body2"},u),h&&h.length>0&&o.default.createElement(o.default.Fragment,null,o.default.createElement(i.Typography,{variant:"body2"},"by"," ",h.map(((e,t)=>o.default.createElement(o.default.Fragment,{key:e.key},d?o.default.createElement(a.default,{to:`author/${e.key}`},e.name):e.name,t{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(63906),t)},98906:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.useDocMetadata=void 0;const n=r(70655),i=r(13616),a=r(38840),s=n.__importDefault(r(99486));t.useDocMetadata=()=>{const e=(0,a.useDocThemeOptions)(),{content:{authors:t=[]}={}}=e,{frontMatter:r={}}=(0,i.useDoc)(),{author:n=[],date:o}=r,l=(Array.isArray(n)?n:[n]).map((e=>t.find((t=>e===t.key)))).filter((e=>!!e));return{date:o?(0,s.default)(new Date(o),"MMM d yyyy"):"",authors:l}}},58135:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.ExternalResourceCard=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(47002)),s=n.__importDefault(r(86010)),o=n.__importDefault(r(67294)),l=r(6980);r(84238);const c=r(26534);t.ExternalResourceCard=e=>{let{title:t,logoSrc:r,logoSrcDark:n,description:u,previewSrc:h,previewSrcDark:d,...p}=e;const f=!(!h&&!d);return o.default.createElement("a",{target:"_blank",...p,className:(0,s.default)(p.className,"mdx-erc",f&&"mdx-erc--with-preview")},f&&o.default.createElement(c.KeepRatio,{width:16,height:9,fullWidth:!0},o.default.createElement(a.default,{sources:{dark:d??h??"",light:h??d??""},alt:"string"==typeof t&&t||"preview 
image",className:"mdx-erc__preview-image"})),o.default.createElement("div",{className:"mdx-erc__inner"},(r||n)&&o.default.createElement(a.default,{sources:{dark:n??r??"",light:r??n??""},alt:"string"==typeof t&&t||"logo",className:"mdx-erc__logo"}),o.default.createElement(i.Typography,{variant:"body1",component:"div",className:"mdx-erc__title"},t),u&&o.default.createElement(i.Typography,{variant:"label2",component:"div",className:"mdx-erc__description"},u)),o.default.createElement("div",{className:"mdx-erc__icon"},o.default.createElement(l.IconExternalLink,{className:"mdx-erc__external-link"})))}},24084:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(58135),t)},24646:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.FeatureList=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294));r(69774);t.FeatureList=e=>{let{title:t="Features",alignment:r="bottom",features:n=[],className:o,ctaPosition:l="bottom",children:c,...u}=e;return 
s.default.createElement("div",{className:(0,a.default)(o,"mdx-feature-list",`mdx-feature-list--${r}-aligned`,`mdx-feature-list--cta-${l}`),...u},s.default.createElement("div",{className:"mdx-feature-list__header"},s.default.createElement(i.Typography,{variant:"h5",component:"h1",className:"mdx-feature-list__title"},t),c&&"top"===l&&s.default.createElement("div",{className:"mdx-feature-list__extra"},c)),s.default.createElement("div",{className:"mdx-feature-list__list"},n.map(((e,t)=>s.default.createElement("div",{key:t,className:(0,a.default)("mdx-feature-list__feature")},s.default.createElement("div",{className:"mdx-feature-list__feature-inner"},s.default.createElement(i.Typography,{variant:"subtitle1",component:"div",className:"mdx-feature-list__feature-index"},t+1),s.default.createElement(i.Typography,{variant:"h2",component:"h2",className:"mdx-feature-list__feature-title"},e.title),s.default.createElement(i.Typography,{variant:"h5",component:"h3",className:"mdx-feature-list__feature-description"},s.default.createElement("span",null,e.description))),s.default.createElement("div",{className:"mdx-feature-list__feature-border"}))))),c&&"bottom"===l&&s.default.createElement("div",{className:"mdx-feature-list__extra"},c))}},34155:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(24646),t)},51499:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.GithubChallenges=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(67294)),s=r(696),o=r(79527),l=e=>{let{message:t}=e;return a.default.createElement(a.default.Fragment,null,a.default.createElement(i.Typography,{variant:"h1",className:"mdx-ghc__header"},"Open challenges"),!!t&&a.default.createElement(i.Typography,{variant:"body1"},t))};t.GithubChallenges=e=>{let{challengesData:t,useDummyData:r,subheaderText:n,filterByName:c,...u}=e,h=r?[s.dummyGithubIssue]:(e=>{let t=[];return Object.keys(e).forEach((r=>{if(!isNaN(Number(r))){const 
n=Object.values(e[r])[0];Array.isArray(n)&&(t=t.concat(n))}})),t})(t);if(!h||(d=h,!Array.isArray(d)||!d.length))return a.default.createElement(l,{message:"No challenges to show"});var d;if(c)try{var p;const e=new RegExp(c,"i");h=null==(p=h)?void 0:p.filter((t=>e.test(t.title)))}catch(f){console.error("Invalid regex pattern:",f)}return a.default.createElement("div",{...u},a.default.createElement(l,null),!!n&&a.default.createElement(i.Typography,{variant:"body1",className:"mdx-ghc-subheader-text",component:"div"},n),h.map((e=>a.default.createElement(o.SingleGithubChallenge,{key:e.id,issue:e}))))}},79527:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.SingleGithubChallenge=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(67294)),s=r(6980);r(58918);const o=n.__importDefault(r(88746));function l(e,t){void 0===t&&(t=24);const r=new URL(e),n=new URLSearchParams(r.search);return n.set("s",t.toString()),r.search=Array.from(n.entries()).map((e=>{let[t,r]=e;return`${t}=${r}`})).join("&"),r.toString()}t.SingleGithubChallenge=e=>{let{issue:t}=e;const r=(e=>{const t=new Map;return t.set(e.user.login,{name:e.user.login,avatarUrl:e.user.avatarUrl}),e.assignees.forEach((e=>{t.set(e.login,{name:e.login,avatarUrl:e.avatarUrl})})),e.comments.forEach((e=>{t.set(e.author.login,{name:e.author.login,avatarUrl:e.author.avatarUrl})})),Array.from(t.values())})(t),n=(e=>{const t=e.projects.map((e=>e.name));return 0===t.length?"":Array.from(new Set(t)).join(", ")})(t),c=""!==n,u=!!t.milestone,h=[];return c&&h.push(a.default.createElement(a.default.Fragment,null,a.default.createElement(i.Typography,{variant:"body3",className:"mdx-ghc__label"},n.includes(", 
")?"Projects":"Project"),a.default.createElement(i.Typography,{variant:"body3",className:"mdx-ghc__project-name"},n))),u&&h.push(a.default.createElement(a.default.Fragment,null,a.default.createElement(i.Typography,{variant:"body3",className:"mdx-ghc__label"},"Milestone"),a.default.createElement(i.Typography,{variant:"body3",className:"mdx-ghc__milestone-text"},t.milestone))),a.default.createElement("div",{className:"mdx-ghc__container"},a.default.createElement(o.default,{href:t.url,className:"mdx-ghc__issue-title-link"},a.default.createElement(i.Typography,{variant:"h5",className:"mdx-ghc__issue-title"},t.title)),a.default.createElement("div",{className:"mdx-ghc__challenge-labels"},t.labels.map(((e,t)=>a.default.createElement(i.Typography,{variant:"body3",key:t,className:"mdx-ghc__challenge-label"},e)))),a.default.createElement("div",{className:"mdx-ghc__issue-content-grid"},a.default.createElement(i.Typography,{variant:"body3",className:"mdx-ghc__label"},"Participants"),a.default.createElement("div",{className:"mdx-ghc__participant-photo-container"},a.default.createElement(i.Typography,{variant:"body3",className:"mdx-ghc__comment-count",component:"div"},r.length),r.map(((e,t)=>a.default.createElement("img",{key:t,className:"mdx-ghc__participant-photo",src:l(e.avatarUrl),alt:e.name})))),h.map(((e,t)=>a.default.createElement(a.default.Fragment,{key:t},e)))),a.default.createElement(o.default,{href:t.url,className:"mdx-ghc__view-on-github-link"},a.default.createElement(i.Button,{className:"mdx-ghc__view-on-github-button",icon:a.default.createElement(s.IconExternalLink,null)},"View on GitHub")))}},696:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.dummyGithubIssue=void 0,t.dummyGithubIssue={id:"12345",title:"Bug in pagination",body:"When navigating to the second page, the first item 
repeats.",url:"https://github.com/user/repo/issues/12345",user:{login:"user123",avatarUrl:"https://avatars.githubusercontent.com/u/8811422?v=4"},labels:["bug","frontend"],commentCount:3,comments:[{id:"c1",author:{login:"alice123",avatarUrl:"https://avatars.githubusercontent.com/u/8811422?v=4"},body:"I have also noticed this issue. Working on a fix now.",createdAt:"2021-01-01T12:00:00Z"},{id:"c2",author:{login:"bob456",avatarUrl:"https://avatars.githubusercontent.com/u/8811422?v=4"},body:"Any updates on this?",createdAt:"2021-01-02T15:30:00Z"},{id:"c3",author:{login:"jaquim",avatarUrl:"https://avatars.githubusercontent.com/u/8811422?v=4"},body:"I like turtles.",createdAt:"2021-01-03T09:45:00Z"}],assignees:[{login:"alice123",avatarUrl:"https://avatars.githubusercontent.com/u/8811422?v=4"},{login:"bob456",avatarUrl:"https://avatars.githubusercontent.com/u/8811422?v=4"}],milestone:"v1.0.0",created_at:"2020-12-31T11:00:00Z",updated_at:"2021-01-04T13:00:00Z",projects:[{name:"Awesome Project"}]}},47513:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(51499),t)},7859:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Grid=void 0;const n=r(70655),i=r(31665),a=r(70917),s=n.__importDefault(r(77366)),o=n.__importDefault(r(86010)),l=n.__importStar(r(67294)),c=r(59567),u=r(3796),h=r(18031);t.Grid=e=>{let{actions:t,leftLabel:r="",rightLabel:n="",spacingButtons:i=!1,children:a,...s}=e;const c=(0,l.useRef)(null);return l.default.createElement(d,{...s,className:(0,o.default)(s.className,"mdx-grid")},l.default.createElement("div",{className:"mdx-grid__actions"},t,l.default.createElement(u.ScrollButtons,{containerRef:c,className:"mdx-grid__scroll",leftLabel:r,rightLabel:n,spacing:i?"spaced":"grouped"})),l.default.createElement("div",{ref:c,className:(0,o.default)("mdx-grid__content","hidden-scrollbar")},a))},t.Grid.Item=h.GridItem;const d=s.default.div` + width: 100%; + + .mdx-grid__scroll { + display: flex; + } + + 
.mdx-grid__content { + display: grid; + gap: var(--grid-gap); + grid-template-columns: repeat(var(--grid-cols), minmax(0, 1fr)); + overflow: hidden; + } + + .mdx-grid__actions { + display: flex; + flex-direction: row; + align-items: center; + gap: 1rem; + + & > * { + margin-bottom: 3rem; + } + } + + ${e=>i.THEME_BREAKPOINTS.map((t=>{if(!e[t])return null;const r=e[t];return c.lsdUtils.responsive(e.theme,t,"up")(a.css` + ${void 0!==r.cols&&`\n --grid-cols: ${r.cols};\n `} + + ${void 0!==r.gap&&`\n --grid-gap: ${r.gap};\n `} + + ${(void 0===r.wrap||!0===r.wrap)&&a.css` + .mdx-grid__scroll { + display: none; + } + .mdx-grid__content { + display: grid; + flex-wrap: unset; + overflow-x: unset; + overflow-y: unset; + scroll-snap-type: unset; + } + `} + + ${void 0!==r.wrap&&!1===r.wrap&&a.css` + .mdx-grid__scroll { + display: flex; + } + .mdx-grid__content { + display: flex; + flex-wrap: nowrap; + overflow-x: scroll; + overflow-y: hidden; + scroll-snap-type: x mandatory; + } + `} + + ${!1===r.scrollButtons&&a.css` + .mdx-grid__scroll { + display: none; + } + `} + `)}))} +`},18031:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.GridItem=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=n.__importDefault(r(77366)),o=r(31665),l=r(59567),c=r(70917);t.GridItem=e=>{let{children:t,...r}=e;return a.default.createElement(u,{...r,className:(0,i.default)(r.className)},t)};const u=s.default.div` + ${e=>o.THEME_BREAKPOINTS.map((t=>{if(!e[t])return null;const r=e[t];return l.lsdUtils.responsive(e.theme,t,"up")(c.css` + grid-column: span ${r}; + flex-basis: calc(100% / var(--grid-cols) * ${r}); + `)}))} +`},78378:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const n=r(70655);n.__exportStar(r(7859),t),n.__exportStar(r(18031),t)},94787:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.useHero=t.HeroContext=void 0;const 
n=r(70655).__importStar(r(67294));t.HeroContext=n.default.createContext({size:"medium"});t.useHero=()=>(0,n.useContext)(t.HeroContext)},53255:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Hero=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=r(94787);r(91786);t.Hero=e=>{let{size:t="medium",className:r,children:n,...o}=e;return a.default.createElement(s.HeroContext.Provider,{value:{size:t}},a.default.createElement("div",{className:(0,i.default)(r,"mdx-hero",`mdx-hero--${t}`),...o},n))}},72295:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(53255),t)},23908:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroAction=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(88746)),s=n.__importDefault(r(86010)),o=n.__importDefault(r(67294));r(85661);t.HeroAction=e=>{let{size:t="large",variant:r="outlined",className:n,children:l,...c}=e;return o.default.createElement(a.default,{className:(0,s.default)(n,"mdx-hero-action",`mdx-hero-action--${t}`),...c},o.default.createElement(i.Button,{variant:r,size:t},o.default.createElement(i.Typography,{component:"span",variant:"label1",style:{color:"inherit"}},l)))}},48126:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(23908),t)},82897:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroActions=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294));r(57255);t.HeroActions=e=>{let{className:t,children:r,...n}=e;return a.default.createElement("div",{className:(0,i.default)(t,"mdx-hero-actions"),...n},r)}},16693:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(82897),t)},28795:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroDescription=void 0;const 
n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294)),o=r(94787);r(30204);t.HeroDescription=e=>{let{size:t,className:r,uppercase:n=!1,children:l,...c}=e;const u=(0,o.useHero)(),h=t??(u?u.size:"medium");return s.default.createElement(i.Typography,{variant:"h4",className:(0,a.default)(r,"mdx-hero-description",`mdx-hero-description--${h}`,n?"mdx-hero-description--uppercase":""),...c},l)}},22447:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(28795),t)},78274:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroInfo=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=r(67713),o=r(94787),l=r(65701);r(6460);t.HeroInfo=e=>{let{size:t,className:r,children:n,...c}=e;const u=(0,o.useHero)(),h=(0,s.useHydrated)(),d=t||(u?u.size:"medium");return a.default.createElement(a.default.Fragment,null,a.default.createElement("div",{className:(0,i.default)(r,"mdx-hero-info",`mdx-hero-info--${d}`),...c},n),h&&a.default.createElement(l.ScrollToBottom,null))}},67812:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(78274),t)},84773:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.AsciiRenderer=void 0;const n=r(70655),i=r(44169),a=r(64232),s=n.__importStar(r(67294)),o=r(84849),l=r(24052);t.AsciiRenderer=e=>(function(e){void 0===e&&(e={});const{renderIndex:t=i.defaultAsciiConfigs.renderIndex,characters:r=i.defaultAsciiConfigs.characters,invert:n=i.defaultAsciiConfigs.invert,color:c=i.defaultAsciiConfigs.color,resolution:u=i.defaultAsciiConfigs.resolution,bgColor:h=i.defaultAsciiConfigs.bgColor,fgColor:d=i.defaultAsciiConfigs.fgColor,textShadowSize:p=i.defaultAsciiConfigs.textShadowSize,withTextShadow:f=!1}=e,{gl:m,size:g,scene:v,camera:A,viewport:y}=(0,a.useThree)(),x=(0,s.useMemo)((()=>{const e=new o.AsciiEffect(m,r,{invert:n,color:c,resolution:u});return 
e.domElement.style.position="absolute",e.domElement.style.top="0px",e.domElement.style.left="0px",e.domElement.style.pointerEvents="none",e}),[r,n,c,u]);(0,s.useLayoutEffect)((()=>{x.domElement.style.color=d}),[d,h]),(0,s.useEffect)((()=>{if(m.domElement.style.opacity="0",m.domElement.parentNode.appendChild(x.domElement),f){const e=document.createElement("style");e.innerHTML=`table *{text-shadow: ${(0,l.generateTextShadow)(p)};`,x.domElement.appendChild(e)}return()=>{m.domElement.style.opacity="1",m.domElement.parentNode.removeChild(x.domElement)}}),[x]),(0,s.useEffect)((()=>{x.setSize(g.width,g.height)}),[x,g]),(0,a.useFrame)((e=>{x.render(v,A)}),t)}(e),s.default.createElement(s.default.Fragment,null))},35901:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Controls=void 0;const n=r(70655),i=r(44169),a=n.__importStar(r(67294)),s=r(40213),o=r(64232),l=r(90278),c=r(24052),u=(e,t,r)=>(1-r)*e+r*t;function h(e,t,r,n){const i=Math.min(e,t)/t,a=[0,0,0];for(let s=0;s<3;s++)a[s]=u(r[s],n[s],i);return a}t.Controls=e=>{let{rotateSpeed:t=i.ROTATE_SPEED,enableZoom:r=!0,enableRotateOnScroll:n=!0,preset:u,targetPreset:d,children:p,...f}=e;const m=(0,a.useRef)(),g=(0,s.useScrollY)(),{camera:v,size:A}=(0,o.useThree)(),y=(0,a.useRef)(),[x,b]=a.default.useState(!1),[w,E]=a.default.useState(1),[_,S]=a.default.useState(0);return(0,o.useFrame)(((e,r)=>{m.current.rotation.y-=r*((0,c.isMobile)()?.6*t:t)})),(0,a.useEffect)((()=>{const e=()=>{console.log(JSON.stringify({cameraPos:v.position.toArray(),cameraRot:v.rotation.toArray().slice(0,3),controlsTarget:y.current.target.toArray()},null,2))};return 
window.addEventListener("click",e),()=>window.removeEventListener("click",e)}),[v]),(0,a.useEffect)((()=>{(0,c.isTouchDevice)()&&(y.current.minPolarAngle=Math.PI/2,y.current.maxPolarAngle=Math.PI/2,setTimeout((()=>{b(!0)}),1e3))}),[]),(0,a.useEffect)((()=>{(0,c.isMobile)()&&(E((0,c.mapFloat)(g,0,(0,c.calcScrollThreshold)(),1,.65)),S((0,c.mapFloat)(g,0,(0,c.calcScrollThreshold)(),0,.35)))}),[g]),(0,a.useEffect)((()=>{if(!r)return;if(!d)return;const e=h(g,400,u.cameraPos,d.cameraPos),t=h(g,400,u.cameraRot,d.cameraRot),n=h(g,400,u.controlsTarget,d.controlsTarget);v.position.set(...e),v.rotation.set(...t),y.current.target.set(...n),v.updateProjectionMatrix()}),[g,v]),a.default.createElement("group",{ref:m,...f,scale:w,"position-y":_},p,a.default.createElement(l.OrbitControls,{ref:y,enableZoom:!1,target:y.current?y.current.target:u.controlsTarget,enabled:!x}))}},44169:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.OBJECTS_PRESETS=t.defaultPresets=t.defaultAsciiConfigs=t.INITIAL_ZOOM=t.RESIZE_SPEED_FACTOR=t.MIN_ZOOM=t.MAX_ROTATE_SPEED=t.MIN_ROTATE_SPEED=t.ROTATE_SPEED=void 0,t.ROTATE_SPEED=.1,t.MIN_ROTATE_SPEED=0,t.MAX_ROTATE_SPEED=1,t.MIN_ZOOM=.22,t.RESIZE_SPEED_FACTOR=.9,t.INITIAL_ZOOM=3,t.defaultAsciiConfigs={renderIndex:1,bgColor:"rgb(var(--lsd-surface-primary))",fgColor:"rgb(var(--lsd-text-primary))",characters:" l.o.g.o.s 
",invert:!1,color:!1,resolution:.21,textShadowSize:15},t.defaultPresets={modelId:"default",simple:{cameraPos:[6.898858137575106,4.772099506970454,-3.1821660872368627],cameraRot:[-2.733342169570335,1.127956558492365,2.7690180385429666],controlsTarget:[-.2185887974027981,3.4320197290105474,-.08409377618890646]},abstract:{cameraPos:[-1.4826176635786852,4.021180061821954,-1.5929058418153597],cameraRot:[-2.9244096935808908,-.8625529112689497,-2.9755407843387185],controlsTarget:[-.3236695017538898,3.8072918272567,-.6236093222013962]}},t.OBJECTS_PRESETS=[t.defaultPresets,{modelId:"architecture01",simple:{cameraPos:[6.898858137575106,4.772099506970454,-3.1821660872368627],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"architecture02",simple:{cameraPos:[6.898858137575106,4.772099506970454,-3.1821660872368627],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"architecture03",simple:{cameraPos:[6.898858137575106,4.772099506970454,-3.1821660872368627],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"architecture04",simple:{cameraPos:[2.263701079468784,.6448855513810133,-1.1446840846564066],cameraRot:[-2.3314571674867457,.984733935216302,2.422537357648925],controlsTarget:[.6593539926170827,-.1266335925222026,-.4104215479626598]},abstract:{cameraPos:[1.8397825927313005,.5006635210357668,-.7112749496103248],cameraRot:[-2.0637769654190388,.941591559117755,2.1572780914058356],controlsTarget:[.7525282128865571,-.19650999135957886,-.33673737792715125]},targetLook:{cameraPos:[1.2775067913826557,1.3342390202957728,-3.123518037652026],cameraRot:[-2.6905895797482686,.32826936260575107,2.9867046984290964],controlsTarget:[.27965301570358253,.05724884619536415,-.48670374558576035]}},{modelId:"architecture05",simple:{cameraPos:[2.783916402572475,.8560027544276005,2.75967378
79252995],cameraRot:[-.16075199314229247,.8310730403125255,.11920589473418516],controlsTarget:[-.13283501637059433,.4299574965789378,.13221598116033606]},abstract:{cameraPos:[-.5465903796360435,.8773980794748312,.08538618930043433],cameraRot:[-.26855166005001807,-1.1713993239674987,-.24830772343977955],controlsTarget:[-.0544632211815667,.8222830795164463,-.1148867151943581]},targetLook:{cameraPos:[-.08633866196975185,1.065498410363367,3.299441795447572],cameraRot:[-.30705338387773307,-.04232616594886318,-.01341603621000156],controlsTarget:[.057360484222938594,.03995565554668334,.06512362298065871]}},{modelId:"architecture06",simple:{cameraPos:[6.898858137575106,4.772099506970454,-3.1821660872368627],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"architecture07",simple:{cameraPos:[6.898858137575106,4.772099506970454,-3.1821660872368627],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"atlas",simple:{cameraPos:[-.05747471409961126,1.1561369169508278,.2934743027588207],cameraRot:[.13523803197626882,-.45004875426349367,.05912213357583956],controlsTarget:[-.03052411570622545,1.1636581998026059,.23819862568800665]},abstract:{cameraPos:[-.18886266143333627,1.0759713173211645,.49472614307040697],cameraRot:[.11827338438455919,-.49355424749746096,.056236459481599846],controlsTarget:[-.03586918676122722,1.1095292429426495,.21231853618809665]},targetLook:{cameraPos:[-.6194495673706852,.6791728914823414,1.1145248759455844],cameraRot:[.08253419059538356,-.589296072042965,.045942607479927136],controlsTarget:[.09058057232790886,.7667301800204325,.05607398084523753]}},{modelId:"bust01",simple:{cameraPos:[-.6865425525854476,.9101267370893742,.6192780523604176],cameraRot:[-.39660492694232563,-.962317303218196,-.3309893133032883],controlsTarget:[.008888669206507317,.7229784900626205,.17240700391361993]},abstract:{cameraPos:[.0257584082257
25123,.9432728632646389,.5085343068565109],cameraRot:[-.016796160047877214,.23199352927595504,.003862085219634371],controlsTarget:[-.04849025797636223,.9379943498483801,.19429480114059927]},targetLook:{cameraPos:[-.0995637601904456,.9905193985583883,1.2844592429465957],cameraRot:[-.21489715158374015,-.011908392248228742,-.002599147273856083],controlsTarget:[-.08519853311178426,.7332893758986562,.10595091334036527]}},{modelId:"bust02",simple:{cameraPos:[-1.4566842350476759,1.3228318382357354,.3529107224325513],cameraRot:[-.7212161402570414,-1.161345100016008,-.6787189972933454],controlsTarget:[.13759890903780844,.8659810364239205,-.1666973840261623]},abstract:{cameraPos:[-.3488062269042841,1.3111379960412364,.033354968216059155],cameraRot:[-.7212161402570405,-1.1613451000160089,-.6787189972933446],controlsTarget:[.2227218365835886,1.147363414950647,-.15291722311874764]}},{modelId:"bust03",simple:{cameraPos:[6.898858137575106,4.772099506970454,-3.1821660872368627],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"discobolus",simple:{cameraPos:[.9321278495515372,1.1243517299151449,1.0437243089401456],cameraRot:[-.05449607597993083,.5948614803388417,.030560026854436973],controlsTarget:[.17851738112859908,1.0636847191934362,-.06840974825492452]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"hand",simple:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"vase01",simple:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}},{modelId:"venus",simple:{cameraPos:[-.33960257176056113,1.3731114512210183,.5843851218219973],cameraRot:[.19165735697369993,-.4160172409280952,.07825504508386714],controlsTarget:[.1766806722765915,1.595709756901872,-.5627979418735829]},abstract:{cameraPos:[-.2543046264127692,1.59189
4996466899,.21161310295797725],cameraRot:[.17078534527374745,-.637502107706861,.10229142114685894],controlsTarget:[.2766995177502174,1.7137410300309837,-.49488235116491014]}},{modelId:"flower",simple:{cameraPos:[-.33960257176056113,1.3731114512210183,.5843851218219973],cameraRot:[0,0,0],controlsTarget:[0,0,0]},abstract:{cameraPos:[0,0,0],cameraRot:[0,0,0],controlsTarget:[0,0,0]}}]},9354:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroModel=void 0;const n=r(70655),i=r(90278),a=r(64232),s=n.__importDefault(r(86010)),o=n.__importStar(r(67294)),l=r(44169),c=r(24052),u=r(84773),h=r(35901);r(61588);const d=r(40213);function p(e){let{url:t,onMount:r=(()=>{}),...n}=e;const{scene:a}=(0,i.useGLTF)(t,"/scripts/draco-1.4.3/");return(0,o.useEffect)((()=>{r()}),[]),o.default.createElement("primitive",{object:a,...n})}t.HeroModel=e=>{const{modelId:t,preset:r,mode:n="simple",className:i,children:f,asciiConfig:m,rotateSpeed:g=l.ROTATE_SPEED,enableZoom:v,enableRotateOnScroll:A,withParallelEffect:y=!0,startY:x="bottom",...b}=e,w=((e,t,r)=>(0,o.useMemo)((()=>{if(t)return{...l.defaultPresets,[e]:t};if(!r)return l.defaultPresets;return l.OBJECTS_PRESETS.find((e=>e.modelId===r))||l.defaultPresets}),[t,r]))(n,r,t),E=(0,d.useScrollY)();return o.default.createElement("div",{className:(0,s.default)(i,"mdx-hero-model","mdx-hero-model--ascii",`mdx-hero-model--${n}`,`mdx-hero-model--${x}`),...b},o.default.createElement("div",{className:"mdx-hero-model--inner",style:{...y?{transform:"translateY(0px) scale(var(--mdx-hero-model-wrapper-scale))"}:{}}},o.default.createElement(o.Suspense,{fallback:o.default.createElement("span",null," 
")},o.default.createElement(a.Canvas,{dpr:[1,2],style:{height:"100vh"},camera:{fov:50,position:w[n].cameraPos,rotation:w[n].cameraRot}},o.default.createElement("directionalLight",{position:[-10,10,0],intensity:1.5}),o.default.createElement("directionalLight",{position:[-10,10,5],intensity:.3}),o.default.createElement("directionalLight",{position:[-10,20,0],intensity:1.5}),o.default.createElement("directionalLight",{position:[100,-10,0],intensity:.25}),o.default.createElement(h.Controls,{rotateSpeed:g,preset:w[n],targetPreset:w.targetLook,enableZoom:"simple"!==n},o.default.createElement(o.Suspense,{fallback:o.default.createElement(p,{url:`/hero/${w.modelId}/lo.glb`})},o.default.createElement(p,{url:`/hero/${w.modelId}/hi.glb`}),o.default.createElement(u.AsciiRenderer,{...m})))))),o.default.createElement("div",{className:"mdx-hero-model--shade",style:{opacity:(0,c.mapFloat)(E,0,(0,c.calcScrollThreshold)()*l.RESIZE_SPEED_FACTOR,0,1)}}))}},28292:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroModel=void 0;const n=r(70655).__importDefault(r(67294)),i=r(24052),a=r(40213),s=r(9354);t.HeroModel=e=>{if("undefined"==typeof window)return null;const t=(0,a.useScrollY)();(0,i.mapFloat)(t,0,(0,i.calcScrollThreshold)(),0,.1*window.innerHeight);return n.default.createElement("div",{style:{position:"absolute",top:0,left:0}},n.default.createElement(s.HeroModel,{...e}))}},59533:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroTitle=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294)),o=r(94787);r(92751);t.HeroTitle=e=>{let{size:t,uppercase:r,className:n,children:l,...c}=e;const u=(0,o.useHero)(),h=t||(u?u.size:"medium"),d=void 0!==r?r:"large"===(null==u?void 0:u.size);return s.default.createElement(i.Typography,{variant:"h1",component:"h1",className:(0,a.default)(n,"mdx-hero-title",`mdx-hero-title--${h}`,d&&"mdx-hero-title--uppercase"),...c},l)}},85486:(e,t,r)=>{"use 
strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(59533),t)},37207:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.HeroVideo=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importStar(r(67294)),s=r(9259),o=r(32833),l=r(67713),c=r(94787);r(53175);t.HeroVideo=e=>{let{placeholderSrc:t,desktop:r,mobile:n,className:u,style:h={},children:d,...p}=e;(0,c.useHero)();const f=(0,l.useHydrated)(),m=(0,a.useRef)(null),[g,v]=(0,a.useState)(!0),A=async()=>{const e=m.current;if(!e)return;e.muted=!0,e.defaultMuted=!0;const[t,r]=await(0,o.settle)((()=>e.play()));r||v(!1)};return(0,a.useEffect)((()=>{const e=document.querySelector(".col > article");if(e)return e.classList.add("overflow-hidden"),()=>{e.classList.remove("overflow-hidden")}}),[]),a.default.createElement(a.default.Fragment,null,a.default.createElement("div",{className:(0,i.default)(u,"mdx-hero-video",g&&"mdx-hero-video--loading"),style:(0,s.makeStyle)({...h},{"hero-video-scale":(null==r?void 0:r.scale)??"1.70951586","hero-video-offset-y":(null==r?void 0:r.offsetY)??"-150px","hero-video-height":(null==r?void 0:r.height)??"100%","hero-video-min-height":(null==r?void 0:r.minHeight)??"min(100vh, var(--hero-max-height))","hero-video-scale-mobile":(null==n?void 0:n.scale)??"1.70951586","hero-video-offset-y-mobile":(null==n?void 0:n.offsetY)??"-50px","hero-video-height-mobile":(null==n?void 0:n.height)??"120%","hero-video-min-height-mobile":(null==n?void 0:n.minHeight)??"100vh"}),...p},a.default.createElement("div",{className:"mdx-hero-video__placeholder"},a.default.createElement("img",{src:t,alt:""})),a.default.createElement("div",{className:"mdx-hero-video__video"},f&&a.default.createElement("video",{ref:m,loop:!0,muted:!0,autoPlay:!0,playsInline:!0,onCanPlay:A,onLoadedMetadata:A},d))))}},98130:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(37207),t)},48949:(e,t,r)=>{"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.JobsPerDepartment=void 0;const n=r(70655),i=r(31665),a=r(744),s=n.__importDefault(r(67294)),o=r(31174),l=e=>{let{message:t}=e;return s.default.createElement(s.default.Fragment,null,s.default.createElement(i.Typography,{variant:"h1",className:"mdx-jpd__header"},"Current job openings"),!!t&&s.default.createElement(i.Typography,{variant:"body1"},t))};t.JobsPerDepartment=e=>{let{jobData:t,titleFilter:r="",useDummyData:n=!1,...i}=e,c=n?o.jobsPerDepartmentDummyData:null==t?void 0:t.departments;return c&&(u=c)&&u.some((e=>e.jobs&&e.jobs.length>0))?(c&&r&&(c=c.map((e=>({...e,jobs:e.jobs.filter((e=>e.title.includes(r)))})))),s.default.createElement("div",{...i},s.default.createElement(l,null),c.map((e=>s.default.createElement(a.SingleDepartmentJobs,{key:e.name,department:e}))))):s.default.createElement(l,{message:"No job openings to show"});var u}},744:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.SingleDepartmentJobs=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(67294)),s=r(6980);r(37477);const o=n.__importDefault(r(88746));t.SingleDepartmentJobs=e=>{let{department:t}=e;return t.jobs&&0!==t.jobs.length?a.default.createElement("div",{className:"mdx-jpd__single-job-department-container"},a.default.createElement(i.Typography,{variant:"subtitle2",className:"mdx-jpd__department-title"},t.name),a.default.createElement("ul",{className:"mdx-jpd__job-list"},t.jobs.map(((e,t)=>{var r;return 
a.default.createElement("li",{key:t,className:"mdx-jpd__job-list-item"},a.default.createElement(o.default,{href:e.absolute_url,target:"_blank",className:"mdx-jpd__job-link"},a.default.createElement("div",{className:"mdx-jpd__job-title-container"},a.default.createElement(i.Typography,{variant:"h5",className:"mdx-jpd__job-title"},e.title),a.default.createElement(s.IconExternalLink,{className:"mdx-jpd__external-link-icon"})),!(null==(r=e.location)||!r.name)&&a.default.createElement(i.Typography,{variant:"subtitle2",component:"div"},e.location.name)))})))):null}},39610:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(48949),t)},31174:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.jobsPerDepartmentDummyData=void 0,t.jobsPerDepartmentDummyData=[{id:87842,name:"App",parent_id:43806,child_ids:[87847,87852,87850,87848,45530,87849],jobs:[]},{id:54504,name:"Brand Design Studio",parent_id:null,child_ids:[],jobs:[]},{id:45532,name:"Business Development",parent_id:null,child_ids:[],jobs:[]},{id:87841,name:"Codex",parent_id:43806,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=5329400",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2662332,location:{name:"Remote (Worldwide)"},metadata:null,id:5329400,updated_at:"2023-10-13T09:40:03-04:00",requisition_id:"Cod-6",title:"Technical Business Development Lead [Codex]"}]},{id:84549,name:"Communications",parent_id:null,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=5276254",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2645076,location:{name:"Remote (Worldwide)"},metadata:null,id:5276254,updated_at:"2023-10-19T03:08:59-04:00",requisition_id:"PROV-Com-16",title:"Motion 
Designer"}]},{id:45531,name:"Design",parent_id:null,child_ids:[],jobs:[]},{id:87847,name:"Desktop",parent_id:87842,child_ids:[],jobs:[]},{id:87852,name:"Documentation",parent_id:87842,child_ids:[],jobs:[]},{id:45547,name:"Engineering ",parent_id:null,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=5419957",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2693761,location:{name:"Remote (Worldwide)"},metadata:null,id:5419957,updated_at:"2023-10-24T07:30:00-04:00",requisition_id:"APP-QA-2",title:"Desktop QA Engineer "},{absolute_url:"https://jobs.status.im/?gh_jid=3694379",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2055210,location:{name:"Remote (Worldwide)"},metadata:null,id:3694379,updated_at:"2023-10-24T16:23:12-04:00",requisition_id:"BACK-1050",title:"Senior C++ Qt/QML developer for blockchain app"},{absolute_url:"https://jobs.status.im/?gh_jid=3702173",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2058858,location:{name:"Remote (Worldwide)"},metadata:null,id:3702173,updated_at:"2023-10-24T16:21:54-04:00",requisition_id:"93",title:"Senior Mobile ClojureScript UI Developer "}]},{id:49925,name:"Finance",parent_id:87845,child_ids:[],jobs:[]},{id:87854,name:"Infrastructure",parent_id:43806,child_ids:[],jobs:[]},{id:87853,name:"Insights",parent_id:87845,child_ids:[],jobs:[]},{id:87850,name:"Keycard",parent_id:87842,child_ids:[],jobs:[]},{id:145838,name:"Leadership",parent_id:null,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=5447463",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2704948,location:{name:"Remote 
(Worldwide)"},metadata:null,id:5447463,updated_at:"2023-10-24T09:07:22-04:00",requisition_id:"LEAD-1",title:"Chief of Staff [whole ecosystem]"}]},{id:74156,name:"Legal",parent_id:87845,child_ids:[],jobs:[]},{id:91698,name:"Logos",parent_id:null,child_ids:[],jobs:[]},{id:43807,name:"Marketing",parent_id:null,child_ids:[],jobs:[]},{id:87848,name:"Mobile",parent_id:87842,child_ids:[],jobs:[]},{id:87843,name:"Nimbus",parent_id:43806,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=5370820",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2679527,location:{name:"Remote (Worldwide)"},metadata:null,id:5370820,updated_at:"2023-10-19T03:05:07-04:00",requisition_id:"LIDO-1",title:"Senior DevOps Engineer (Blockchain)"}]},{id:144866,name:"Nomos",parent_id:43806,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=5433423",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2315608,location:{name:"Remote (Worldwide)"},metadata:null,id:5433423,updated_at:"2023-10-12T11:20:52-04:00",requisition_id:"PROV-Nom-5",title:"Applied Network Researcher"}]},{id:45548,name:"People Operations",parent_id:87845,child_ids:[],jobs:[]},{id:45530,name:"Product Design",parent_id:87842,child_ids:[],jobs:[]},{id:90941,name:"Program Management",parent_id:null,child_ids:[],jobs:[]},{id:43806,name:"Research & Development",parent_id:null,child_ids:[87842,87841,87854,87843,144866,87846,87981,87847,87852,87850,87848,45530,87849],jobs:[]},{id:87851,name:"Security",parent_id:87845,child_ids:[],jobs:[]},{id:87845,name:"Services",parent_id:null,child_ids:[49925,87853,74156,45548,87851],jobs:[]},{id:91697,name:"Status App ",parent_id:null,child_ids:[],jobs:[]},{id:54783,name:"Technical Writing 
",parent_id:null,child_ids:[],jobs:[]},{id:87846,name:"Vac",parent_id:43806,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=4460860",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2331302,location:{name:"Remote (Worldwide)"},metadata:null,id:4460860,updated_at:"2023-10-04T05:13:53-04:00",requisition_id:"PROV-zkV-1",title:"Zero Knowledge Research Engineer "}]},{id:87981,name:"Waku",parent_id:43806,child_ids:[],jobs:[{absolute_url:"https://jobs.status.im/?gh_jid=5456032",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2707470,location:{name:"Remote (Worldwide)"},metadata:null,id:5456032,updated_at:"2023-10-23T11:40:19-04:00",requisition_id:"WAK-GL-1",title:"Growth Lead"},{absolute_url:"https://jobs.status.im/?gh_jid=3693623",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2055187,location:{name:"Remote (Worldwide)"},metadata:null,id:3693623,updated_at:"2023-10-04T05:13:53-04:00",requisition_id:"PROV-Sec-2",title:"Protocol Engineer"},{absolute_url:"https://jobs.status.im/?gh_jid=3157908",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:1830496,location:{name:"Remote, Worldwide"},metadata:null,id:3157908,updated_at:"2023-10-04T05:13:53-04:00",requisition_id:"PROV-Sec-3",title:"Protocol Researcher (Distributed Systems)"},{absolute_url:"https://jobs.status.im/?gh_jid=5175038",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2584916,location:{name:"Remote 
(Worldwide)"},metadata:null,id:5175038,updated_at:"2023-10-09T05:53:53-04:00",requisition_id:"SDK-2",title:"Software Engineer (Chat SDK)"},{absolute_url:"https://jobs.status.im/?gh_jid=5310503",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2656108,location:{name:"Remote (Worldwide)"},metadata:null,id:5310503,updated_at:"2023-10-19T03:12:53-04:00",requisition_id:"PROV-Wak-13",title:"Software Engineer Distributed Systems Testing"},{absolute_url:"https://jobs.status.im/?gh_jid=5423094",data_compliance:[{type:"gdpr",requires_consent:!1,requires_processing_consent:!1,requires_retention_consent:!1,retention_period:null}],internal_job_id:2694724,location:{name:"Remote (Worldwide)"},metadata:null,id:5423094,updated_at:"2023-10-24T12:39:05-04:00",requisition_id:"WAK-BD-1",title:"Technical Business Development Lead "}]},{id:87849,name:"Web",parent_id:87842,child_ids:[],jobs:[]},{id:0,name:"No Department",parent_id:null,child_ids:[],jobs:[]}]},69935:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.LogoCarousel=void 0;const n=r(70655),i=n.__importDefault(r(47002)),a=n.__importDefault(r(86010)),s=n.__importStar(r(67294));r(11620);const o=r(65701),l=r(3796),c=r(67713);t.LogoCarousel=e=>{let{title:t,className:r,items:n=[],children:u,...h}=e;const d=(0,c.useHydrated)(),p=(0,s.useRef)(null),f=(0,s.useRef)(null);var m;"undefined"!=typeof window&&d&&!f.current&&(f.current=(null==(m=p.current)?void 0:m.querySelector(".mdx-grid__content"))??null,console.log(f.current,p.current));return 
s.default.createElement("div",{ref:p,className:(0,a.default)(r,"mdx-logo-carousel"),...h},s.default.createElement(o.SectionHeader,{title:t},s.default.createElement(l.ScrollButtons,{containerRef:f})),s.default.createElement(o.Grid,{className:"mdx-logo-carousel__inner",xs:{wrap:!1,gap:"96px",scrollButtons:!1}},n.map((e=>s.default.createElement(o.Grid.Item,{className:"mdx-logo-carousel__item"},s.default.createElement(i.default,{className:"mdx-logo-carousel__logo",title:e.title,sources:{dark:e.logoSrcDark??e.logoSrc??"",light:e.logoSrc??e.logoSrcDark??""}}))))))}},80756:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(69935),t)},45706:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.NewsletterSubscription=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294)),o=r(65701),l=r(3538),c=r(38840);r(58287);t.NewsletterSubscription=e=>{var t,r;let{title:n,description:u,mailingListId:h,...d}=e;const p=(null==(t=(0,c.useThemeOptions)())||null==(r=t.newsletterSubscription)?void 0:r.mailingListId)??0,f=h??p,m=(0,l.useNewsletterApi)(),g=!m.message||m.error,v=n??"Newsletter",A=u??s.default.createElement(s.default.Fragment,null,"Subscribe",s.default.createElement("br",null),"to our newsletter");return s.default.createElement(o.CallToActionSection,{className:"mdx-ns",title:s.default.createElement("span",{className:"mdx-ns__title"},v),description:s.default.createElement("div",{className:"mdx-ns__inner"},s.default.createElement("span",{className:"mdx-ns__description"},A),s.default.createElement("form",{onSubmit:e=>{e.preventDefault();const 
t=e.target,r=t.elements.namedItem("name").value,n=t.elements.namedItem("email").value;m.subscribe(f,n,r)}},m.message&&s.default.createElement(i.Toast,{title:m.message,className:"mdx-ns__toast",icon:m.error?i.ErrorIcon:i.CheckIcon}),s.default.createElement("div",{className:(0,a.default)("mdx-ns__inputs",!g&&"hidden")},s.default.createElement(i.TextField,{inputProps:{type:"text",name:"name"},variant:"underlined",placeholder:"First name or pseudonym"}),s.default.createElement(i.TextField,{inputProps:{type:"email",name:"email",required:!0},variant:"underlined",placeholder:"Email address (required)"})),s.default.createElement(i.Button,{size:"large",color:"primary",variant:"outlined",className:(0,a.default)("mdx-ns__submit-button",!g&&"hidden"),disabled:m.busy},"Subscribe"))),columns:2})}},1765:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(45706),t)},9737:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.PoweredBy=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=r(65701);r(63428);t.PoweredBy=e=>{let{items:t=[],className:r,...n}=e;return a.default.createElement(s.Grid,{className:(0,i.default)(r,"mdx-powered-by"),xs:{cols:1,wrap:!0,gap:"0 1rem"},lg:{cols:2},...n},t.map(((e,t)=>a.default.createElement(s.Grid.Item,{key:t,xs:1},a.default.createElement(s.AppCard,{...e})))))}},73465:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(9737),t)},44564:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.ProfileCard=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(88746)),s=n.__importDefault(r(67294)),o=r(6980);r(36489);const l=n.__importDefault(r(86010));t.ProfileCard=e=>{let{imgSrc:t,name:r,githubUsername:n,githubLink:c,discordUsername:u,discordLink:h,...d}=e;return 
s.default.createElement("div",{...d,className:(0,l.default)("mdx-profile-card",d.className)},s.default.createElement("div",{className:"mdx-profile-card__profile"},void 0===t?s.default.createElement(o.IconAvatar,{className:"mdx-profile-card__avatar"}):s.default.createElement("img",{alt:"string"==typeof r?r:"",className:"mdx-profile-card__avatar",src:t}),s.default.createElement(i.Typography,{className:"mdx-profile-card__name",variant:"h3",component:"h4"},r)),s.default.createElement("div",{className:"mdx-profile-card__buttons"},n&&c&&s.default.createElement(a.default,{href:c,target:"_blank",className:"mdx-profile-card__link"},s.default.createElement(o.IconGithub,null)),u&&h&&s.default.createElement(a.default,{href:h,target:"_blank",className:"mdx-profile-card__link"},s.default.createElement(o.IconDiscordWhite,null))))}},46877:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(44564),t)},47562:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Roadmap=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=r(65701),o=r(74219);r(74166);t.Roadmap=e=>{let{title:t,description:r,timeline:n=[],className:l,children:c,...u}=e;const h=(new Date).getFullYear(),d=(e,t,r)=>{if(t===r.length-1)return"none";return e.period[0]===r[t+1].period[0]?"solid":"dashed"},p=e=>{const t=(new Date).getFullYear(),r=(()=>{const e=(new Date).getMonth();return"Q"+Math.ceil((e+1)/3)})();let n,i;return Array.isArray(e)?[n,i]=e:n=e,nt?"transparent":i&&"+"!==i?i<=r?"filled":"transparent":"filled"};return a.default.createElement("div",{className:(0,i.default)(l,"mdx-roadmap"),...u},a.default.createElement(s.SectionHeader,{className:"mdx-roadmap__header",title:t,description:r},c),n.length>0&&a.default.createElement(s.Grid,{className:"mdx-roadmap__timeline",xs:{cols:6,wrap:!1,gap:"0 
1rem",scrollButtons:!0},spacingButtons:!0,leftLabel:"Past",rightLabel:"Future"},n.map(((e,t)=>a.default.createElement(s.Grid.Item,{key:t,xs:1},a.default.createElement(o.TimelineItem,{...e,index:t,period:e.period??h,description:e.description,borderStyle:d(e,t,n),periodStyle:p((null==e?void 0:e.period)??h),className:(0,i.default)("mdx-roadmap__timeline-item",e.className)}))))))}},77383:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(47562),t)},35481:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.ScrollButtons=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294));r(79832);t.ScrollButtons=e=>{let{leftLabel:t,rightLabel:r,containerRef:n,containerId:o,spacing:l="grouped",...c}=e;const u=e=>{var t,r,i;const a=n?n.current:document.querySelector(`#${o}`);if(!a)return;const s=(null==(t=a.children[0])||null==t.getBoundingClientRect||null==(r=t.getBoundingClientRect())?void 0:r.width)??236;a.scrollTo({behavior:"smooth",left:a.scrollLeft+((null==(i=a.getBoundingClientRect())?void 0:i.width)-s)*e})};return 
s.default.createElement("div",{...c,className:(0,a.default)(c.className,"mdx-scroll-buttons","spaced"===l&&"mdx-scroll-buttons--spaced")},s.default.createElement(i.IconButtonGroup,{size:"small",color:"primary"},s.default.createElement(i.IconButton,{className:(0,a.default)("mdx-scroll-buttons__button",t&&t.length>0&&"mdx-scroll-buttons__button--with-label"),size:"small",onClick:u.bind(null,-1)},s.default.createElement(i.ChevronLeftIcon,null),t&&t.length>0&&s.default.createElement("span",{className:"mdx-scroll-buttons__label"},t)),s.default.createElement(i.IconButton,{className:(0,a.default)("mdx-scroll-buttons__button",r&&r.length&&"mdx-scroll-buttons__button--with-label"),size:"small",onClick:u.bind(null,1)},r&&r.length>0&&s.default.createElement("span",{className:"mdx-scroll-buttons__label"},r),s.default.createElement(i.ChevronRightIcon,null))))}},3796:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(35481),t)},66872:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.ScrollToBottom=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importStar(r(67294)),o=r(89164),l=r(9259),c=r(52606),u=r(40213),h=n.__importDefault(r(49933));t.ScrollToBottom=e=>{const{children:t,className:r,style:n,...d}=e,p=(0,u.useScrollY)(),f=(0,c.useIsMobile)(),m=(0,o.useWindowSize)(),g=(0,s.useMemo)((()=>{var e,t;return((null==(e=document.querySelector(".mdx-hero"))||null==(t=e.getBoundingClientRect())?void 0:t.bottom)??0)+window.scrollY}),[m.height]);return s.default.createElement(i.IconButton,{onClick:()=>{var e;const t=document.querySelector(".main-wrapper article"),r=null==t||null==(e=t.children)?void 0:e[1];if(r)return 
window.scrollTo({left:0,top:r.getBoundingClientRect().top-(f?200:0),behavior:"smooth"});window.scrollTo({top:document.body.scrollHeight,behavior:"smooth"})},size:f?"small":"large",className:(0,a.default)(h.default.scrollToBottom,r,p>20&&h.default.hide),style:(0,l.makeStyle)({...n??{}},{vh:m.height/100+"px",maxTop:g+"px"}),...d},s.default.createElement(i.ChevronDownIcon,{color:"primary"}))}},19538:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(66872),t)},18291:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.SectionHeader=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294)),o=r(65701);r(92476);t.SectionHeader=e=>{let{title:t,description:r,className:n,children:l,...c}=e;const u=!!r;return s.default.createElement(o.Box,{className:(0,a.default)(n,"mdx-section-header",u&&"mdx-section-header--with-description"),...c},s.default.createElement(i.Typography,{className:"mdx-section-header__title",component:"h2",variant:"h5"},t,!u&&l&&s.default.createElement("div",{className:"mdx-section-header__extra"},l)),r&&s.default.createElement(i.Typography,{className:"mdx-section-header__description",component:"p",variant:"h3"},r,s.default.createElement("div",{className:"mdx-section-header__extra"},l)))}},83729:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(18291),t)},36835:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.Showcase=void 0;const n=r(70655),i=n.__importDefault(r(86010)),a=n.__importDefault(r(67294)),s=r(65701);r(13344);t.Showcase=e=>{let{items:t=[],className:r,...n}=e;return a.default.createElement(s.Grid,{className:(0,i.default)(r,"mdx-showcase"),xs:{cols:2,wrap:!0,gap:"1.5rem 1em"},lg:{cols:3,gap:"1rem"},...n},t.map(((e,t)=>a.default.createElement(s.Grid.Item,{key:t,className:"mdx-showcase__item",xs:1},a.default.createElement(s.ShowcaseCard,{...e})))))}},78090:(e,t,r)=>{"use 
strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(36835),t)},40245:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.ShowcaseCard=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(47002)),s=n.__importDefault(r(86010)),o=n.__importDefault(r(67294));r(29188);t.ShowcaseCard=e=>{let{index:t,name:r,logoSrc:n,logoSrcDark:l,description:c,className:u,size:h="large",borderStyle:d="solid",children:p,...f}=e;return o.default.createElement("div",{className:(0,s.default)(u,"mdx-showcase-card",`mdx-showcase-card--${h}`,`mdx-showcase-card--border-${d}`),...f},o.default.createElement("div",{className:"mdx-showcase-card__inner"},t&&o.default.createElement(i.Typography,{className:"mdx-showcase-card__index",variant:"subtitle2",component:"div"},t),(n||l)&&o.default.createElement(a.default,{sources:{dark:l??n??"",light:n??l??""},alt:"string"==typeof r?r:"",className:"mdx-showcase-card__logo"}),r&&o.default.createElement(i.Typography,{variant:"h2",component:"h2",className:"mdx-showcase-card__name"},r),c&&o.default.createElement(i.Typography,{variant:"body1",component:"p",className:"mdx-showcase-card__description"},c)))}},65162:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(40245),t)},82732:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.SocialCard=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(47002)),s=n.__importDefault(r(86010)),o=n.__importDefault(r(67294)),l=r(6980);r(63933);t.SocialCard=e=>{let{title:t,logoSrc:r,logoSrcDark:n,description:c,...u}=e;return o.default.createElement("a",{target:"_blank",...u,className:(0,s.default)(u.className,"mdx-social-card")},o.default.createElement("div",{className:"mdx-social-card__row"},(r||n)&&o.default.createElement(a.default,{sources:{dark:n??r??"",light:r??n??""},alt:t??"social card 
logo",className:"mdx-social-card__logo"}),o.default.createElement(l.IconExternalLink,{className:"mdx-social-card__external-link"})),o.default.createElement(i.Typography,{variant:"body1",component:"span",className:"mdx-social-card__description"},c))}},92498:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(82732),t)},89885:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.TimelineItem=void 0;const n=r(70655),i=r(31665),a=n.__importDefault(r(86010)),s=n.__importDefault(r(67294));r(86782);t.TimelineItem=e=>{let{index:t,period:r,description:n,borderStyle:o,periodStyle:l,className:c,children:u,...h}=e;return s.default.createElement("div",{className:(0,a.default)(c,"mdx-timeline-item","dashed"===o&&"mdx-timeline-item--border-dashed"),...h},s.default.createElement("div",{className:"mdx-timeline-item__header"},s.default.createElement("div",{className:"mdx-timeline-item__period-container"},"none"!==o&&s.default.createElement("div",{className:(0,a.default)("mdx-timeline-item__border")}),s.default.createElement(i.Typography,{variant:"subtitle2",component:"span",className:(0,a.default)("mdx-timeline-item__period",`mdx-timeline-item__period--${l}`)},(e=>{if(Array.isArray(e)&&e.length>0){let t=e[0].toString();return e.length>1&&("+"===e[1]?t+="+":t+=" "+e[1]),t}return""})(r))),s.default.createElement(i.Typography,{variant:"h2",component:"span",className:"mdx-timeline-item__index"},"number"==typeof t?`${t<9?"0":""}${t+1}`:t)),s.default.createElement(i.Typography,{variant:"h5",component:"p",className:"mdx-timeline-item__description"},n))}},74219:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});r(70655).__exportStar(r(89885),t)},65701:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0});const 
n=r(70655);n.__exportStar(r(66169),t),n.__exportStar(r(84614),t),n.__exportStar(r(64314),t),n.__exportStar(r(32765),t),n.__exportStar(r(54442),t),n.__exportStar(r(78256),t),n.__exportStar(r(6796),t),n.__exportStar(r(24084),t),n.__exportStar(r(34155),t),n.__exportStar(r(47513),t),n.__exportStar(r(78378),t),n.__exportStar(r(72295),t),n.__exportStar(r(48126),t),n.__exportStar(r(16693),t),n.__exportStar(r(22447),t),n.__exportStar(r(67812),t),n.__exportStar(r(28292),t),n.__exportStar(r(85486),t),n.__exportStar(r(98130),t),n.__exportStar(r(39610),t),n.__exportStar(r(80756),t),n.__exportStar(r(1765),t),n.__exportStar(r(73465),t),n.__exportStar(r(46877),t),n.__exportStar(r(77383),t),n.__exportStar(r(19538),t),n.__exportStar(r(83729),t),n.__exportStar(r(78090),t),n.__exportStar(r(65162),t),n.__exportStar(r(92498),t),n.__exportStar(r(74219),t)},59567:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.lsdUtils=t.LsdUtils=void 0;const n=r(31665),i=r(70917);class a{constructor(){var e=this;this._breakpoints={},this.getBreakpoints=e=>{if(this._breakpoints[e.name])return this._breakpoints[e.name];const t={};for(let r=0;rthis.getBreakpoints(e)[t],this.breakpoints=function(e){return void 0===e&&(e=[]),n.THEME_BREAKPOINTS.filter((t=>!e.find((e=>e===t))))},this.typography=function(e,t){return void 0===t&&(t=!1),"subtitle3"===e?"\n font-size: 12px !important;\n font-weight: 400 !important;\n line-height: 16px !important;\n ":`\n font-size: var(--lsd-${e}-fontSize)${t?"!important":""};\n font-weight: var(--lsd-${e}-fontWeight)${t?"!important":""};\n line-height: var(--lsd-${e}-lineHeight)${t?"!important":""};\n `},this.breakpoint=function(t,r,n,i){void 0===n&&(n="up");const{min:a,max:s}=e.getBreakpoint(t,r);let o="@media ";if("up"===n)o+=`(min-width: ${a}px)`;else if("down"===n)o+=`(max-width: ${s}px)`;else if("between"===n&&i){o+=`(min-width: ${a}px) and (max-width: ${e.getBreakpoint(t,i).min-1}px)`}else o+=`(min-width: ${a}px) and (max-width: 
${s}px)`;return`${o}`},this.responsive=function(e,r,n){void 0===n&&(n="up");const a=t.lsdUtils.breakpoint(e,r,n);return e=>i.css` + ${a} { + ${e} + } + `}}}t.LsdUtils=a,t.lsdUtils=new a},9259:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.makeStyle=void 0;t.makeStyle=function(e,t){return void 0===t&&(t={}),{...e,...Object.entries(t).reduce(((e,t)=>{let[r,n]=t;return{...e,[`--${r}`]:n}}),{})}}},24052:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.calcHeroInfoMb=t.isMobile=t.generateTextShadow=t.isTouchDevice=t.calcScrollThreshold=t.mapFloat=t.random=void 0;t.random=(e,t)=>Math.random()*(t-e)+e;t.mapFloat=(e,t,r,n,i)=>(e-t)*(i-n)/(r-t)+n;t.calcScrollThreshold=()=>.4*window.innerHeight;t.isTouchDevice=()=>"undefined"!=typeof window&&("ontouchstart"in window||navigator.maxTouchPoints>0||navigator.msMaxTouchPoints&&navigator.msMaxTouchPoints>0);t.generateTextShadow=e=>{let t="";for(let r=0;r"undefined"!=typeof window&&window.innerWidth<997;t.calcHeroInfoMb=e=>(0,t.mapFloat)(scrollY,0,(0,t.calcScrollThreshold)(),0,100)},3538:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.useNewsletterApi=void 0;const n=r(67294);t.useNewsletterApi=()=>{const[e,t]=(0,n.useState)(!1),[r,i]=(0,n.useState)({error:!1,message:""});return{busy:e,error:r.error,message:r.message,subscribe:async(e,r,n)=>{t(!0);try{const t=await fetch("https://odoo.logos.co/website_mass_mailing/subscribe2",{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({jsonrpc:"2.0",method:"call",params:{name:n,value:r,list_id:e,subscription_type:"email"}})}),a=await t.json();i({error:!1,message:a.result.message})}catch(a){i({error:!0,message:"Something went wrong!"})}t(!1)}}}},40213:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.useScrollY=void 0;const 
n=r(21457),i=r(67294);t.useScrollY=()=>{const[e,t]=(0,i.useState)(0);return(0,n.useWindowEventListener)("scroll",(()=>{t(window.scrollY)})),e}},77366:(e,t,r)=>{"use strict";function n(){return n=Object.assign?Object.assign.bind():function(e){for(var t=1;tv});var i=r(67294),a=r(45042),s=/^((children|dangerouslySetInnerHTML|key|ref|autoFocus|defaultValue|defaultChecked|innerHTML|suppressContentEditableWarning|suppressHydrationWarning|valueLink|abbr|accept|acceptCharset|accessKey|action|allow|allowUserMedia|allowPaymentRequest|allowFullScreen|allowTransparency|alt|async|autoComplete|autoPlay|capture|cellPadding|cellSpacing|challenge|charSet|checked|cite|classID|className|cols|colSpan|content|contentEditable|contextMenu|controls|controlsList|coords|crossOrigin|data|dateTime|decoding|default|defer|dir|disabled|disablePictureInPicture|download|draggable|encType|enterKeyHint|form|formAction|formEncType|formMethod|formNoValidate|formTarget|frameBorder|headers|height|hidden|high|href|hrefLang|htmlFor|httpEquiv|id|inputMode|integrity|is|keyParams|keyType|kind|label|lang|list|loading|loop|low|marginHeight|marginWidth|max|maxLength|media|mediaGroup|method|min|minLength|multiple|muted|name|nonce|noValidate|open|optimum|pattern|placeholder|playsInline|poster|preload|profile|radioGroup|readOnly|referrerPolicy|rel|required|reversed|role|rows|rowSpan|sandbox|scope|scoped|scrolling|seamless|selected|shape|size|sizes|slot|span|spellCheck|src|srcDoc|srcLang|srcSet|start|step|style|summary|tabIndex|target|title|translate|type|useMap|value|width|wmode|wrap|about|datatype|inlist|prefix|property|resource|typeof|vocab|autoCapitalize|autoCorrect|autoSave|color|incremental|fallback|inert|itemProp|itemScope|itemType|itemID|itemRef|on|option|results|security|unselectable|accentHeight|accumulate|additive|alignmentBaseline|allowReorder|alphabetic|amplitude|arabicForm|ascent|attributeName|attributeType|autoReverse|azimuth|baseFrequency|baselineShift|baseProfile|bbox|begin|bias|by|calcMode|capHeigh
t|clip|clipPathUnits|clipPath|clipRule|colorInterpolation|colorInterpolationFilters|colorProfile|colorRendering|contentScriptType|contentStyleType|cursor|cx|cy|d|decelerate|descent|diffuseConstant|direction|display|divisor|dominantBaseline|dur|dx|dy|edgeMode|elevation|enableBackground|end|exponent|externalResourcesRequired|fill|fillOpacity|fillRule|filter|filterRes|filterUnits|floodColor|floodOpacity|focusable|fontFamily|fontSize|fontSizeAdjust|fontStretch|fontStyle|fontVariant|fontWeight|format|from|fr|fx|fy|g1|g2|glyphName|glyphOrientationHorizontal|glyphOrientationVertical|glyphRef|gradientTransform|gradientUnits|hanging|horizAdvX|horizOriginX|ideographic|imageRendering|in|in2|intercept|k|k1|k2|k3|k4|kernelMatrix|kernelUnitLength|kerning|keyPoints|keySplines|keyTimes|lengthAdjust|letterSpacing|lightingColor|limitingConeAngle|local|markerEnd|markerMid|markerStart|markerHeight|markerUnits|markerWidth|mask|maskContentUnits|maskUnits|mathematical|mode|numOctaves|offset|opacity|operator|order|orient|orientation|origin|overflow|overlinePosition|overlineThickness|panose1|paintOrder|pathLength|patternContentUnits|patternTransform|patternUnits|pointerEvents|points|pointsAtX|pointsAtY|pointsAtZ|preserveAlpha|preserveAspectRatio|primitiveUnits|r|radius|refX|refY|renderingIntent|repeatCount|repeatDur|requiredExtensions|requiredFeatures|restart|result|rotate|rx|ry|scale|seed|shapeRendering|slope|spacing|specularConstant|specularExponent|speed|spreadMethod|startOffset|stdDeviation|stemh|stemv|stitchTiles|stopColor|stopOpacity|strikethroughPosition|strikethroughThickness|string|stroke|strokeDasharray|strokeDashoffset|strokeLinecap|strokeLinejoin|strokeMiterlimit|strokeOpacity|strokeWidth|surfaceScale|systemLanguage|tableValues|targetX|targetY|textAnchor|textDecoration|textRendering|textLength|to|transform|u1|u2|underlinePosition|underlineThickness|unicode|unicodeBidi|unicodeRange|unitsPerEm|vAlphabetic|vHanging|vIdeographic|vMathematical|values|vectorEffect|version|vertAdvY|ver
tOriginX|vertOriginY|viewBox|viewTarget|visibility|widths|wordSpacing|writingMode|x|xHeight|x1|x2|xChannelSelector|xlinkActuate|xlinkArcrole|xlinkHref|xlinkRole|xlinkShow|xlinkTitle|xlinkType|xmlBase|xmlns|xmlnsXlink|xmlLang|xmlSpace|y|y1|y2|yChannelSelector|z|zoomAndPan|for|class|autofocus)|(([Dd][Aa][Tt][Aa]|[Aa][Rr][Ii][Aa]|x)-.*))$/,o=(0,a.Z)((function(e){return s.test(e)||111===e.charCodeAt(0)&&110===e.charCodeAt(1)&&e.charCodeAt(2)<91})),l=r(80123),c=r(70444),u=r(48137),h=r(27278),d=o,p=function(e){return"theme"!==e},f=function(e){return"string"==typeof e&&e.charCodeAt(0)>96?d:p},m=function(e,t,r){var n;if(t){var i=t.shouldForwardProp;n=e.__emotion_forwardProp&&i?function(t){return e.__emotion_forwardProp(t)&&i(t)}:i}return"function"!=typeof n&&r&&(n=e.__emotion_forwardProp),n},g=function(e){var t=e.cache,r=e.serialized,n=e.isStringTag;return(0,c.hC)(t,r,n),(0,h.L)((function(){return(0,c.My)(t,r,n)})),null},v=function e(t,r){var a,s,o=t.__emotion_real===t,h=o&&t.__emotion_base||t;void 0!==r&&(a=r.label,s=r.target);var d=m(t,r,o),p=d||f(h),v=!p("as");return function(){var A=arguments,y=o&&void 0!==t.__emotion_styles?t.__emotion_styles.slice(0):[];if(void 0!==a&&y.push("label:"+a+";"),null==A[0]||void 0===A[0].raw)y.push.apply(y,A);else{0,y.push(A[0][0]);for(var x=A.length,b=1;b{"use strict";r.d(t,{Zo:()=>u,kt:()=>p});var n=r(67294);function i(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function s(e){for(var t=1;t=0||(i[r]=e[r]);return i}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(i[r]=e[r])}return i}var l=n.createContext({}),c=function(e){var t=n.useContext(l),r=t;return 
e&&(r="function"==typeof e?e(t):s(s({},t),e)),r},u=function(e){var t=c(e.components);return n.createElement(l.Provider,{value:t},e.children)},h={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,i=e.mdxType,a=e.originalType,l=e.parentName,u=o(e,["components","mdxType","originalType","parentName"]),d=c(r),p=i,f=d["".concat(l,".").concat(p)]||d[p]||h[p]||a;return r?n.createElement(f,s(s({ref:t},u),{},{components:r})):n.createElement(f,s({ref:t},u))}));function p(e,t){var r=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var a=r.length,s=new Array(a);s[0]=d;var o={};for(var l in t)hasOwnProperty.call(t,l)&&(o[l]=t[l]);o.originalType=e,o.mdxType="string"==typeof e?e:i,s[1]=o;for(var c=2;c{"use strict";function n(){return n=Object.assign?Object.assign.bind():function(e){for(var t=1;tmh,AdaptiveEvents:()=>gh,ArcballControls:()=>vs,Backdrop:()=>cu,BakeShadows:()=>ch,Billboard:()=>yi,Bounds:()=>jc,Box:()=>bc,CameraShake:()=>Yc,Center:()=>Vc,Circle:()=>wc,Clone:()=>ns,Cloud:()=>wu,ComputedAttribute:()=>$a,Cone:()=>Ec,ContactShadows:()=>au,CubeCamera:()=>ss,CubicBezierLine:()=>Mi,CurveModifier:()=>oc,CycleRaycast:()=>A,Cylinder:()=>_c,Detailed:()=>oh,DeviceOrientationControls:()=>ls,Dodecahedron:()=>Dc,Edges:()=>Va,Effects:()=>Da,Environment:()=>$c,EnvironmentCube:()=>ru,EnvironmentMap:()=>eu,EnvironmentPortal:()=>tu,Extrude:()=>Uc,FirstPersonControls:()=>Ss,Float:()=>qc,FlyControls:()=>us,GizmoHelper:()=>Us,GizmoViewcube:()=>Ks,GizmoViewport:()=>$s,GradientTexture:()=>Ua,Html:()=>v,Icosahedron:()=>Fc,Image:()=>za,Instance:()=>Zu,Instances:()=>$u,IsObject:()=>ka,Lathe:()=>Nc,Lightformer:()=>mu,Line:()=>Ei,Loader:()=>_,MapControls:()=>ds,Merged:()=>eh,MeshDistortMaterial:()=>cc,MeshReflectorMaterial:()=>mc,MeshWobbleMaterial:()=>hc,Octahedron:()=>Lc,OrbitControls:()=>ps,OrthographicCamera:()=>is,PerspectiveCamera:()=>as,Plane:()=>Mc,Point:()=>Uu,PointMaterial:()=>vc,PointMaterial
Impl:()=>gc,PointerLockControls:()=>Es,Points:()=>ku,PointsBuffer:()=>Nu,Polyhedron:()=>Pc,PositionalAudio:()=>Ti,Preload:()=>lh,PresentationControls:()=>pi,QuadraticBezierLine:()=>Si,Reflector:()=>hu,Ring:()=>Rc,RoundedBox:()=>Oc,Sampler:()=>Za,ScreenQuad:()=>zc,Scroll:()=>P,ScrollControls:()=>I,Segment:()=>sh,Segments:()=>ih,Select:()=>vi,Shadow:()=>uu,Sky:()=>Au,Sphere:()=>Sc,SpotLight:()=>fu,Stage:()=>ou,Stars:()=>bu,Stats:()=>go,Tetrahedron:()=>Bc,Text:()=>Ia,Torus:()=>Cc,TorusKnot:()=>Ic,TrackballControls:()=>ms,Trail:()=>Ka,TransformControls:()=>bs,Tube:()=>Tc,calcPosFromAngles:()=>vu,isWebGL2Available:()=>La,meshBounds:()=>fh,shaderMaterial:()=>Na,softShadows:()=>yc,useAnimations:()=>nc,useAspect:()=>yo,useBVH:()=>tc,useBounds:()=>Wc,useBoxProjectedEnv:()=>ac,useCamera:()=>Ms,useContextBridge:()=>rc,useCubeTexture:()=>eo,useCursor:()=>y,useDepthBuffer:()=>Ao,useDetectGPU:()=>Ro,useFBO:()=>vo,useFBX:()=>ro,useGLTF:()=>lo,useGizmoContext:()=>Cs,useHelper:()=>Po,useIntersect:()=>ic,useKTX2:()=>ho,useMatcapTexture:()=>Eu,useNormalTexture:()=>_u,useProgress:()=>w,useScroll:()=>C,useSelect:()=>Ai,useTexture:()=>Oa,useTrail:()=>qa});var i=r(67294),a=r(73935),s=r(99477),o=r(64232);const l=new s.Vector3,c=new s.Vector3,u=new s.Vector3;function h(e,t,r){const n=l.setFromMatrixPosition(e.matrixWorld);n.project(t);const i=r.width/2,a=r.height/2;return[n.x*i+i,-n.y*a+a]}const d=e=>Math.abs(e)<1e-10?0:e;function p(e,t,r=""){let n="matrix3d(";for(let i=0;16!==i;i++)n+=d(t[i]*e.elements[i])+(15!==i?",":")");return r+n}const f=(m=[1,-1,1,1,1,-1,1,1,1,-1,1,1,1,-1,1,1],e=>p(e,m));var m;const g=(e,t)=>p(e,(e=>[1/e,1/e,1/e,1,-1/e,-1/e,-1/e,-1,1/e,1/e,1/e,1,1,1,1,1])(t),"translate(-50%,-50%)"),v=i.forwardRef((({children:e,eps:t=.001,style:r,className:p,prepend:m,center:v,fullscreen:A,portal:y,distanceFactor:x,sprite:b=!1,transform:w=!1,occlude:E,onOcclude:_,zIndexRange:S=[16777271,0],calculatePosition:M=h,as:T="div",wrapperClass:C,pointerEvents:I="auto",...B},R)=>{var P;const 
F=(0,o.useThree)((({gl:e})=>e)),L=(0,o.useThree)((({camera:e})=>e)),D=(0,o.useThree)((({scene:e})=>e)),U=(0,o.useThree)((({size:e})=>e)),N=(0,o.useThree)((({raycaster:e})=>e)),[k]=i.useState((()=>document.createElement(T))),O=i.useRef(null),G=i.useRef(0),z=i.useRef([0,0]),V=i.useRef(null),H=i.useRef(null),Q=null!==(P=null==y?void 0:y.current)&&void 0!==P?P:F.domElement.parentNode;i.useEffect((()=>{if(O.current){if(D.updateMatrixWorld(),w)k.style.cssText="position:absolute;top:0;left:0;pointer-events:none;overflow:hidden;";else{const e=M(O.current,L,U);k.style.cssText=`position:absolute;top:0;left:0;transform:translate3d(${e[0]}px,${e[1]}px,0);transform-origin:0 0;`}return Q&&(m?Q.prepend(k):Q.appendChild(k)),()=>{Q&&Q.removeChild(k),a.unmountComponentAtNode(k)}}}),[Q,w]),i.useLayoutEffect((()=>{C&&(k.className=C)}),[C]);const j=i.useMemo((()=>w?{position:"absolute",top:0,left:0,width:U.width,height:U.height,transformStyle:"preserve-3d",pointerEvents:"none"}:{position:"absolute",transform:v?"translate3d(-50%,-50%,0)":"none",...A&&{top:-U.height/2,left:-U.width/2,width:U.width,height:U.height},...r}),[r,v,A,U,w]),W=i.useMemo((()=>({position:"absolute",pointerEvents:I})),[I]);i.useLayoutEffect((()=>{w?a.render(i.createElement("div",{ref:V,style:j},i.createElement("div",{ref:H,style:W},i.createElement("div",{ref:R,className:p,style:r,children:e}))),k):a.render(i.createElement("div",{ref:R,style:j,className:p,children:e}),k)}));const X=i.useRef(!0);return(0,o.useFrame)((()=>{if(O.current){L.updateMatrixWorld(),O.current.updateWorldMatrix(!0,!1);const e=w?z.current:M(O.current,L,U);if(w||Math.abs(G.current-L.zoom)>t||Math.abs(z.current[0]-e[0])>t||Math.abs(z.current[1]-e[1])>t){const t=function(e,t){const r=l.setFromMatrixPosition(e.matrixWorld),n=c.setFromMatrixPosition(t.matrixWorld),i=r.sub(n),a=t.getWorldDirection(u);return i.angleTo(a)>Math.PI/2}(O.current,L);let r=!1;"boolean"==typeof E?!0===E&&(r=[D]):Array.isArray(E)&&(r=E.map((e=>e.current)));const 
n=X.current;if(r){const e=function(e,t,r,n){const i=l.setFromMatrixPosition(e.matrixWorld),a=i.clone();a.project(t),r.setFromCamera(a,t);const s=r.intersectObjects(n,!0);if(s.length){const e=s[0].distance;return i.distanceTo(r.ray.origin)e.raycaster)),c=(0,o.useThree)((e=>e.get)),u=(0,o.useThree)((e=>e.gl));return i.useEffect((()=>{var i;let o,h=[];const d=l.filter,p=null!==(i=null==t?void 0:t.current)&&void 0!==i?i:u.domElement.parentNode,f=()=>p&&e&&e(h,Math.round(s.current)%h.length);l.filter=(e,t)=>{let r=[...e];r.length===h.length&&h.every((e=>r.map((e=>e.object.uuid)).includes(e.object.uuid)))||(s.current=0,h=r,f()),d&&(r=d(r,t));for(let n=0;n{var t,r;s.current=e(s.current),null==(t=c().events.handlers)||t.onPointerCancel(void 0),null==(r=c().events.handlers)||r.onPointerMove(o),f()},g=e=>{(e.keyCode||e.which===a)&&(r&&e.preventDefault(),h.length>1&&m((e=>e+1)))},v=e=>{r&&e.preventDefault();let t=0;e||(e=window.event),e.wheelDelta?t=e.wheelDelta/120:e.detail&&(t=-e.detail/3),h.length>1&&m((e=>Math.abs(e-t)))},A=e=>o=e;return document.addEventListener("pointermove",A,{passive:!0}),n&&document.addEventListener("wheel",v),void 0!==a&&document.addEventListener("keydown",g),()=>{l.filter=d,void 0!==a&&document.removeEventListener("keydown",g),n&&document.removeEventListener("wheel",v),document.removeEventListener("pointermove",A)}}),[u,c,l,r,n,a]),null}function y(e,t="pointer",r="auto"){i.useEffect((()=>{if(e)return document.body.style.cursor=t,()=>{document.body.style.cursor=r}}),[e])}var x=r(14671);let b=0;const w=(0,x.Z)((e=>(s.DefaultLoadingManager.onStart=(t,r,n)=>{e({active:!0,item:t,loaded:r,total:n,progress:(r-b)/(n-b)*100})},s.DefaultLoadingManager.onLoad=()=>{e({active:!1})},s.DefaultLoadingManager.onError=t=>e((e=>({errors:[...e.errors,t]}))),s.DefaultLoadingManager.onProgress=(t,r,n)=>{r===n&&(b=n),e({active:!0,item:t,loaded:r,total:n,progress:(r-b)/(n-b)*100||100})},{errors:[],active:!1,progress:0,item:"",loaded:0,total:0}))),E=e=>`Loading 
${e.toFixed(2)}%`;function _({containerStyles:e,innerStyles:t,barStyles:r,dataStyles:n,dataInterpolation:a=E,initialState:s=(e=>e)}){const{active:o,progress:l}=w(),c=i.useRef(0),u=i.useRef(0),h=i.useRef(null),[d,p]=i.useState(s(o));i.useEffect((()=>{let e;return o!==d&&(e=setTimeout((()=>p(o)),300)),()=>clearTimeout(e)}),[d,o]);const f=i.useCallback((()=>{h.current&&(c.current+=(l-c.current)/2,(c.current>.95*l||100===l)&&(c.current=l),h.current.innerText=a(c.current),c.current(f(),()=>cancelAnimationFrame(u.current))),[f]),d?i.createElement("div",{style:{...S.container,opacity:o?1:0,...e}},i.createElement("div",null,i.createElement("div",{style:{...S.inner,...t}},i.createElement("div",{style:{...S.bar,transform:`scaleX(${l/100})`,...r}}),i.createElement("span",{ref:h,style:{...S.data,...n}})))):null}const S={container:{position:"absolute",top:0,left:0,width:"100%",height:"100%",background:"#171717",display:"flex",alignItems:"center",justifyContent:"center",transition:"opacity 300ms ease",zIndex:1e3},inner:{width:100,height:3,background:"#272727",textAlign:"center"},bar:{height:3,width:"100%",background:"white",transition:"transform 200ms",transformOrigin:"left center"},data:{display:"inline-block",position:"relative",fontVariantNumeric:"tabular-nums",marginTop:"0.8em",color:"#f0f0f0",fontSize:"0.6em",fontFamily:'-apple-system, BlinkMacSystemFont, "Inter", "Segoe UI", "Helvetica Neue", Helvetica, Arial, Roboto, Ubuntu, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"',whiteSpace:"nowrap"}};const M=function(e){return function(t){e.forEach((function(e){"function"==typeof e?e(t):null!=e&&(e.current=t)}))}},T=i.createContext(null);function C(){return i.useContext(T)}function 
I({eps:e=1e-5,enabled:t=!0,infinite:r,horizontal:n,pages:a=1,distance:l=1,damping:c=4,style:u={},children:h}){const{gl:d,size:p,invalidate:f,events:m,raycaster:g}=(0,o.useThree)(),[v]=i.useState((()=>document.createElement("div"))),[A]=i.useState((()=>document.createElement("div"))),[y]=i.useState((()=>document.createElement("div"))),x=d.domElement.parentNode,b=i.useRef(0),w=i.useMemo((()=>{const t={el:v,eps:e,fill:A,fixed:y,horizontal:n,damping:c,offset:0,delta:0,scroll:b,pages:a,range(e,t,r=0){const n=e-r,i=n+t+2*r;return this.offseti?1:(this.offset-n)/(i-n)},curve(e,t,r=0){return Math.sin(this.range(e,t,r)*Math.PI)},visible(e,t,r=0){const n=e-r,i=n+t+2*r;return this.offset>=n&&this.offset<=i}};return t}),[e,c,n,a]);i.useEffect((()=>{v.style.position="absolute",v.style.width="100%",v.style.height="100%",v.style[n?"overflowX":"overflowY"]="auto",v.style[n?"overflowY":"overflowX"]="hidden",v.style.top="0px",v.style.left="0px";for(const r in u)v.style[r]=u[r];y.style.position="sticky",y.style.top="0px",y.style.left="0px",y.style.width="100%",y.style.height="100%",y.style.overflow="hidden",v.appendChild(y),A.style.height=n?"100%":a*l*100+"%",A.style.width=n?a*l*100+"%":"100%",A.style.pointerEvents="none",v.appendChild(A),x.appendChild(v),v[n?"scrollLeft":"scrollTop"]=1;const e=m.connected||d.domElement;requestAnimationFrame((()=>null==m.connect?void 0:m.connect(v)));const t=g.computeOffsets;return g.computeOffsets=({clientX:e,clientY:t})=>({offsetX:e-x.offsetLeft,offsetY:t-x.offsetTop}),()=>{x.removeChild(v),g.computeOffsets=t,null==m.connect||m.connect(e)}}),[a,l,n,v,A,y,x]),i.useEffect((()=>{const e=p[n?"width":"height"],i=v[n?"scrollWidth":"scrollHeight"],a=i-e;let s=0,o=!0,l=!0;const c=()=>{if(t&&!l&&(f(),s=v[n?"scrollLeft":"scrollTop"],b.current=s/a,r)){if(!o)if(s>=a){const e=1-w.offset;v[n?"scrollLeft":"scrollTop"]=1,b.current=w.offset=-e,o=!0}else if(s<=0){const 
e=1+w.offset;v[n?"scrollLeft":"scrollTop"]=i,b.current=w.offset=e,o=!0}o&&setTimeout((()=>o=!1),40)}};v.addEventListener("scroll",c,{passive:!0}),requestAnimationFrame((()=>l=!1));const u=e=>v.scrollLeft+=e.deltaY/2;return n&&v.addEventListener("wheel",u,{passive:!0}),()=>{v.removeEventListener("scroll",c),n&&v.removeEventListener("wheel",u)}}),[v,p,r,w,f,n,t]);let E=0;return(0,o.useFrame)(((t,r)=>{w.offset=s.MathUtils.damp(E=w.offset,b.current,c,r),w.delta=s.MathUtils.damp(w.delta,Math.abs(E-w.offset),c,r),w.delta>e&&f()})),i.createElement(T.Provider,{value:w},h)}const B=i.forwardRef((({children:e},t)=>{const r=i.useRef(null),n=C(),{width:a,height:s}=(0,o.useThree)((e=>e.viewport));return(0,o.useFrame)((()=>{r.current.position.x=n.horizontal?-a*(n.pages-1)*n.offset:0,r.current.position.y=n.horizontal?0:s*(n.pages-1)*n.offset})),i.createElement("group",{ref:M([t,r])},e)})),R=i.forwardRef((({children:e,style:t,...r},s)=>{const l=C(),c=i.useRef(null),{width:u,height:h}=(0,o.useThree)((e=>e.size)),d=i.useContext(o.context);return(0,o.useFrame)((()=>{l.delta>l.eps&&(c.current.style.transform=`translate3d(${l.horizontal?-u*(l.pages-1)*l.offset:0}px,${l.horizontal?0:h*(l.pages-1)*-l.offset}px,0)`)})),a.render(i.createElement("div",n({ref:M([s,c]),style:{...t,position:"absolute",top:0,left:0,willChange:"transform"}},r),i.createElement(T.Provider,{value:l},i.createElement(o.context.Provider,{value:d},e))),l.fixed),null})),P=i.forwardRef((({html:e,...t},r)=>{const a=e?R:B;return i.createElement(a,n({ref:r},t))}));var F=q(),L=e=>j(e,F),D=q();L.write=e=>j(e,D);var U=q();L.onStart=e=>j(e,U);var N=q();L.onFrame=e=>j(e,N);var k=q();L.onFinish=e=>j(e,k);var O=[];L.setTimeout=(e,t)=>{let r=L.now()+t,n=()=>{let e=O.findIndex((e=>e.cancel==n));~e&&O.splice(e,1),H-=~e?1:0},i={time:r,handler:e,cancel:n};return O.splice(G(r),0,i),H+=1,W(),i};var 
G=e=>~(~O.findIndex((t=>t.time>e))||~O.length);L.cancel=e=>{U.delete(e),N.delete(e),k.delete(e),F.delete(e),D.delete(e)},L.sync=e=>{Q=!0,L.batchedUpdates(e),Q=!1},L.throttle=e=>{let t;function r(){try{e(...t)}finally{t=null}}function n(...e){t=e,L.onStart(r)}return n.handler=e,n.cancel=()=>{U.delete(r),t=null},n};var z=typeof window<"u"?window.requestAnimationFrame:()=>{};L.use=e=>z=e,L.now=typeof performance<"u"?()=>performance.now():Date.now,L.batchedUpdates=e=>e(),L.catch=console.error,L.frameLoop="always",L.advance=()=>{"demand"!==L.frameLoop?console.warn("Cannot call the manual advancement of rafz whilst frameLoop is not set as demand"):Y()};var V=-1,H=0,Q=!1;function j(e,t){Q?(t.delete(e),e(0)):(t.add(e),W())}function W(){V<0&&(V=0,"demand"!==L.frameLoop&&z(X))}function X(){~V&&(z(X),L.batchedUpdates(Y))}function Y(){let e=V;V=L.now();let t=G(V);t&&(K(O.splice(0,t),(e=>e.handler())),H-=t),H?(U.flush(),F.flush(e?Math.min(64,V-e):16.667),N.flush(),D.flush(),k.flush()):V=-1}function q(){let e=new Set,t=e;return{add(r){H+=t!=e||e.has(r)?0:1,e.add(r)},delete:r=>(H-=t==e&&e.has(r)?1:0,e.delete(r)),flush(r){t.size&&(e=new Set,H-=t.size,K(t,(t=>t(r)&&e.add(t))),H+=e.size,t=e)}}}function K(e,t){e.forEach((e=>{try{t(e)}catch(Jr){L.catch(Jr)}}))}var J=Object.defineProperty,Z={};function $(){}((e,t)=>{for(var r in t)J(e,r,{get:t[r],enumerable:!0})})(Z,{assign:()=>pe,colors:()=>ue,createStringInterpolator:()=>se,skipAnimation:()=>he,to:()=>oe,willAdvance:()=>de});var ee={arr:Array.isArray,obj:e=>!!e&&"Object"===e.constructor.name,fun:e=>"function"==typeof e,str:e=>"string"==typeof e,num:e=>"number"==typeof e,und:e=>void 0===e};function te(e,t){if(ee.arr(e)){if(!ee.arr(t)||e.length!==t.length)return!1;for(let r=0;re.forEach(t);function ne(e,t,r){if(ee.arr(e))for(let n=0;nee.und(e)?[]:ee.arr(e)?e:[e];function ae(e,t){if(e.size){let r=Array.from(e);e.clear(),re(r,t)}}var se,oe,le=(e,...t)=>ae(e,(e=>e(...t))),ce=()=>typeof 
window>"u"||!window.navigator||/ServerSideRendering|^Deno\//.test(window.navigator.userAgent),ue=null,he=!1,de=$,pe=e=>{e.to&&(oe=e.to),e.now&&(L.now=e.now),void 0!==e.colors&&(ue=e.colors),null!=e.skipAnimation&&(he=e.skipAnimation),e.createStringInterpolator&&(se=e.createStringInterpolator),e.requestAnimationFrame&&L.use(e.requestAnimationFrame),e.batchedUpdates&&(L.batchedUpdates=e.batchedUpdates),e.willAdvance&&(de=e.willAdvance),e.frameLoop&&(L.frameLoop=e.frameLoop)},fe=new Set,me=[],ge=[],ve=0,Ae={get idle(){return!fe.size&&!me.length},start(e){ve>e.priority?(fe.add(e),L.onStart(ye)):(xe(e),L(we))},advance:we,sort(e){if(ve)L.onFrame((()=>Ae.sort(e)));else{let t=me.indexOf(e);~t&&(me.splice(t,1),be(e))}},clear(){me=[],fe.clear()}};function ye(){fe.forEach(xe),fe.clear(),L(we)}function xe(e){me.includes(e)||be(e)}function be(e){me.splice(function(e,t){let r=e.findIndex(t);return r<0?e.length:r}(me,(t=>t.priority>e.priority)),0,e)}function we(e){let t=ge;for(let r=0;r0}var Ee="[-+]?\\d*\\.?\\d+",_e=Ee+"%";function Se(...e){return"\\(\\s*("+e.join(")\\s*,\\s*(")+")\\s*\\)"}var Me=new RegExp("rgb"+Se(Ee,Ee,Ee)),Te=new RegExp("rgba"+Se(Ee,Ee,Ee,Ee)),Ce=new RegExp("hsl"+Se(Ee,_e,_e)),Ie=new RegExp("hsla"+Se(Ee,_e,_e,Ee)),Be=/^#([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,Re=/^#([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,Pe=/^#([0-9a-fA-F]{6})$/,Fe=/^#([0-9a-fA-F]{8})$/;function Le(e,t,r){return r<0&&(r+=1),r>1&&(r-=1),r<1/6?e+6*(t-e)*r:r<.5?t:r<2/3?e+(t-e)*(2/3-r)*6:e}function De(e,t,r){let n=r<.5?r*(1+t):r+t-r*t,i=2*r-n,a=Le(i,n,e+1/3),s=Le(i,n,e),o=Le(i,n,e-1/3);return Math.round(255*a)<<24|Math.round(255*s)<<16|Math.round(255*o)<<8}function Ue(e){let t=parseInt(e,10);return t<0?0:t>255?255:t}function Ne(e){return(parseFloat(e)%360+360)%360/360}function ke(e){let t=parseFloat(e);return t<0?0:t>1?255:Math.round(255*t)}function Oe(e){let t=parseFloat(e);return t<0?0:t>100?1:t/100}function Ge(e){let t=function(e){let 
t;return"number"==typeof e?e>>>0===e&&e>=0&&e<=4294967295?e:null:(t=Pe.exec(e))?parseInt(t[1]+"ff",16)>>>0:ue&&void 0!==ue[e]?ue[e]:(t=Me.exec(e))?(Ue(t[1])<<24|Ue(t[2])<<16|Ue(t[3])<<8|255)>>>0:(t=Te.exec(e))?(Ue(t[1])<<24|Ue(t[2])<<16|Ue(t[3])<<8|ke(t[4]))>>>0:(t=Be.exec(e))?parseInt(t[1]+t[1]+t[2]+t[2]+t[3]+t[3]+"ff",16)>>>0:(t=Fe.exec(e))?parseInt(t[1],16)>>>0:(t=Re.exec(e))?parseInt(t[1]+t[1]+t[2]+t[2]+t[3]+t[3]+t[4]+t[4],16)>>>0:(t=Ce.exec(e))?(255|De(Ne(t[1]),Oe(t[2]),Oe(t[3])))>>>0:(t=Ie.exec(e))?(De(Ne(t[1]),Oe(t[2]),Oe(t[3]))|ke(t[4]))>>>0:null}(e);return null===t?e:(t=t||0,`rgba(${(4278190080&t)>>>24}, ${(16711680&t)>>>16}, ${(65280&t)>>>8}, ${(255&t)/255})`)}var ze=(e,t,r)=>{if(ee.fun(e))return e;if(ee.arr(e))return ze({range:e,output:t,extrapolate:r});if(ee.str(e.output[0]))return se(e);let n=e,i=n.output,a=n.range||[0,1],s=n.extrapolateLeft||n.extrapolate||"extend",o=n.extrapolateRight||n.extrapolate||"extend",l=n.easing||(e=>e);return e=>{let t=function(e,t){for(var r=1;r=e);++r);return r-1}(e,a);return function(e,t,r,n,i,a,s,o,l){let c=l?l(e):e;if(cr){if("identity"===o)return c;"clamp"===o&&(c=r)}return n===i?n:t===r?e<=t?n:i:(t===-1/0?c=-c:r===1/0?c-=t:c=(c-t)/(r-t),c=a(c),n===-1/0?c=-c:i===1/0?c+=n:c=c*(i-n)+n,c)}(e,a[t],a[t+1],i[t],i[t+1],l,s,o,n.map)}};var 
Ve=1.70158,He=1.525*Ve,Qe=Ve+1,je=2*Math.PI/3,We=2*Math.PI/4.5,Xe=e=>e<1/2.75?7.5625*e*e:e<2/2.75?7.5625*(e-=1.5/2.75)*e+.75:e<2.5/2.75?7.5625*(e-=2.25/2.75)*e+.9375:7.5625*(e-=2.625/2.75)*e+.984375,Ye={linear:e=>e,easeInQuad:e=>e*e,easeOutQuad:e=>1-(1-e)*(1-e),easeInOutQuad:e=>e<.5?2*e*e:1-Math.pow(-2*e+2,2)/2,easeInCubic:e=>e*e*e,easeOutCubic:e=>1-Math.pow(1-e,3),easeInOutCubic:e=>e<.5?4*e*e*e:1-Math.pow(-2*e+2,3)/2,easeInQuart:e=>e*e*e*e,easeOutQuart:e=>1-Math.pow(1-e,4),easeInOutQuart:e=>e<.5?8*e*e*e*e:1-Math.pow(-2*e+2,4)/2,easeInQuint:e=>e*e*e*e*e,easeOutQuint:e=>1-Math.pow(1-e,5),easeInOutQuint:e=>e<.5?16*e*e*e*e*e:1-Math.pow(-2*e+2,5)/2,easeInSine:e=>1-Math.cos(e*Math.PI/2),easeOutSine:e=>Math.sin(e*Math.PI/2),easeInOutSine:e=>-(Math.cos(Math.PI*e)-1)/2,easeInExpo:e=>0===e?0:Math.pow(2,10*e-10),easeOutExpo:e=>1===e?1:1-Math.pow(2,-10*e),easeInOutExpo:e=>0===e?0:1===e?1:e<.5?Math.pow(2,20*e-10)/2:(2-Math.pow(2,-20*e+10))/2,easeInCirc:e=>1-Math.sqrt(1-Math.pow(e,2)),easeOutCirc:e=>Math.sqrt(1-Math.pow(e-1,2)),easeInOutCirc:e=>e<.5?(1-Math.sqrt(1-Math.pow(2*e,2)))/2:(Math.sqrt(1-Math.pow(-2*e+2,2))+1)/2,easeInBack:e=>Qe*e*e*e-Ve*e*e,easeOutBack:e=>1+Qe*Math.pow(e-1,3)+Ve*Math.pow(e-1,2),easeInOutBack:e=>e<.5?Math.pow(2*e,2)*(2*(He+1)*e-He)/2:(Math.pow(2*e-2,2)*((He+1)*(2*e-2)+He)+2)/2,easeInElastic:e=>0===e?0:1===e?1:-Math.pow(2,10*e-10)*Math.sin((10*e-10.75)*je),easeOutElastic:e=>0===e?0:1===e?1:Math.pow(2,-10*e)*Math.sin((10*e-.75)*je)+1,easeInOutElastic:e=>0===e?0:1===e?1:e<.5?-Math.pow(2,20*e-10)*Math.sin((20*e-11.125)*We)/2:Math.pow(2,-20*e+10)*Math.sin((20*e-11.125)*We)/2+1,easeInBounce:e=>1-Xe(1-e),easeOutBounce:Xe,easeInOutBounce:e=>e<.5?(1-Xe(1-2*e))/2:(1+Xe(2*e-1))/2,steps:(e,t="end")=>r=>{let 
n=(r="end"===t?Math.min(r,.999):Math.max(r,.001))*e;return((e,t,r)=>Math.min(Math.max(r,e),t))(0,1,("end"===t?Math.floor(n):Math.ceil(n))/e)}},qe=Symbol.for("FluidValue.get"),Ke=Symbol.for("FluidValue.observers"),Je=e=>!(!e||!e[qe]),Ze=e=>e&&e[qe]?e[qe]():e,$e=e=>e[Ke]||null;function et(e,t){let r=e[Ke];r&&r.forEach((e=>{!function(e,t){e.eventObserved?e.eventObserved(t):e(t)}(e,t)}))}var tt=class{[qe];[Ke];constructor(e){if(!e&&!(e=this.get))throw Error("Unknown getter");rt(this,e)}},rt=(e,t)=>st(e,qe,t);function nt(e,t){if(e[qe]){let r=e[Ke];r||st(e,Ke,r=new Set),r.has(t)||(r.add(t),e.observerAdded&&e.observerAdded(r.size,t))}return t}function it(e,t){let r=e[Ke];if(r&&r.has(t)){let n=r.size-1;n?r.delete(t):e[Ke]=null,e.observerRemoved&&e.observerRemoved(n,t)}}var at,st=(e,t,r)=>Object.defineProperty(e,t,{value:r,writable:!0,configurable:!0}),ot=/[+\-]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,lt=/(#(?:[0-9a-f]{2}){2,4}|(#[0-9a-f]{3})|(rgb|hsl)a?\((-?\d+%?[,\s]+){2,3}\s*[\d\.]+%?\))/gi,ct=new RegExp(`(${ot.source})(%|[a-z]+)`,"i"),ut=/rgba\(([0-9\.-]+), ([0-9\.-]+), ([0-9\.-]+), ([0-9\.-]+)\)/gi,ht=/var\((--[a-zA-Z0-9-_]+),? 
?([a-zA-Z0-9 ()%#.,-]+)?\)/,dt=e=>{let[t,r]=pt(e);if(!t||ce())return e;let n=window.getComputedStyle(document.documentElement).getPropertyValue(t);if(n)return n.trim();if(r&&r.startsWith("--")){return window.getComputedStyle(document.documentElement).getPropertyValue(r)||e}return r&&ht.test(r)?dt(r):r||e},pt=e=>{let t=ht.exec(e);if(!t)return[,];let[,r,n]=t;return[r,n]},ft=(e,t,r,n,i)=>`rgba(${Math.round(t)}, ${Math.round(r)}, ${Math.round(n)}, ${i})`,mt=e=>{at||(at=ue?new RegExp(`(${Object.keys(ue).join("|")})(?!\\w)`,"g"):/^\b$/);let t=e.output.map((e=>Ze(e).replace(ht,dt).replace(lt,Ge).replace(at,Ge))),r=t.map((e=>e.match(ot).map(Number))),n=r[0].map(((e,t)=>r.map((e=>{if(!(t in e))throw Error('The arity of each "output" value must be equal');return e[t]})))).map((t=>ze({...e,output:t})));return e=>{let r=!ct.test(t[0])&&t.find((e=>ct.test(e)))?.replace(ot,""),i=0;return t[0].replace(ot,(()=>`${n[i++](e)}${r||""}`)).replace(ut,ft)}},gt="react-spring: ",vt=e=>{let t=e,r=!1;if("function"!=typeof t)throw new TypeError(`${gt}once requires a function parameter`);return(...e)=>{r||(t(...e),r=!0)}},At=vt(console.warn);var yt=vt(console.warn);function xt(e){return ee.str(e)&&("#"==e[0]||/\d/.test(e)||!ce()&&ht.test(e)||e in(ue||{}))}new WeakMap;new Set,new WeakMap,new WeakMap,new WeakMap;var bt=ce()?i.useEffect:i.useLayoutEffect;function wt(){let e=(0,i.useState)()[1],t=(()=>{let e=(0,i.useRef)(!1);return bt((()=>(e.current=!0,()=>{e.current=!1})),[]),e})();return()=>{t.current&&e(Math.random())}}var Et=e=>(0,i.useEffect)(e,_t),_t=[];function St(e){let t=(0,i.useRef)();return(0,i.useEffect)((()=>{t.current=e})),t.current}var Mt=Symbol.for("Animated:node"),Tt=e=>e&&e[Mt],Ct=(e,t)=>((e,t,r)=>Object.defineProperty(e,t,{value:r,writable:!0,configurable:!0}))(e,Mt,t),It=e=>e&&e[Mt]&&e[Mt].getPayload(),Bt=class{payload;constructor(){Ct(this,this)}getPayload(){return this.payload||[]}},Rt=class extends 
Bt{constructor(e){super(),this._value=e,ee.num(this._value)&&(this.lastPosition=this._value)}done=!0;elapsedTime;lastPosition;lastVelocity;v0;durationProgress=0;static create(e){return new Rt(e)}getPayload(){return[this]}getValue(){return this._value}setValue(e,t){return ee.num(e)&&(this.lastPosition=e,t&&(e=Math.round(e/t)*t,this.done&&(this.lastPosition=e))),this._value!==e&&(this._value=e,!0)}reset(){let{done:e}=this;this.done=!1,ee.num(this._value)&&(this.elapsedTime=0,this.durationProgress=0,this.lastPosition=this._value,e&&(this.lastVelocity=null),this.v0=null)}},Pt=class extends Rt{_string=null;_toString;constructor(e){super(0),this._toString=ze({output:[e,e]})}static create(e){return new Pt(e)}getValue(){return this._string??(this._string=this._toString(this._value))}setValue(e){if(ee.str(e)){if(e==this._string)return!1;this._string=e,this._value=1}else{if(!super.setValue(e))return!1;this._string=null}return!0}reset(e){e&&(this._toString=ze({output:[this.getValue(),e]})),this._value=0,super.reset()}},Ft={dependencies:null},Lt=class extends Bt{constructor(e){super(),this.source=e,this.setValue(e)}getValue(e){let t={};return ne(this.source,((r,n)=>{(e=>!!e&&e[Mt]===e)(r)?t[n]=r.getValue(e):Je(r)?t[n]=Ze(r):e||(t[n]=r)})),t}setValue(e){this.source=e,this.payload=this._makePayload(e)}reset(){this.payload&&re(this.payload,(e=>e.reset()))}_makePayload(e){if(e){let t=new Set;return ne(e,this._addToPayload,t),Array.from(t)}}_addToPayload(e){Ft.dependencies&&Je(e)&&Ft.dependencies.add(e);let t=It(e);t&&re(t,(e=>this.add(e)))}},Dt=class extends Lt{constructor(e){super(e)}static create(e){return new Dt(e)}getValue(){return this.source.map((e=>e.getValue()))}setValue(e){let t=this.getPayload();return e.length==t.length?t.map(((t,r)=>t.setValue(e[r]))).some(Boolean):(super.setValue(e.map(Ut)),!0)}};function Ut(e){return(xt(e)?Pt:Rt).create(e)}function Nt(e){let t=Tt(e);return t?t.constructor:ee.arr(e)?Dt:xt(e)?Pt:Rt}var kt=(e,t)=>{let 
r=!ee.fun(e)||e.prototype&&e.prototype.isReactComponent;return(0,i.forwardRef)(((n,a)=>{let s=(0,i.useRef)(null),o=r&&(0,i.useCallback)((e=>{s.current=function(e,t){return e&&(ee.fun(e)?e(t):e.current=t),t}(a,e)}),[a]),[l,c]=function(e,t){let r=new Set;return Ft.dependencies=r,e.style&&(e={...e,style:t.createAnimatedStyle(e.style)}),e=new Lt(e),Ft.dependencies=null,[e,r]}(n,t),u=wt(),h=()=>{let e=s.current;r&&!e||!1===(!!e&&t.applyAnimatedValues(e,l.getValue(!0)))&&u()},d=new Ot(h,c),p=(0,i.useRef)();bt((()=>(p.current=d,re(c,(e=>nt(e,d))),()=>{p.current&&(re(p.current.deps,(e=>it(e,p.current))),L.cancel(p.current.update))}))),(0,i.useEffect)(h,[]),Et((()=>()=>{let e=p.current;re(e.deps,(t=>it(t,e)))}));let f=t.getComponentProps(l.getValue());return i.createElement(e,{...f,ref:o})}))},Ot=class{constructor(e,t){this.update=e,this.deps=t}eventObserved(e){"change"==e.type&&L.write(this.update)}};var Gt=Symbol.for("AnimatedComponent"),zt=e=>ee.str(e)?e:e&&ee.str(e.displayName)?e.displayName:ee.fun(e)&&e.name||null;function Vt(e,...t){return ee.fun(e)?e(...t):e}var Ht=(e,t)=>!0===e||!!(t&&e&&(ee.fun(e)?e(t):ie(e).includes(t))),Qt=(e,t)=>ee.obj(e)?t&&e[t]:e,jt=(e,t)=>!0===e.default?e[t]:e.default?e.default[t]:void 0,Wt=e=>e,Xt=(e,t=Wt)=>{let r=Yt;e.default&&!0!==e.default&&(e=e.default,r=Object.keys(e));let n={};for(let i of r){let r=t(e[i],i);ee.und(r)||(n[i]=r)}return n},Yt=["config","onProps","onStart","onChange","onPause","onResume","onRest"],qt={config:1,from:1,to:1,ref:1,loop:1,reset:1,pause:1,cancel:1,reverse:1,immediate:1,default:1,delay:1,onProps:1,onStart:1,onChange:1,onPause:1,onResume:1,onRest:1,onResolve:1,items:1,trail:1,sort:1,expires:1,initial:1,enter:1,update:1,leave:1,children:1,onDestroyed:1,keys:1,callId:1,parentId:1};function Kt(e){let t=function(e){let t={},r=0;if(ne(e,((e,n)=>{qt[n]||(t[n]=e,r++)})),r)return t}(e);if(t){let r={to:t};return ne(e,((e,n)=>n in t||(r[n]=e))),r}return{...e}}function Jt(e){return 
e=Ze(e),ee.arr(e)?e.map(Jt):xt(e)?Z.createStringInterpolator({range:[0,1],output:[e,e]})(1):e}function Zt(e){for(let t in e)return!0;return!1}function $t(e){return ee.fun(e)||ee.arr(e)&&ee.obj(e[0])}function er(e,t){e.ref?.delete(e),t?.delete(e)}function tr(e,t){t&&e.ref!==t&&(e.ref?.delete(e),t.add(e),e.ref=t)}var rr={tension:170,friction:26,mass:1,damping:1,easing:Ye.linear,clamp:!1};function nr(e,t){if(ee.und(t.decay)){let r=!ee.und(t.tension)||!ee.und(t.friction);(r||!ee.und(t.frequency)||!ee.und(t.damping)||!ee.und(t.mass))&&(e.duration=void 0,e.decay=void 0),r&&(e.frequency=void 0)}else e.duration=void 0}var ir=[];function ar(e,{key:t,props:r,defaultProps:n,state:i,actions:a}){return new Promise(((s,o)=>{let l,c,u=Ht(r.cancel??n?.cancel,t);if(u)p();else{ee.und(r.pause)||(i.paused=Ht(r.pause,t));let e=n?.pause;!0!==e&&(e=i.paused||Ht(e,t)),l=Vt(r.delay||0,t),e?(i.resumeQueue.add(d),a.pause()):(a.resume(),d())}function h(){i.resumeQueue.add(d),i.timeouts.delete(c),c.cancel(),l=c.time-L.now()}function d(){l>0&&!Z.skipAnimation?(i.delayed=!0,c=L.setTimeout(p,l),i.pauseQueue.add(h),i.timeouts.add(c)):p()}function p(){i.delayed&&(i.delayed=!1),i.pauseQueue.delete(h),i.timeouts.delete(c),e<=(i.cancelId||0)&&(u=!0);try{a.start({...r,callId:e,cancel:u},s)}catch(j){o(j)}}}))}var sr=(e,t)=>1==t.length?t[0]:t.some((e=>e.cancelled))?cr(e.get()):t.every((e=>e.noop))?or(e.get()):lr(e.get(),t.every((e=>e.finished))),or=e=>({value:e,noop:!0,finished:!0,cancelled:!1}),lr=(e,t,r=!1)=>({value:e,finished:t,cancelled:r}),cr=e=>({value:e,cancelled:!0,finished:!1});function ur(e,t,r,n){let{callId:i,parentId:a,onRest:s}=t,{asyncTo:o,promise:l}=r;return a||e!==o||t.reset?r.promise=(async()=>{r.asyncId=i,r.asyncTo=e;let c,u,h,d=Xt(t,((e,t)=>"onRest"===t?void 0:e)),p=new Promise(((e,t)=>(c=e,u=t))),f=e=>{let t=i<=(r.cancelId||0)&&cr(n)||i!==r.asyncId&&lr(n,!1);if(t)throw e.result=t,u(e),e},m=(e,t)=>{let a=new dr,s=new pr;return(async()=>{if(Z.skipAnimation)throw 
hr(r),s.result=lr(n,!1),u(s),s;f(a);let o=ee.obj(e)?{...e}:{...t,to:e};o.parentId=i,ne(d,((e,t)=>{ee.und(o[t])&&(o[t]=e)}));let l=await n.start(o);return f(a),r.paused&&await new Promise((e=>{r.resumeQueue.add(e)})),l})()};if(Z.skipAnimation)return hr(r),lr(n,!1);try{let t;t=ee.arr(e)?(async e=>{for(let t of e)await m(t)})(e):Promise.resolve(e(m,n.stop.bind(n))),await Promise.all([t.then(c),p]),h=lr(n.get(),!0,!1)}catch(V){if(V instanceof dr)h=V.result;else{if(!(V instanceof pr))throw V;h=V.result}}finally{i==r.asyncId&&(r.asyncId=a,r.asyncTo=a?o:void 0,r.promise=a?l:void 0)}return ee.fun(s)&&L.batchedUpdates((()=>{s(h,n,n.item)})),h})():l}function hr(e,t){ae(e.timeouts,(e=>e.cancel())),e.pauseQueue.clear(),e.resumeQueue.clear(),e.asyncId=e.asyncTo=e.promise=void 0,t&&(e.cancelId=t)}var dr=class extends Error{result;constructor(){super("An async animation has been interrupted. You see this error because you forgot to use `await` or `.catch(...)` on its returned promise.")}},pr=class extends Error{result;constructor(){super("SkipAnimationSignal")}},fr=e=>e instanceof gr,mr=1,gr=class extends tt{id=mr++;_priority=0;get priority(){return this._priority}set priority(e){this._priority!=e&&(this._priority=e,this._onPriorityChange(e))}get(){let e=Tt(this);return e&&e.getValue()}to(...e){return Z.to(this,e)}interpolate(...e){return At(`${gt}The "interpolate" function is deprecated in v9 (use "to" instead)`),Z.to(this,e)}toJSON(){return this.get()}observerAdded(e){1==e&&this._attach()}observerRemoved(e){0==e&&this._detach()}_attach(){}_detach(){}_onChange(e,t=!1){et(this,{type:"change",parent:this,value:e,idle:t})}_onPriorityChange(e){this.idle||Ae.sort(this),et(this,{type:"priority",parent:this,priority:e})}},vr=Symbol.for("SpringPhase"),Ar=e=>(1&e[vr])>0,yr=e=>(2&e[vr])>0,xr=e=>(4&e[vr])>0,br=(e,t)=>t?e[vr]|=3:e[vr]&=-3,wr=(e,t)=>t?e[vr]|=4:e[vr]&=-5,Er=class extends gr{key;animation=new class{changed=!1;values=ir;toValues=null;fromValues=ir;to;from;config=new 
class{tension;friction;frequency;damping;mass;velocity=0;restVelocity;precision;progress;duration;easing;clamp;bounce;decay;round;constructor(){Object.assign(this,rr)}};immediate=!1};queue;defaultProps={};_state={paused:!1,delayed:!1,pauseQueue:new Set,resumeQueue:new Set,timeouts:new Set};_pendingCalls=new Set;_lastCallId=0;_lastToId=0;_memoizedDuration=0;constructor(e,t){if(super(),!ee.und(e)||!ee.und(t)){let r=ee.obj(e)?{...e}:{...t,from:e};ee.und(r.default)&&(r.default=!0),this.start(r)}}get idle(){return!(yr(this)||this._state.asyncTo)||xr(this)}get goal(){return Ze(this.animation.to)}get velocity(){let e=Tt(this);return e instanceof Rt?e.lastVelocity||0:e.getPayload().map((e=>e.lastVelocity||0))}get hasAnimated(){return Ar(this)}get isAnimating(){return yr(this)}get isPaused(){return xr(this)}get isDelayed(){return this._state.delayed}advance(e){let t=!0,r=!1,n=this.animation,{toValues:i}=n,{config:a}=n,s=It(n.to);!s&&Je(n.to)&&(i=ie(Ze(n.to))),n.values.forEach(((o,l)=>{if(o.done)return;let c=o.constructor==Pt?1:s?s[l].lastPosition:i[l],u=n.immediate,h=c;if(!u){if(h=o.lastPosition,a.tension<=0)return void(o.done=!0);let t,r=o.elapsedTime+=e,i=n.fromValues[l],s=null!=o.v0?o.v0:o.v0=ee.arr(a.velocity)?a.velocity[l]:a.velocity,d=a.precision||(i==c?.005:Math.min(1,.001*Math.abs(c-i)));if(ee.und(a.duration))if(a.decay){let e=!0===a.decay?.998:a.decay,n=Math.exp(-(1-e)*r);h=i+s/(1-e)*(1-n),u=Math.abs(o.lastPosition-h)<=d,t=s*n}else{t=null==o.lastVelocity?s:o.lastVelocity;let r,n=a.restVelocity||d/10,l=a.clamp?0:a.bounce,p=!ee.und(l),f=i==c?o.v0>0:in,r||(u=Math.abs(c-h)<=d,!u));++e){p&&(m=h==c||h>c==f,m&&(t=-t*l,h=c)),t+=(1e-6*-a.tension*(h-c)+.001*-a.friction*t)/a.mass*g,h+=t*g}}else{let 
n=1;a.duration>0&&(this._memoizedDuration!==a.duration&&(this._memoizedDuration=a.duration,o.durationProgress>0&&(o.elapsedTime=a.duration*o.durationProgress,r=o.elapsedTime+=e)),n=(a.progress||0)+r/this._memoizedDuration,n=n>1?1:n<0?0:n,o.durationProgress=n),h=i+a.easing(n)*(c-i),t=(h-o.lastPosition)/e,u=1==n}o.lastVelocity=t,Number.isNaN(h)&&(console.warn("Got NaN while animating:",this),u=!0)}s&&!s[l].done&&(u=!1),u?o.done=!0:t=!1,o.setValue(h,a.round)&&(r=!0)}));let o=Tt(this),l=o.getValue();if(t){let e=Ze(n.to);l===e&&!r||a.decay?r&&a.decay&&this._onChange(l):(o.setValue(e),this._onChange(e)),this._stop()}else r&&this._onChange(l)}set(e){return L.batchedUpdates((()=>{this._stop(),this._focus(e),this._set(e)})),this}pause(){this._update({pause:!0})}resume(){this._update({pause:!1})}finish(){if(yr(this)){let{to:e,config:t}=this.animation;L.batchedUpdates((()=>{this._onStart(),t.decay||this._set(e,!1),this._stop()}))}return this}update(e){return(this.queue||(this.queue=[])).push(e),this}start(e,t){let r;return ee.und(e)?(r=this.queue||[],this.queue=[]):r=[ee.obj(e)?e:{...t,to:e}],Promise.all(r.map((e=>this._update(e)))).then((e=>sr(this,e)))}stop(e){let{to:t}=this.animation;return this._focus(this.get()),hr(this._state,e&&this._lastCallId),L.batchedUpdates((()=>this._stop(t,e))),this}reset(){this._update({reset:!0})}eventObserved(e){"change"==e.type?this._start():"priority"==e.type&&(this.priority=e.priority+1)}_prepareNode(e){let t=this.key||"",{to:r,from:n}=e;r=ee.obj(r)?r[t]:r,(null==r||$t(r))&&(r=void 0),n=ee.obj(n)?n[t]:n,null==n&&(n=void 0);let i={to:r,from:n};return Ar(this)||(e.reverse&&([r,n]=[n,r]),n=Ze(n),ee.und(n)?Tt(this)||this._set(r):this._set(n)),i}_update({...e},t){let{key:r,defaultProps:n}=this;e.default&&Object.assign(n,Xt(e,((e,t)=>/^on/.test(t)?Qt(e,r):e))),Br(this,e,"onProps"),Rr(this,"onProps",e,this);let i=this._prepareNode(e);if(Object.isFrozen(this))throw Error("Cannot animate a `SpringValue` object that is frozen. 
Did you forget to pass your component to `animated(...)` before animating its props?");let a=this._state;return ar(++this._lastCallId,{key:r,props:e,defaultProps:n,state:a,actions:{pause:()=>{xr(this)||(wr(this,!0),le(a.pauseQueue),Rr(this,"onPause",lr(this,_r(this,this.animation.to)),this))},resume:()=>{xr(this)&&(wr(this,!1),yr(this)&&this._resume(),le(a.resumeQueue),Rr(this,"onResume",lr(this,_r(this,this.animation.to)),this))},start:this._merge.bind(this,i)}}).then((r=>{if(e.loop&&r.finished&&(!t||!r.noop)){let t=Sr(e);if(t)return this._update(t,!0)}return r}))}_merge(e,t,r){if(t.cancel)return this.stop(!0),r(cr(this));let n=!ee.und(e.to),i=!ee.und(e.from);if(n||i){if(!(t.callId>this._lastToId))return r(cr(this));this._lastToId=t.callId}let{key:a,defaultProps:s,animation:o}=this,{to:l,from:c}=o,{to:u=l,from:h=c}=e;i&&!n&&(!t.default||ee.und(u))&&(u=h),t.reverse&&([u,h]=[h,u]);let d=!te(h,c);d&&(o.from=h),h=Ze(h);let p=!te(u,l);p&&this._focus(u);let f=$t(t.to),{config:m}=o,{decay:g,velocity:v}=m;(n||i)&&(m.velocity=0),t.config&&!f&&function(e,t,r){r&&(nr(r={...r},t),t={...r,...t}),nr(e,t),Object.assign(e,t);for(let s in rr)null==e[s]&&(e[s]=rr[s]);let{frequency:n,damping:i}=e,{mass:a}=e;ee.und(n)||(n<.01&&(n=.01),i<0&&(i=0),e.tension=Math.pow(2*Math.PI/n,2)*a,e.friction=4*Math.PI*i*a/n)}(m,Vt(t.config,a),t.config!==s.config?Vt(s.config,a):void 0);let A=Tt(this);if(!A||ee.und(u))return r(lr(this,!0));let y=ee.und(t.reset)?i&&!t.default:!ee.und(h)&&Ht(t.reset,a),x=y?h:this.get(),b=Jt(u),w=ee.num(b)||ee.arr(b)||xt(b),E=!f&&(!w||Ht(s.immediate||t.immediate,a));if(p){let e=Nt(u);if(e!==A.constructor){if(!E)throw Error(`Cannot animate between ${A.constructor.name} and ${e.name}, as the "to" prop suggests`);A=this._set(b)}}let _=A.constructor,S=Je(u),M=!1;if(!S){let 
e=y||!Ar(this)&&d;(p||e)&&(M=te(Jt(x),b),S=!M),(!te(o.immediate,E)&&!E||!te(m.decay,g)||!te(m.velocity,v))&&(S=!0)}if(M&&yr(this)&&(o.changed&&!y?S=!0:S||this._stop(l)),!f&&((S||Je(l))&&(o.values=A.getPayload(),o.toValues=Je(u)?null:_==Pt?[1]:ie(b)),o.immediate!=E&&(o.immediate=E,!E&&!y&&this._set(l)),S)){let{onRest:e}=o;re(Ir,(e=>Br(this,t,e)));let n=lr(this,_r(this,l));le(this._pendingCalls,n),this._pendingCalls.add(r),o.changed&&L.batchedUpdates((()=>{o.changed=!y,e?.(n,this),y?Vt(s.onRest,n):o.onStart?.(n,this)}))}y&&this._set(x),f?r(ur(t.to,t,this._state,this)):S?this._start():yr(this)&&!p?this._pendingCalls.add(r):r(or(x))}_focus(e){let t=this.animation;e!==t.to&&($e(this)&&this._detach(),t.to=e,$e(this)&&this._attach())}_attach(){let e=0,{to:t}=this.animation;Je(t)&&(nt(t,this),fr(t)&&(e=t.priority+1)),this.priority=e}_detach(){let{to:e}=this.animation;Je(e)&&it(e,this)}_set(e,t=!0){let r=Ze(e);if(!ee.und(r)){let e=Tt(this);if(!e||!te(r,e.getValue())){let n=Nt(r);e&&e.constructor==n?e.setValue(r):Ct(this,n.create(r)),e&&L.batchedUpdates((()=>{this._onChange(r,t)}))}}return Tt(this)}_onStart(){let e=this.animation;e.changed||(e.changed=!0,Rr(this,"onStart",lr(this,_r(this,e.to)),this))}_onChange(e,t){t||(this._onStart(),Vt(this.animation.onChange,e,this)),Vt(this.defaultProps.onChange,e,this),super._onChange(e,t)}_start(){let e=this.animation;Tt(this).reset(Ze(e.to)),e.immediate||(e.fromValues=e.values.map((e=>e.lastPosition))),yr(this)||(br(this,!0),xr(this)||this._resume())}_resume(){Z.skipAnimation?this.finish():Ae.start(this)}_stop(e,t){if(yr(this)){br(this,!1);let r=this.animation;re(r.values,(e=>{e.done=!0})),r.toValues&&(r.onChange=r.onPause=r.onResume=void 0),et(this,{type:"idle",parent:this});let n=t?cr(this.get()):lr(this.get(),_r(this,e??r.to));le(this._pendingCalls,n),r.changed&&(r.changed=!1,Rr(this,"onRest",n,this))}}};function _r(e,t){let r=Jt(t);return te(Jt(e.get()),r)}function Sr(e,t=e.loop,r=e.to){let n=Vt(t);if(n){let 
i=!0!==n&&Kt(n),a=(i||e).reverse,s=!i||i.reset;return Mr({...e,loop:t,default:!1,pause:void 0,to:!a||$t(r)?r:void 0,from:s?e.from:void 0,reset:s,...i})}}function Mr(e){let{to:t,from:r}=e=Kt(e),n=new Set;return ee.obj(t)&&Cr(t,n),ee.obj(r)&&Cr(r,n),e.keys=n.size?Array.from(n):null,e}function Tr(e){let t=Mr(e);return ee.und(t.default)&&(t.default=Xt(t)),t}function Cr(e,t){ne(e,((e,r)=>null!=e&&t.add(r)))}var Ir=["onStart","onRest","onChange","onPause","onResume"];function Br(e,t,r){e.animation[r]=t[r]!==jt(t,r)?Qt(t[r],e.key):void 0}function Rr(e,t,...r){e.animation[t]?.(...r),e.defaultProps[t]?.(...r)}var Pr=["onStart","onChange","onRest"],Fr=1,Lr=class{id=Fr++;springs={};queue=[];ref;_flush;_initialProps;_lastAsyncId=0;_active=new Set;_changed=new Set;_started=!1;_item;_state={paused:!1,pauseQueue:new Set,resumeQueue:new Set,timeouts:new Set};_events={onStart:new Map,onChange:new Map,onRest:new Map};constructor(e,t){this._onFrame=this._onFrame.bind(this),t&&(this._flush=t),e&&this.start({default:!0,...e})}get idle(){return!this._state.asyncTo&&Object.values(this.springs).every((e=>e.idle&&!e.isDelayed&&!e.isPaused))}get item(){return this._item}set item(e){this._item=e}get(){let e={};return this.each(((t,r)=>e[r]=t.get())),e}set(e){for(let t in e){let r=e[t];ee.und(r)||this.springs[t].set(r)}}update(e){return e&&this.queue.push(Mr(e)),this}start(e){let{queue:t}=this;return e?t=ie(e).map(Mr):this.queue=[],this._flush?this._flush(this,t):(zr(this,t),Dr(this,t))}stop(e,t){if(e!==!!e&&(t=e),t){let r=this.springs;re(ie(t),(t=>r[t].stop(!!e)))}else hr(this._state,this._lastAsyncId),this.each((t=>t.stop(!!e)));return this}pause(e){if(ee.und(e))this.start({pause:!0});else{let t=this.springs;re(ie(e),(e=>t[e].pause()))}return this}resume(e){if(ee.und(e))this.start({pause:!1});else{let t=this.springs;re(ie(e),(e=>t[e].resume()))}return 
this}each(e){ne(this.springs,e)}_onFrame(){let{onStart:e,onChange:t,onRest:r}=this._events,n=this._active.size>0,i=this._changed.size>0;(n&&!this._started||i&&!this._started)&&(this._started=!0,ae(e,(([e,t])=>{t.value=this.get(),e(t,this,this._item)})));let a=!n&&this._started,s=i||a&&r.size?this.get():null;i&&t.size&&ae(t,(([e,t])=>{t.value=s,e(t,this,this._item)})),a&&(this._started=!1,ae(r,(([e,t])=>{t.value=s,e(t,this,this._item)})))}eventObserved(e){if("change"==e.type)this._changed.add(e.parent),e.idle||this._active.add(e.parent);else{if("idle"!=e.type)return;this._active.delete(e.parent)}L.onFrame(this._onFrame)}};function Dr(e,t){return Promise.all(t.map((t=>Ur(e,t)))).then((t=>sr(e,t)))}async function Ur(e,t,r){let{keys:n,to:i,from:a,loop:s,onRest:o,onResolve:l}=t,c=ee.obj(t.default)&&t.default;s&&(t.loop=!1),!1===i&&(t.to=null),!1===a&&(t.from=null);let u=ee.arr(i)||ee.fun(i)?i:void 0;u?(t.to=void 0,t.onRest=void 0,c&&(c.onRest=void 0)):re(Pr,(r=>{let n=t[r];if(ee.fun(n)){let i=e._events[r];t[r]=({finished:e,cancelled:t})=>{let r=i.get(n);r?(e||(r.finished=!1),t&&(r.cancelled=!0)):i.set(n,{value:null,finished:e||!1,cancelled:t||!1})},c&&(c[r]=t[r])}}));let h=e._state;t.pause===!h.paused?(h.paused=t.pause,le(t.pause?h.pauseQueue:h.resumeQueue)):h.paused&&(t.pause=!0);let d=(n||Object.keys(e.springs)).map((r=>e.springs[r].start(t))),p=!0===t.cancel||!0===jt(t,"cancel");(u||p&&h.asyncId)&&d.push(ar(++e._lastAsyncId,{props:t,state:h,actions:{pause:$,resume:$,start(t,r){p?(hr(h,e._lastAsyncId),r(cr(e))):(t.onRest=o,r(ur(u,t,h,e)))}}})),h.paused&&await new Promise((e=>{h.resumeQueue.add(e)}));let f=sr(e,await Promise.all(d));if(s&&f.finished&&(!r||!f.noop)){let r=Sr(t,s,i);if(r)return zr(e,[r]),Ur(e,r,!0)}return l&&L.batchedUpdates((()=>l(f,e,e.item))),f}function Nr(e,t){let r={...e.springs};return t&&re(ie(t),(e=>{ee.und(e.keys)&&(e=Mr(e)),ee.obj(e.to)||(e={...e,to:void 0}),Gr(r,e,(e=>Or(e)))})),kr(e,r),r}function 
kr(e,t){ne(t,((t,r)=>{e.springs[r]||(e.springs[r]=t,nt(t,e))}))}function Or(e,t){let r=new Er;return r.key=e,t&&nt(r,t),r}function Gr(e,t,r){t.keys&&re(t.keys,(n=>{(e[n]||(e[n]=r(n)))._prepareNode(t)}))}function zr(e,t){re(t,(t=>{Gr(e.springs,t,(t=>Or(t,e)))}))}var Vr=({children:e,...t})=>{let r=(0,i.useContext)(Hr),n=t.pause||!!r.pause,a=t.immediate||!!r.immediate;t=function(e,t){let[r]=(0,i.useState)((()=>({inputs:t,result:e()}))),n=(0,i.useRef)(),a=n.current,s=a;return s?t&&s.inputs&&function(e,t){if(e.length!==t.length)return!1;for(let r=0;r{n.current=s,a==r&&(r.inputs=r.result=void 0)}),[s]),s.result}((()=>({pause:n,immediate:a})),[n,a]);let{Provider:s}=Hr;return i.createElement(s,{value:t},e)},Hr=function(e,t){return Object.assign(e,i.createContext(t)),e.Provider._context=e,e.Consumer._context=e,e}(Vr,{});Vr.Provider=Hr.Provider,Vr.Consumer=Hr.Consumer;var Qr=()=>{let e=[],t=function(t){yt(`${gt}Directly calling start instead of using the api object is deprecated in v9 (use ".start" instead), this will be removed in later 0.X.0 versions`);let n=[];return re(e,((e,i)=>{if(ee.und(t))n.push(e.start());else{let a=r(t,e,i);a&&n.push(e.start(a))}})),n};t.current=e,t.add=function(t){e.includes(t)||e.push(t)},t.delete=function(t){let r=e.indexOf(t);~r&&e.splice(r,1)},t.pause=function(){return re(e,(e=>e.pause(...arguments))),this},t.resume=function(){return re(e,(e=>e.resume(...arguments))),this},t.set=function(t){re(e,((e,r)=>{let n=ee.fun(t)?t(r,e):t;n&&e.set(n)}))},t.start=function(t){let r=[];return re(e,((e,n)=>{if(ee.und(t))r.push(e.start());else{let i=this._getProps(t,e,n);i&&r.push(e.start(i))}})),r},t.stop=function(){return re(e,(e=>e.stop(...arguments))),this},t.update=function(t){return re(e,((e,r)=>e.update(this._getProps(t,e,r)))),this};let r=function(e,t,r){return ee.fun(e)?e(r,t):e};return t._getProps=r,t};function jr(e,t,r){let n=ee.fun(t)&&t;n&&!r&&(r=[]);let a=(0,i.useMemo)((()=>n||3==arguments.length?Qr():void 
0),[]),s=(0,i.useRef)(0),o=wt(),l=(0,i.useMemo)((()=>({ctrls:[],queue:[],flush(e,t){let r=Nr(e,t);return s.current>0&&!l.queue.length&&!Object.keys(r).some((t=>!e.springs[t]))?Dr(e,t):new Promise((n=>{kr(e,r),l.queue.push((()=>{n(Dr(e,t))})),o()}))}})),[]),c=(0,i.useRef)([...l.ctrls]),u=[],h=St(e)||0;function d(e,r){for(let i=e;i{re(c.current.slice(e,h),(e=>{er(e,a),e.stop(!0)})),c.current.length=e,d(h,e)}),[e]),(0,i.useMemo)((()=>{d(0,Math.min(h,e))}),r);let p=c.current.map(((e,t)=>Nr(e,u[t]))),f=(0,i.useContext)(Vr),m=St(f),g=f!==m&&Zt(f);bt((()=>{s.current++,l.ctrls=c.current;let{queue:e}=l;e.length&&(l.queue=[],re(e,(e=>e()))),re(c.current,((e,t)=>{a?.add(e),g&&e.start({default:f});let r=u[t];r&&(tr(e,r.ref),e.ref?e.queue.push(r):e.start(r))}))})),Et((()=>()=>{re(l.ctrls,(e=>e.stop(!0)))}));let v=p.map((e=>({...e})));return a?[v,a]:v}function Wr(e,t){let r=ee.fun(e),[[n],i]=jr(1,r?e:[e],r?t||[]:t);return r||2==arguments.length?[n,i]:n}var Xr=class extends gr{constructor(e,t){super(),this.source=e,this.calc=ze(...t);let r=this._get(),n=Nt(r);Ct(this,n.create(r))}key;idle=!0;calc;_active=new Set;advance(e){let t=this._get();te(t,this.get())||(Tt(this).setValue(t),this._onChange(t,this.idle)),!this.idle&&qr(this._active)&&Kr(this)}_get(){let e=ee.arr(this.source)?this.source.map(Ze):ie(Ze(this.source));return this.calc(...e)}_start(){this.idle&&!qr(this._active)&&(this.idle=!1,re(It(this),(e=>{e.done=!1})),Z.skipAnimation?(L.batchedUpdates((()=>this.advance())),Kr(this)):Ae.start(this))}_attach(){let 
e=1;re(ie(this.source),(t=>{Je(t)&&nt(t,this),fr(t)&&(t.idle||this._active.add(t),e=Math.max(e,t.priority+1))})),this.priority=e,this._start()}_detach(){re(ie(this.source),(e=>{Je(e)&&it(e,this)})),this._active.clear(),Kr(this)}eventObserved(e){"change"==e.type?e.idle?this.advance():(this._active.add(e.parent),this._start()):"idle"==e.type?this._active.delete(e.parent):"priority"==e.type&&(this.priority=ie(this.source).reduce(((e,t)=>Math.max(e,(fr(t)?t.priority:0)+1)),0))}};function Yr(e){return!1!==e.idle}function qr(e){return!e.size||Array.from(e).every(Yr)}function Kr(e){e.idle||(e.idle=!0,re(It(e),(e=>{e.done=!0})),et(e,{type:"idle",parent:e}))}Z.assign({createStringInterpolator:mt,to:(e,t)=>new Xr(e,t)});Ae.advance;var Jr=["primitive"].concat(Object.keys(s).filter((e=>/^[A-Z]/.test(e))).map((e=>e[0].toLowerCase()+e.slice(1))));Z.assign({createStringInterpolator:mt,colors:{transparent:0,aliceblue:4042850303,antiquewhite:4209760255,aqua:16777215,aquamarine:2147472639,azure:4043309055,beige:4126530815,bisque:4293182719,black:255,blanchedalmond:4293643775,blue:65535,blueviolet:2318131967,brown:2771004159,burlywood:3736635391,burntsienna:3934150143,cadetblue:1604231423,chartreuse:2147418367,chocolate:3530104575,coral:4286533887,cornflowerblue:1687547391,cornsilk:4294499583,crimson:3692313855,cyan:16777215,darkblue:35839,darkcyan:9145343,darkgoldenrod:3095792639,darkgray:2846468607,darkgreen:6553855,darkgrey:2846468607,darkkhaki:3182914559,darkmagenta:2332068863,darkolivegreen:1433087999,darkorange:4287365375,darkorchid:2570243327,darkred:2332033279,darksalmon:3918953215,darkseagreen:2411499519,darkslateblue:1211993087,darkslategray:793726975,darkslategrey:793726975,darkturquoise:13554175,darkviolet:2483082239,deeppink:4279538687,deepskyblue:12582911,dimgray:1768516095,dimgrey:1768516095,dodgerblue:512819199,firebrick:2988581631,floralwhite:4294635775,forestgreen:579543807,fuchsia:4278255615,gainsboro:3705462015,ghostwhite:4177068031,gold:4292280575,goldenrod:366825
4975,gray:2155905279,green:8388863,greenyellow:2919182335,grey:2155905279,honeydew:4043305215,hotpink:4285117695,indianred:3445382399,indigo:1258324735,ivory:4294963455,khaki:4041641215,lavender:3873897215,lavenderblush:4293981695,lawngreen:2096890111,lemonchiffon:4294626815,lightblue:2916673279,lightcoral:4034953471,lightcyan:3774873599,lightgoldenrodyellow:4210742015,lightgray:3553874943,lightgreen:2431553791,lightgrey:3553874943,lightpink:4290167295,lightsalmon:4288707327,lightseagreen:548580095,lightskyblue:2278488831,lightslategray:2005441023,lightslategrey:2005441023,lightsteelblue:2965692159,lightyellow:4294959359,lime:16711935,limegreen:852308735,linen:4210091775,magenta:4278255615,maroon:2147483903,mediumaquamarine:1724754687,mediumblue:52735,mediumorchid:3126187007,mediumpurple:2473647103,mediumseagreen:1018393087,mediumslateblue:2070474495,mediumspringgreen:16423679,mediumturquoise:1221709055,mediumvioletred:3340076543,midnightblue:421097727,mintcream:4127193855,mistyrose:4293190143,moccasin:4293178879,navajowhite:4292783615,navy:33023,oldlace:4260751103,olive:2155872511,olivedrab:1804477439,orange:4289003775,orangered:4282712319,orchid:3664828159,palegoldenrod:4008225535,palegreen:2566625535,paleturquoise:2951671551,palevioletred:3681588223,papayawhip:4293907967,peachpuff:4292524543,peru:3448061951,pink:4290825215,plum:3718307327,powderblue:2967529215,purple:2147516671,rebeccapurple:1714657791,red:4278190335,rosybrown:3163525119,royalblue:1097458175,saddlebrown:2336560127,salmon:4202722047,sandybrown:4104413439,seagreen:780883967,seashell:4294307583,sienna:2689740287,silver:3233857791,skyblue:2278484991,slateblue:1784335871,slategray:1887473919,slategrey:1887473919,snow:4294638335,springgreen:16744447,steelblue:1182971135,tan:3535047935,teal:8421631,thistle:3636451583,tomato:4284696575,turquoise:1088475391,violet:4001558271,wheat:4125012991,white:4294967295,whitesmoke:4126537215,yellow:4294902015,yellowgreen:2597139199},frameLoop:"demand"}),(0,o.addEffec
t)((()=>{L.advance()}));var Zr=((e,{applyAnimatedValues:t=(()=>!1),createAnimatedStyle:r=(e=>new Lt(e)),getComponentProps:n=(e=>e)}={})=>{let i={applyAnimatedValues:t,createAnimatedStyle:r,getComponentProps:n},a=e=>{let t=zt(e)||"Anonymous";return(e=ee.str(e)?a[e]||(a[e]=kt(e,i)):e[Gt]||(e[Gt]=kt(e,i))).displayName=`Animated(${t})`,e};return ne(e,((t,r)=>{ee.arr(e)&&(r=zt(t)),a[r]=a(t)})),{animated:a}})(Jr,{applyAnimatedValues:o.applyProps}),$r=Zr.animated;const en={toVector:(e,t)=>(void 0===e&&(e=t),Array.isArray(e)?e:[e,e]),add:(e,t)=>[e[0]+t[0],e[1]+t[1]],sub:(e,t)=>[e[0]-t[0],e[1]-t[1]],addTo(e,t){e[0]+=t[0],e[1]+=t[1]},subTo(e,t){e[0]-=t[0],e[1]-=t[1]}};function tn(e,t,r){return 0===t||Math.abs(t)===1/0?Math.pow(e,5*r):e*t*r/(t+r*e)}function rn(e,t,r,n=.15){return 0===n?function(e,t,r){return Math.max(t,Math.min(e,r))}(e,t,r):er?+tn(e-r,r-t,n)+r:e}function nn(e){var t=function(e,t){if("object"!=typeof e||null===e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!=typeof n)return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:String(t)}function an(e,t,r){return(t=nn(t))in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function sn(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function on(e){for(var t=1;t{var r,n;return t.target===e.currentTarget||(null===(r=e.currentTarget)||void 0===r||null===(n=r.contains)||void 0===n?void 0:n.call(r,t.target))}))}(e).map((e=>e.identifier))}function yn(e,t){const[r,n]=Array.from(e.touches).filter((e=>t.includes(e.identifier)));return vn(r,n)}function xn(e){const t=gn(e);return fn(e)?t.identifier:t.pointerId}function bn(e){const t=gn(e);return[t.clientX,t.clientY]}function 
wn(e){let{deltaX:t,deltaY:r,deltaMode:n}=e;return 1===n?(t*=40,r*=40):2===n&&(t*=800,r*=800),[t,r]}function En(e,...t){return"function"==typeof e?e(...t):e}function _n(){}function Sn(...e){return 0===e.length?_n:1===e.length?e[0]:function(){let t;for(const r of e)t=r.apply(this,arguments)||t;return t}}function Mn(e,t){return Object.assign({},t,e||{})}class Tn{constructor(e,t,r){this.ctrl=e,this.args=t,this.key=r,this.state||(this.state={},this.computeValues([0,0]),this.computeInitial(),this.init&&this.init(),this.reset())}get state(){return this.ctrl.state[this.key]}set state(e){this.ctrl.state[this.key]=e}get shared(){return this.ctrl.state.shared}get eventStore(){return this.ctrl.gestureEventStores[this.key]}get timeoutStore(){return this.ctrl.gestureTimeoutStores[this.key]}get config(){return this.ctrl.config[this.key]}get sharedConfig(){return this.ctrl.config.shared}get handler(){return this.ctrl.handlers[this.key]}reset(){const{state:e,shared:t,ingKey:r,args:n}=this;t[r]=e._active=e.active=e._blocked=e._force=!1,e._step=[!1,!1],e.intentional=!1,e._movement=[0,0],e._distance=[0,0],e._direction=[0,0],e._delta=[0,0],e._bounds=[[-1/0,1/0],[-1/0,1/0]],e.args=n,e.axis=void 0,e.memo=void 0,e.elapsedTime=e.timeDelta=0,e.direction=[0,0],e.distance=[0,0],e.overflow=[0,0],e._movementBound=[!1,!1],e.velocity=[0,0],e.movement=[0,0],e.delta=[0,0],e.timeStamp=0}start(e){const t=this.state,r=this.config;t._active||(this.reset(),this.computeInitial(),t._active=!0,t.target=e.target,t.currentTarget=e.currentTarget,t.lastOffset=r.from?En(r.from,t):t.offset,t.offset=t.lastOffset,t.startTime=t.timeStamp=e.timeStamp)}computeValues(e){const t=this.state;t._values=e,t.values=this.config.transform(e)}computeInitial(){const e=this.state;e._initial=e._values,e.initial=e.values}compute(e){const{state:t,config:r,shared:n}=this;t.args=this.args;let 
i=0;if(e&&(t.event=e,r.preventDefault&&e.cancelable&&t.event.preventDefault(),t.type=e.type,n.touches=this.ctrl.pointerIds.size||this.ctrl.touchIds.size,n.locked=!!document.pointerLockElement,Object.assign(n,function(e){const t={};if("buttons"in e&&(t.buttons=e.buttons),"shiftKey"in e){const{shiftKey:r,altKey:n,metaKey:i,ctrlKey:a}=e;Object.assign(t,{shiftKey:r,altKey:n,metaKey:i,ctrlKey:a})}return t}(e)),n.down=n.pressed=n.buttons%2==1||n.touches>0,i=e.timeStamp-t.timeStamp,t.timeStamp=e.timeStamp,t.elapsedTime=t.timeStamp-t.startTime),t._active){const e=t._delta.map(Math.abs);en.addTo(t._distance,e)}this.axisIntent&&this.axisIntent(e);const[a,s]=t._movement,[o,l]=r.threshold,{_step:c,values:u}=t;if(r.hasCustomTransform?(!1===c[0]&&(c[0]=Math.abs(a)>=o&&u[0]),!1===c[1]&&(c[1]=Math.abs(s)>=l&&u[1])):(!1===c[0]&&(c[0]=Math.abs(a)>=o&&Math.sign(a)*o),!1===c[1]&&(c[1]=Math.abs(s)>=l&&Math.sign(s)*l)),t.intentional=!1!==c[0]||!1!==c[1],!t.intentional)return;const h=[0,0];if(r.hasCustomTransform){const[e,t]=u;h[0]=!1!==c[0]?e-c[0]:0,h[1]=!1!==c[1]?t-c[1]:0}else h[0]=!1!==c[0]?a-c[0]:0,h[1]=!1!==c[1]?s-c[1]:0;this.restrictToAxis&&!t._blocked&&this.restrictToAxis(h);const d=t.offset,p=t._active&&!t._blocked||t.active;p&&(t.first=t._active&&!t.active,t.last=!t._active&&t.active,t.active=n[this.ingKey]=t._active,e&&(t.first&&("bounds"in r&&(t._bounds=En(r.bounds,t)),this.setup&&this.setup()),t.movement=h,this.computeOffset()));const[f,m]=t.offset,[[g,v],[A,y]]=t._bounds;t.overflow=[fv?1:0,my?1:0],t._movementBound[0]=!!t.overflow[0]&&(!1===t._movementBound[0]?t._movement[0]:t._movementBound[0]),t._movementBound[1]=!!t.overflow[1]&&(!1===t._movementBound[1]?t._movement[1]:t._movementBound[1]);const x=t._active&&r.rubberband||[0,0];if(t.offset=function(e,[t,r],[n,i]){const[[a,s],[o,l]]=e;return[rn(t,a,s,n),rn(r,o,l,i)]}(t._bounds,t.offset,x),t.delta=en.sub(t.offset,d),this.computeMovement(),p&&(!t.last||i>32)){t.delta=en.sub(t.offset,d);const 
e=t.delta.map(Math.abs);en.addTo(t.distance,e),t.direction=t.delta.map(Math.sign),t._direction=t._delta.map(Math.sign),!t.first&&i>0&&(t.velocity=[e[0]/i,e[1]/i],t.timeDelta=i)}}emit(){const e=this.state,t=this.shared,r=this.config;if(e._active||this.clean(),(e._blocked||!e.intentional)&&!e._force&&!r.triggerAllEvents)return;const n=this.handler(on(on(on({},t),e),{},{[this.aliasKey]:e.values}));void 0!==n&&(e.memo=n)}clean(){this.eventStore.clean(),this.timeoutStore.clean()}}class Cn extends Tn{constructor(...e){super(...e),an(this,"aliasKey","xy")}reset(){super.reset(),this.state.axis=void 0}init(){this.state.offset=[0,0],this.state.lastOffset=[0,0]}computeOffset(){this.state.offset=en.add(this.state.lastOffset,this.state.movement)}computeMovement(){this.state.movement=en.sub(this.state.offset,this.state.lastOffset)}axisIntent(e){const t=this.state,r=this.config;if(!t.axis&&e){const n="object"==typeof r.axisThreshold?r.axisThreshold[mn(e)]:r.axisThreshold;t.axis=function([e,t],r){const n=Math.abs(e),i=Math.abs(t);return n>i&&n>r?"x":i>n&&i>r?"y":void 0}(t._movement,n)}t._blocked=(r.lockDirection||!!r.axis)&&!t.axis||!!r.axis&&r.axis!==t.axis}restrictToAxis(e){if(this.config.axis||this.config.lockDirection)switch(this.state.axis){case"x":e[1]=0;break;case"y":e[0]=0}}}const In=e=>e,Bn={enabled:(e=!0)=>e,eventOptions:(e,t,r)=>on(on({},r.shared.eventOptions),e),preventDefault:(e=!1)=>e,triggerAllEvents:(e=!1)=>e,rubberband(e=0){switch(e){case!0:return[.15,.15];case!1:return[0,0];default:return en.toVector(e)}},from:e=>"function"==typeof e?e:null!=e?en.toVector(e):void 0,transform(e,t,r){const n=e||r.shared.transform;return this.hasCustomTransform=!!n,n||In},threshold:e=>en.toVector(e,0)};const Rn=on(on({},Bn),{},{axis(e,t,{axis:r}){if(this.lockDirection="lock"===r,!this.lockDirection)return r},axisThreshold:(e=0)=>e,bounds(e={}){if("function"==typeof e)return t=>Rn.bounds(e(t));if("current"in e)return()=>e.current;if("function"==typeof HTMLElement&&e instanceof 
HTMLElement)return e;const{left:t=-1/0,right:r=1/0,top:n=-1/0,bottom:i=1/0}=e;return[[t,r],[n,i]]}}),Pn={ArrowRight:(e,t=1)=>[e*t,0],ArrowLeft:(e,t=1)=>[-1*e*t,0],ArrowUp:(e,t=1)=>[0,-1*e*t],ArrowDown:(e,t=1)=>[0,e*t]};const Fn="undefined"!=typeof window&&window.document&&window.document.createElement;function Ln(){return Fn&&"ontouchstart"in window||Fn&&window.navigator.maxTouchPoints>1}const Dn={isBrowser:Fn,gesture:function(){try{return"constructor"in GestureEvent}catch(Ha){return!1}}(),touch:Ln(),touchscreen:Ln(),pointer:Fn&&"onpointerdown"in window,pointerLock:Fn&&"exitPointerLock"in window.document},Un={mouse:0,touch:0,pen:8},Nn=on(on({},Rn),{},{device(e,t,{pointer:{touch:r=!1,lock:n=!1,mouse:i=!1}={}}){return this.pointerLock=n&&Dn.pointerLock,Dn.touch&&r?"touch":this.pointerLock?"mouse":Dn.pointer&&!i?"pointer":Dn.touch?"touch":"mouse"},preventScrollAxis(e,t,{preventScroll:r}){if(this.preventScrollDelay="number"==typeof r?r:r||void 0===r&&e?250:void 0,Dn.touchscreen&&!1!==r)return e||(void 0!==r?"y":void 0)},pointerCapture(e,t,{pointer:{capture:r=!0,buttons:n=1,keys:i=!0}={}}){return this.pointerButtons=n,this.keys=i,!this.pointerLock&&"pointer"===this.device&&r},threshold(e,t,{filterTaps:r=!1,tapsThreshold:n=3,axis:i}){const a=en.toVector(e,r?n:i?1:0);return this.filterTaps=r,this.tapsThreshold=n,a},swipe({velocity:e=.5,distance:t=50,duration:r=250}={}){return{velocity:this.transform(en.toVector(e)),distance:this.transform(en.toVector(t)),duration:r}},delay(e=0){switch(e){case!0:return 180;case!1:return 0;default:return e}},axisThreshold:e=>e?on(on({},Un),e):Un,keyboardDisplacement:(e=10)=>e});function kn(e){const[t,r]=e.overflow,[n,i]=e._delta,[a,s]=e._direction;(t<0&&n>0&&a<0||t>0&&n<0&&a>0)&&(e._movement[0]=e._movementBound[0]),(r<0&&i>0&&s<0||r>0&&i<0&&s>0)&&(e._movement[1]=e._movementBound[1])}const 
On=on(on({},Bn),{},{device(e,t,{shared:r,pointer:{touch:n=!1}={}}){if(r.target&&!Dn.touch&&Dn.gesture)return"gesture";if(Dn.touch&&n)return"touch";if(Dn.touchscreen){if(Dn.pointer)return"pointer";if(Dn.touch)return"touch"}},bounds(e,t,{scaleBounds:r={},angleBounds:n={}}){const i=e=>{const t=Mn(En(r,e),{min:-1/0,max:1/0});return[t.min,t.max]},a=e=>{const t=Mn(En(n,e),{min:-1/0,max:1/0});return[t.min,t.max]};return"function"!=typeof r&&"function"!=typeof n?[i(),a()]:e=>[i(e),a(e)]},threshold(e,t,r){this.lockDirection="lock"===r.axis;return en.toVector(e,this.lockDirection?[.1,3]:0)},modifierKey:e=>void 0===e?"ctrlKey":e,pinchOnWheel:(e=!0)=>e});const Gn=on(on({},Rn),{},{mouseOnly:(e=!0)=>e});const zn=Rn;const Vn=Rn;const Hn=on(on({},Rn),{},{mouseOnly:(e=!0)=>e}),Qn=new Map,jn=new Map;function Wn(e){Qn.set(e.key,e.engine),jn.set(e.key,e.resolver)}const Xn={key:"drag",engine:class extends Cn{constructor(...e){super(...e),an(this,"ingKey","dragging")}reset(){super.reset();const e=this.state;e._pointerId=void 0,e._pointerActive=!1,e._keyboardActive=!1,e._preventScroll=!1,e._delayed=!1,e.swipe=[0,0],e.tap=!1,e.canceled=!1,e.cancel=this.cancel.bind(this)}setup(){const e=this.state;if(e._bounds instanceof HTMLElement){const t=e._bounds.getBoundingClientRect(),r=e.currentTarget.getBoundingClientRect(),n={left:t.left-r.left+e.offset[0],right:t.right-r.right+e.offset[0],top:t.top-r.top+e.offset[1],bottom:t.bottom-r.bottom+e.offset[1]};e._bounds=Rn.bounds(n)}}cancel(){const e=this.state;e.canceled||(e.canceled=!0,e._active=!1,setTimeout((()=>{this.compute(),this.emit()}),0))}setActive(){this.state._active=this.state._pointerActive||this.state._keyboardActive}clean(){this.pointerClean(),this.state._pointerActive=!1,this.state._keyboardActive=!1,super.clean()}pointerDown(e){const t=this.config,r=this.state;if(null!=e.buttons&&(Array.isArray(t.pointerButtons)?!t.pointerButtons.includes(e.buttons):-1!==t.pointerButtons&&t.pointerButtons!==e.buttons))return;const 
n=this.ctrl.setEventIds(e);t.pointerCapture&&e.target.setPointerCapture(e.pointerId),n&&n.size>1&&r._pointerActive||(this.start(e),this.setupPointer(e),r._pointerId=xn(e),r._pointerActive=!0,this.computeValues(bn(e)),this.computeInitial(),t.preventScrollAxis&&"mouse"!==mn(e)?(r._active=!1,this.setupScrollPrevention(e)):t.delay>0?(this.setupDelayTrigger(e),t.triggerAllEvents&&(this.compute(e),this.emit())):this.startPointerDrag(e))}startPointerDrag(e){const t=this.state;t._active=!0,t._preventScroll=!0,t._delayed=!1,this.compute(e),this.emit()}pointerMove(e){const t=this.state,r=this.config;if(!t._pointerActive)return;const n=xn(e);if(void 0!==t._pointerId&&n!==t._pointerId)return;const i=bn(e);return document.pointerLockElement===e.target?t._delta=[e.movementX,e.movementY]:(t._delta=en.sub(i,t._values),this.computeValues(i)),en.addTo(t._movement,t._delta),this.compute(e),t._delayed&&t.intentional?(this.timeoutStore.remove("dragDelay"),t.active=!1,void this.startPointerDrag(e)):r.preventScrollAxis&&!t._preventScroll?t.axis?t.axis===r.preventScrollAxis||"xy"===r.preventScrollAxis?(t._active=!1,void this.clean()):(this.timeoutStore.remove("startPointerDrag"),void this.startPointerDrag(e)):void 0:void this.emit()}pointerUp(e){this.ctrl.setEventIds(e);try{this.config.pointerCapture&&e.target.hasPointerCapture(e.pointerId)&&e.target.releasePointerCapture(e.pointerId)}catch(s){0}const t=this.state,r=this.config;if(!t._active||!t._pointerActive)return;const n=xn(e);if(void 
0!==t._pointerId&&n!==t._pointerId)return;this.state._pointerActive=!1,this.setActive(),this.compute(e);const[i,a]=t._distance;if(t.tap=i<=r.tapsThreshold&&a<=r.tapsThreshold,t.tap&&r.filterTaps)t._force=!0;else{const[e,n]=t._delta,[i,a]=t._movement,[s,o]=r.swipe.velocity,[l,c]=r.swipe.distance,u=r.swipe.duration;if(t.elapsedTimes&&Math.abs(i)>l&&(t.swipe[0]=Math.sign(e)),u>o&&Math.abs(a)>c&&(t.swipe[1]=Math.sign(n))}}this.emit()}pointerClick(e){!this.state.tap&&e.detail>0&&(e.preventDefault(),e.stopPropagation())}setupPointer(e){const t=this.config,r=t.device;t.pointerLock&&e.currentTarget.requestPointerLock(),t.pointerCapture||(this.eventStore.add(this.sharedConfig.window,r,"change",this.pointerMove.bind(this)),this.eventStore.add(this.sharedConfig.window,r,"end",this.pointerUp.bind(this)),this.eventStore.add(this.sharedConfig.window,r,"cancel",this.pointerUp.bind(this)))}pointerClean(){this.config.pointerLock&&document.pointerLockElement===this.state.currentTarget&&document.exitPointerLock()}preventScroll(e){this.state._preventScroll&&e.cancelable&&e.preventDefault()}setupScrollPrevention(e){this.state._preventScroll=!1,function(e){"persist"in e&&"function"==typeof e.persist&&e.persist()}(e);const t=this.eventStore.add(this.sharedConfig.window,"touch","change",this.preventScroll.bind(this),{passive:!1});this.eventStore.add(this.sharedConfig.window,"touch","end",t),this.eventStore.add(this.sharedConfig.window,"touch","cancel",t),this.timeoutStore.add("startPointerDrag",this.startPointerDrag.bind(this),this.config.preventScrollDelay,e)}setupDelayTrigger(e){this.state._delayed=!0,this.timeoutStore.add("dragDelay",(()=>{this.state._step=[0,0],this.startPointerDrag(e)}),this.config.delay)}keyDown(e){const t=Pn[e.key];if(t){const r=this.state,n=e.shiftKey?10:e.altKey?.1:1;this.start(e),r._delta=t(this.config.keyboardDisplacement,n),r._keyboardActive=!0,en.addTo(r._movement,r._delta),this.compute(e),this.emit()}}keyUp(e){e.key in 
Pn&&(this.state._keyboardActive=!1,this.setActive(),this.compute(e),this.emit())}bind(e){const t=this.config.device;e(t,"start",this.pointerDown.bind(this)),this.config.pointerCapture&&(e(t,"change",this.pointerMove.bind(this)),e(t,"end",this.pointerUp.bind(this)),e(t,"cancel",this.pointerUp.bind(this)),e("lostPointerCapture","",this.pointerUp.bind(this))),this.config.keys&&(e("key","down",this.keyDown.bind(this)),e("key","up",this.keyUp.bind(this))),this.config.filterTaps&&e("click","",this.pointerClick.bind(this),{capture:!0,passive:!1})}},resolver:Nn},Yn={key:"hover",engine:class extends Cn{constructor(...e){super(...e),an(this,"ingKey","hovering")}enter(e){this.config.mouseOnly&&"mouse"!==e.pointerType||(this.start(e),this.computeValues(bn(e)),this.compute(e),this.emit())}leave(e){if(this.config.mouseOnly&&"mouse"!==e.pointerType)return;const t=this.state;if(!t._active)return;t._active=!1;const r=bn(e);t._movement=t._delta=en.sub(r,t._values),this.computeValues(r),this.compute(e),t.delta=t.movement,this.emit()}bind(e){e("pointer","enter",this.enter.bind(this)),e("pointer","leave",this.leave.bind(this))}},resolver:Hn},qn={key:"move",engine:class extends Cn{constructor(...e){super(...e),an(this,"ingKey","moving")}move(e){this.config.mouseOnly&&"mouse"!==e.pointerType||(this.state._active?this.moveChange(e):this.moveStart(e),this.timeoutStore.add("moveEnd",this.moveEnd.bind(this)))}moveStart(e){this.start(e),this.computeValues(bn(e)),this.compute(e),this.computeInitial(),this.emit()}moveChange(e){if(!this.state._active)return;const t=bn(e),r=this.state;r._delta=en.sub(t,r._values),en.addTo(r._movement,r._delta),this.computeValues(t),this.compute(e),this.emit()}moveEnd(e){this.state._active&&(this.state._active=!1,this.compute(e),this.emit())}bind(e){e("pointer","change",this.move.bind(this)),e("pointer","leave",this.moveEnd.bind(this))}},resolver:Gn},Kn={key:"pinch",engine:class extends 
Tn{constructor(...e){super(...e),an(this,"ingKey","pinching"),an(this,"aliasKey","da")}init(){this.state.offset=[1,0],this.state.lastOffset=[1,0],this.state._pointerEvents=new Map}reset(){super.reset();const e=this.state;e._touchIds=[],e.canceled=!1,e.cancel=this.cancel.bind(this),e.turns=0}computeOffset(){const{type:e,movement:t,lastOffset:r}=this.state;this.state.offset="wheel"===e?en.add(t,r):[(1+t[0])*r[0],t[1]+r[1]]}computeMovement(){const{offset:e,lastOffset:t}=this.state;this.state.movement=[e[0]/t[0],e[1]-t[1]]}axisIntent(){const e=this.state,[t,r]=e._movement;if(!e.axis){const n=30*Math.abs(t)-Math.abs(r);n<0?e.axis="angle":n>0&&(e.axis="scale")}}restrictToAxis(e){this.config.lockDirection&&("scale"===this.state.axis?e[1]=0:"angle"===this.state.axis&&(e[0]=0))}cancel(){const e=this.state;e.canceled||setTimeout((()=>{e.canceled=!0,e._active=!1,this.compute(),this.emit()}),0)}touchStart(e){this.ctrl.setEventIds(e);const t=this.state,r=this.ctrl.touchIds;if(t._active&&t._touchIds.every((e=>r.has(e))))return;if(r.size<2)return;this.start(e),t._touchIds=Array.from(r).slice(0,2);const n=yn(e,t._touchIds);n&&this.pinchStart(e,n)}pointerStart(e){if(null!=e.buttons&&e.buttons%2!=1)return;this.ctrl.setEventIds(e),e.target.setPointerCapture(e.pointerId);const t=this.state,r=t._pointerEvents,n=this.ctrl.pointerIds;if(t._active&&Array.from(r.keys()).every((e=>n.has(e))))return;if(r.size<2&&r.set(e.pointerId,e),t._pointerEvents.size<2)return;this.start(e);const i=vn(...Array.from(r.values()));i&&this.pinchStart(e,i)}pinchStart(e,t){this.state.origin=t.origin,this.computeValues([t.distance,t.angle]),this.computeInitial(),this.compute(e),this.emit()}touchMove(e){if(!this.state._active)return;const t=yn(e,this.state._touchIds);t&&this.pinchMove(e,t)}pointerMove(e){const t=this.state._pointerEvents;if(t.has(e.pointerId)&&t.set(e.pointerId,e),!this.state._active)return;const r=vn(...Array.from(t.values()));r&&this.pinchMove(e,r)}pinchMove(e,t){const 
r=this.state,n=r._values[1],i=t.angle-n;let a=0;Math.abs(i)>270&&(a+=Math.sign(i)),this.computeValues([t.distance,t.angle-360*a]),r.origin=t.origin,r.turns=a,r._movement=[r._values[0]/r._initial[0]-1,r._values[1]-r._initial[1]],this.compute(e),this.emit()}touchEnd(e){this.ctrl.setEventIds(e),this.state._active&&this.state._touchIds.some((e=>!this.ctrl.touchIds.has(e)))&&(this.state._active=!1,this.compute(e),this.emit())}pointerEnd(e){const t=this.state;this.ctrl.setEventIds(e);try{e.target.releasePointerCapture(e.pointerId)}catch(r){}t._pointerEvents.has(e.pointerId)&&t._pointerEvents.delete(e.pointerId),t._active&&t._pointerEvents.size<2&&(t._active=!1,this.compute(e),this.emit())}gestureStart(e){e.cancelable&&e.preventDefault();const t=this.state;t._active||(this.start(e),this.computeValues([e.scale,e.rotation]),t.origin=[e.clientX,e.clientY],this.compute(e),this.emit())}gestureMove(e){if(e.cancelable&&e.preventDefault(),!this.state._active)return;const t=this.state;this.computeValues([e.scale,e.rotation]),t.origin=[e.clientX,e.clientY];const r=t._movement;t._movement=[e.scale-1,e.rotation],t._delta=en.sub(t._movement,r),this.compute(e),this.emit()}gestureEnd(e){this.state._active&&(this.state._active=!1,this.compute(e),this.emit())}wheel(e){const t=this.config.modifierKey;t&&!e[t]||(this.state._active?this.wheelChange(e):this.wheelStart(e),this.timeoutStore.add("wheelEnd",this.wheelEnd.bind(this)))}wheelStart(e){this.start(e),this.wheelChange(e)}wheelChange(e){"uv"in e||e.cancelable&&e.preventDefault();const t=this.state;t._delta=[-wn(e)[1]/100*t.offset[0],0],en.addTo(t._movement,t._delta),kn(t),this.state.origin=[e.clientX,e.clientY],this.compute(e),this.emit()}wheelEnd(){this.state._active&&(this.state._active=!1,this.compute(),this.emit())}bind(e){const 
t=this.config.device;t&&(e(t,"start",this[t+"Start"].bind(this)),e(t,"change",this[t+"Move"].bind(this)),e(t,"end",this[t+"End"].bind(this)),e(t,"cancel",this[t+"End"].bind(this)),e("lostPointerCapture","",this[t+"End"].bind(this))),this.config.pinchOnWheel&&e("wheel","",this.wheel.bind(this),{passive:!1})}},resolver:On},Jn={key:"scroll",engine:class extends Cn{constructor(...e){super(...e),an(this,"ingKey","scrolling")}scroll(e){this.state._active||this.start(e),this.scrollChange(e),this.timeoutStore.add("scrollEnd",this.scrollEnd.bind(this))}scrollChange(e){e.cancelable&&e.preventDefault();const t=this.state,r=function(e){var t,r;const{scrollX:n,scrollY:i,scrollLeft:a,scrollTop:s}=e.currentTarget;return[null!==(t=null!=n?n:a)&&void 0!==t?t:0,null!==(r=null!=i?i:s)&&void 0!==r?r:0]}(e);t._delta=en.sub(r,t._values),en.addTo(t._movement,t._delta),this.computeValues(r),this.compute(e),this.emit()}scrollEnd(){this.state._active&&(this.state._active=!1,this.compute(),this.emit())}bind(e){e("scroll","",this.scroll.bind(this))}},resolver:zn},Zn={key:"wheel",engine:class extends Cn{constructor(...e){super(...e),an(this,"ingKey","wheeling")}wheel(e){this.state._active||this.start(e),this.wheelChange(e),this.timeoutStore.add("wheelEnd",this.wheelEnd.bind(this))}wheelChange(e){const t=this.state;t._delta=wn(e),en.addTo(t._movement,t._delta),kn(t),this.compute(e),this.emit()}wheelEnd(){this.state._active&&(this.state._active=!1,this.compute(),this.emit())}bind(e){e("wheel","",this.wheel.bind(this))}},resolver:Vn};function $n(e,t){if(null==e)return{};var r,n,i=function(e,t){if(null==e)return{};var r,n,i={},a=Object.keys(e);for(n=0;n=0||(i[r]=e[r]);return i}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(i[r]=e[r])}return i}const ei={target(e){if(e)return()=>"current"in e?e.current:e},enabled:(e=!0)=>e,window:(e=(Dn.isBrowser?window:void 
0))=>e,eventOptions:({passive:e=!0,capture:t=!1}={})=>({passive:e,capture:t}),transform:e=>e},ti=["target","eventOptions","window","enabled","transform"];function ri(e={},t){const r={};for(const[n,i]of Object.entries(t))switch(typeof i){case"function":r[n]=i.call(r,e[n],n,e);break;case"object":r[n]=ri(e[n],i);break;case"boolean":i&&(r[n]=e[n])}return r}class ni{constructor(e,t){an(this,"_listeners",new Set),this._ctrl=e,this._gestureKey=t}add(e,t,r,n,i){const a=this._listeners,s=function(e,t=""){const r=ln[e];return e+(r&&r[t]||t)}(t,r),o=on(on({},this._gestureKey?this._ctrl.config[this._gestureKey].eventOptions:{}),i);e.addEventListener(s,n,o);const l=()=>{e.removeEventListener(s,n,o),a.delete(l)};return a.add(l),l}clean(){this._listeners.forEach((e=>e())),this._listeners.clear()}}class ii{constructor(){an(this,"_timeouts",new Map)}add(e,t,r=140,...n){this.remove(e),this._timeouts.set(e,window.setTimeout(t,r,...n))}remove(e){const t=this._timeouts.get(e);t&&window.clearTimeout(t)}clean(){this._timeouts.forEach((e=>{window.clearTimeout(e)})),this._timeouts.clear()}}class ai{constructor(e){an(this,"gestures",new Set),an(this,"_targetEventStore",new ni(this)),an(this,"gestureEventStores",{}),an(this,"gestureTimeoutStores",{}),an(this,"handlers",{}),an(this,"config",{}),an(this,"pointerIds",new Set),an(this,"touchIds",new Set),an(this,"state",{shared:{shiftKey:!1,metaKey:!1,ctrlKey:!1,altKey:!1}}),function(e,t){t.drag&&si(e,"drag");t.wheel&&si(e,"wheel");t.scroll&&si(e,"scroll");t.move&&si(e,"move");t.pinch&&si(e,"pinch");t.hover&&si(e,"hover")}(this,e)}setEventIds(e){return fn(e)?(this.touchIds=new Set(An(e)),this.touchIds):"pointerId"in e?("pointerup"===e.type||"pointercancel"===e.type?this.pointerIds.delete(e.pointerId):"pointerdown"===e.type&&this.pointerIds.add(e.pointerId),this.pointerIds):void 0}applyHandlers(e,t){this.handlers=e,this.nativeHandlers=t}applyConfig(e,t){this.config=function(e,t,r={}){const 
n=e,{target:i,eventOptions:a,window:s,enabled:o,transform:l}=n,c=$n(n,ti);if(r.shared=ri({target:i,eventOptions:a,window:s,enabled:o,transform:l},ei),t){const e=jn.get(t);r[t]=ri(on({shared:r.shared},c),e)}else for(const u in c){const e=jn.get(u);e&&(r[u]=ri(on({shared:r.shared},c[u]),e))}return r}(e,t,this.config)}clean(){this._targetEventStore.clean();for(const e of this.gestures)this.gestureEventStores[e].clean(),this.gestureTimeoutStores[e].clean()}effect(){return this.config.shared.target&&this.bind(),()=>this._targetEventStore.clean()}bind(...e){const t=this.config.shared,r={};let n;if(!t.target||(n=t.target(),n)){if(t.enabled){for(const t of this.gestures){const i=this.config[t],a=oi(r,i.eventOptions,!!n);if(i.enabled){new(Qn.get(t))(this,e,t).bind(a)}}const i=oi(r,t.eventOptions,!!n);for(const t in this.nativeHandlers)i(t,"",(r=>this.nativeHandlers[t](on(on({},this.state.shared),{},{event:r,args:e}))),void 0,!0)}for(const e in r)r[e]=Sn(...r[e]);if(!n)return r;for(const e in r){const{device:t,capture:i,passive:a}=pn(e);this._targetEventStore.add(n,t,"",r[e],{capture:i,passive:a})}}}}function si(e,t){e.gestures.add(t),e.gestureEventStores[t]=new ni(e,t),e.gestureTimeoutStores[t]=new ii}const oi=(e,t,r)=>(n,i,a,s={},o=!1)=>{var l,c;const u=null!==(l=s.capture)&&void 0!==l?l:t.capture,h=null!==(c=s.passive)&&void 0!==c?c:t.passive;let d=o?n:hn(n,i,u);r&&h&&(d+="Passive"),e[d]=e[d]||[],e[d].push(a)},li=/^on(Drag|Wheel|Scroll|Move|Pinch|Hover)/;function ci(e,t,r,n,i,a){if(!e.has(r))return;if(!Qn.has(n))return void 0;const s=r+"Start",o=r+"End";i[n]=e=>{let n;return e.first&&s in t&&t[s](e),r in t&&(n=t[r](e)),e.last&&o in t&&t[o](e),n},a[n]=a[n]||{}}function ui(e,t){const[r,n,i]=function(e){const t={},r={},n=new Set;for(let i in e)li.test(i)?(n.add(RegExp.lastMatch),r[i]=e[i]):t[i]=e[i];return[r,t,n]}(e),a={};return 
ci(i,r,"onDrag","drag",a,t),ci(i,r,"onWheel","wheel",a,t),ci(i,r,"onScroll","scroll",a,t),ci(i,r,"onPinch","pinch",a,t),ci(i,r,"onMove","move",a,t),ci(i,r,"onHover","hover",a,t),{handlers:a,config:t,nativeHandlers:n}}function hi(e,t={},r,n){const a=i.useMemo((()=>new ai(e)),[]);if(a.applyHandlers(e,n),a.applyConfig(t,r),i.useEffect(a.effect.bind(a)),i.useEffect((()=>a.clean.bind(a)),[]),void 0===t.target)return a.bind.bind(a)}function di(e,t){const r=([Xn,Kn,Jn,Zn,qn,Yn].forEach(Wn),function(e,t){const{handlers:r,nativeHandlers:n,config:i}=ui(e,t||{});return hi(r,i,void 0,n)});return r(e,t||{})}function pi({snap:e,global:t,cursor:r=!0,children:a,speed:l=1,rotation:c=[0,0,0],zoom:u=1,polar:h=[0,Math.PI/2],azimuth:d=[-1/0,1/0],config:p={mass:1,tension:170,friction:26}}){const{size:f,gl:m}=(0,o.useThree)(),g=i.useMemo((()=>[c[0]+h[0],c[0]+h[1]]),[c[0],h[0],h[1]]),v=i.useMemo((()=>[c[1]+d[0],c[1]+d[1]]),[c[1],d[0],d[1]]),A=i.useMemo((()=>[s.MathUtils.clamp(c[0],...g),s.MathUtils.clamp(c[1],...v),c[2]]),[c[0],c[1],c[2],g,v]),[y,x]=Wr((()=>({scale:1,rotation:A,config:p})));i.useEffect((()=>{x.start({scale:1,rotation:A,config:p})}),[A]),i.useEffect((()=>{t&&r&&(m.domElement.style.cursor="grab")}),[t,r,m.domElement]);const b=di({onHover:({last:e})=>{r&&!t&&(m.domElement.style.cursor=e?"auto":"grab")},onDrag:({down:t,delta:[n,i],memo:[a,o]=y.rotation.animation.to||A})=>{r&&(m.domElement.style.cursor=t?"grabbing":"grab"),n=s.MathUtils.clamp(o+n/f.width*Math.PI*l,...v),i=s.MathUtils.clamp(a+i/f.height*Math.PI*l,...g);const c=e&&!t&&"boolean"!=typeof e?e:p;return x.start({scale:t&&i>g[1]/2?u:1,rotation:e&&!t?A:[i,n,0],config:e=>"scale"===e?{...c,friction:3*c.friction}:c}),[i,n]}},{target:t?m.domElement:void 0});return i.createElement($r.group,n({},null==b?void 0:b(),y),a)}var fi=r(31480),mi=r(60374);const gi=i.createContext([]);function vi({box:e,multiple:t,children:r,onChange:a,border:l="1px solid #55aaff",backgroundColor:c="rgba(75, 160, 255, 
0.1)",filter:u=(e=>e),...h}){const{camera:d,raycaster:p,gl:f,controls:m,size:g,get:v}=(0,o.useThree)(),[A,y]=i.useState(!1),[x,b]=i.useReducer(((e,{object:t,shift:r})=>void 0===t?[]:Array.isArray(t)?t:r?e.includes(t)?e.filter((e=>e!==t)):[t,...e]:e[0]===t?[]:[t]),[]);i.useEffect((()=>{null==a||a(x)}),[x]);const w=i.useCallback((e=>{e.stopPropagation(),b({object:u([e.object])[0],shift:t&&e.shiftKey})}),[]),E=i.useCallback((e=>!A&&b({})),[A]),_=i.useRef(null);return i.useEffect((()=>{if(!e||!t)return;const r=new fi.M(d,_.current),n=document.createElement("div");n.style.pointerEvents="none",n.style.border=l,n.style.backgroundColor=c,n.style.position="fixed";const i=new s.Vector2,a=new s.Vector2,o=new s.Vector2,h=p.enabled,A=null==m?void 0:m.enabled;let y=!1;function x(e,t){var r;const{offsetX:n,offsetY:i}=null!==(r=null==p.computeOffsets?void 0:p.computeOffsets(e,v()))&&void 0!==r?r:e,{width:a,height:s}=g;t.set(n/a*2-1,-i/s*2+1)}function w(e){e.shiftKey&&(!function(e){var t;m&&(m.enabled=!1),p.enabled=!1,y=!0,null==(t=f.domElement.parentElement)||t.appendChild(n),n.style.left=`${e.clientX}px`,n.style.top=`${e.clientY}px`,n.style.width="0px",n.style.height="0px",i.x=e.clientX,i.y=e.clientY}(e),x(e,r.startPoint))}let E=[];function S(e){if(y){!function(e){o.x=Math.max(i.x,e.clientX),o.y=Math.max(i.y,e.clientY),a.x=Math.min(i.x,e.clientX),a.y=Math.min(i.y,e.clientY),n.style.left=`${a.x}px`,n.style.top=`${a.y}px`,n.style.width=o.x-a.x+"px",n.style.height=o.y-a.y+"px"}(e),x(e,r.endPoint);const t=r.select().sort((e=>e.uuid)).filter((e=>e.isMesh));(0,mi.Z)(t,E)||(E=t,b({object:u(t)}))}}function M(e){var t;y&&y&&(m&&(m.enabled=A),p.enabled=h,y=!1,null==(t=n.parentElement)||t.removeChild(n))}return 
document.addEventListener("pointerdown",w,{passive:!0}),document.addEventListener("pointermove",S,{passive:!0,capture:!0}),document.addEventListener("pointerup",M,{passive:!0}),()=>{document.removeEventListener("pointerdown",w),document.removeEventListener("pointermove",S),document.removeEventListener("pointerup",M)}}),[g,p,d,m,f]),i.createElement("group",n({ref:_,onClick:w,onPointerOver:()=>y(!0),onPointerOut:()=>y(!1),onPointerMissed:E},h),i.createElement(gi.Provider,{value:x},r))}function Ai(){return i.useContext(gi)}const yi=i.forwardRef((function({follow:e=!0,lockX:t=!1,lockY:r=!1,lockZ:a=!1,...s},l){const c=i.useRef();return(0,o.useFrame)((({camera:n})=>{if(!e||!c.current)return;const i=c.current.rotation.clone();c.current.quaternion.copy(n.quaternion),t&&(c.current.rotation.x=i.x),r&&(c.current.rotation.y=i.y),a&&(c.current.rotation.z=i.z)})),i.createElement("group",n({ref:M([c,l])},s))}));var xi=r(43075),bi=r(10977),wi=r(51815);const Ei=i.forwardRef((function({points:e,color:t="black",vertexColors:r,lineWidth:a,dashed:o,...l},c){const[u]=i.useState((()=>new xi.w)),[h]=i.useState((()=>new bi.Y)),[d]=i.useState((()=>new s.Vector2(512,512))),p=i.useMemo((()=>{const t=new wi.L,n=e.map((e=>e instanceof s.Vector3?e.toArray():e));if(t.setPositions(n.flat()),r){const e=r.map((e=>e instanceof s.Color?e.toArray():e));t.setColors(e.flat())}return t}),[e,r]);return i.useLayoutEffect((()=>{u.computeLineDistances()}),[e,u]),i.useLayoutEffect((()=>{o?h.defines.USE_DASH="":delete h.defines.USE_DASH,h.needsUpdate=!0}),[o,h]),i.useEffect((()=>()=>p.dispose()),[p]),i.createElement("primitive",n({object:u,ref:c},l),i.createElement("primitive",{object:p,attach:"geometry"}),i.createElement("primitive",n({object:h,attach:"material",color:t,vertexColors:Boolean(r),resolution:d,linewidth:a,dashed:o},l)))})),_i=new s.Vector3,Si=i.forwardRef((function({start:e=[0,0,0],end:t=[0,0,0],mid:r,segments:a=20,...o},l){const c=i.useRef(null),[u]=i.useState((()=>new 
s.QuadraticBezierCurve3(void 0,void 0,void 0))),h=i.useCallback(((e,t,r,n=20)=>(e instanceof s.Vector3?u.v0.copy(e):u.v0.set(...e),t instanceof s.Vector3?u.v2.copy(t):u.v2.set(...t),r instanceof s.Vector3?u.v1.copy(r):u.v1.copy(u.v0.clone().add(u.v2.clone().sub(u.v0)).add(_i.set(0,u.v0.y-u.v2.y,0))),u.getPoints(n))),[]);i.useLayoutEffect((()=>{c.current.setPoints=(e,t,r)=>{const n=h(e,t,r);c.current.geometry&&c.current.geometry.setPositions(n.map((e=>e.toArray())).flat())}}),[]);const d=i.useMemo((()=>h(e,t,r,a)),[e,t,r,a]);return i.createElement(Ei,n({ref:M([c,l]),points:d},o))})),Mi=i.forwardRef((function({start:e,end:t,midA:r,midB:a,segments:o=20,...l},c){const u=i.useMemo((()=>{const n=e instanceof s.Vector3?e:new s.Vector3(...e),i=t instanceof s.Vector3?t:new s.Vector3(...t),l=r instanceof s.Vector3?r:new s.Vector3(...r),c=a instanceof s.Vector3?a:new s.Vector3(...a);return new s.CubicBezierCurve3(n,l,c,i).getPoints(o)}),[e,t,r,a,o]);return i.createElement(Ei,n({ref:c,points:u},l))})),Ti=i.forwardRef((({url:e,distance:t=1,loop:r=!0,autoplay:a,...l},c)=>{const u=i.useRef(),h=(0,o.useThree)((({camera:e})=>e)),[d]=i.useState((()=>new s.AudioListener)),p=(0,o.useLoader)(s.AudioLoader,e);return i.useEffect((()=>{const e=u.current;e&&(e.setBuffer(p),e.setRefDistance(t),e.setLoop(r),a&&!e.isPlaying&&e.play())}),[p,h,t,r]),i.useEffect((()=>{const e=u.current;return h.add(d),()=>{h.remove(d),e&&(e.isPlaying&&e.stop(),e.source&&e.source._connected&&e.disconnect())}}),[]),i.createElement("positionalAudio",n({ref:M([u,c]),args:[d]},l))}));function Ci(){var e,t=0,r=[],n=0,i=0;var a=d((function(e){i||o(1,e)})),s=d((function(e){i||o(-1,e)}));function o(r,n){i++;var a=0;try{n===m&&p();var s=r>0&&h(n);s?s.call(n,d((function(e){a++,o(1,e)})),d((function(e){a++,o(-1,e)}))):(t=r,e=n,l())}catch(Ha){t||a||o(-1,Ha)}}function l(){n||(setTimeout(c,0),n=1)}function c(){var e=r;n=0,r=[],e.forEach(u)}function u(e){e()}function h(e){var t=e&&(f(e)||"object"==typeof e)&&e.then;return 
f(t)&&t}function d(e){var t=0;return function(){for(var r=[],n=arguments.length;n--;)r[n]=arguments[n];t++||e.apply(this,r)}}function p(){throw new TypeError("Chaining cycle detected")}var f=function(e){return"function"==typeof e},m={then:function(n,i){var a=Ci();return r.push((function(){var r=t>0?n:i;if(f(r))try{var s=r(e);s===a&&p();var o=h(s);o?o.call(s,a.resolve,a.reject):a.resolve(s)}catch(l){a.reject(l)}else a[t>0?"resolve":"reject"](e)})),t&&l(),a},resolve:a,reject:s};return m}function Ii(){var e,t,r=new Promise((function(r,n){e=r,t=n}));return{then:r.then.bind(r),resolve:e,reject:t}}Ci.all=Ii.all=function(e){var t=0,r=[],n=Bi();return 0===e.length?n.resolve([]):e.forEach((function(i,a){var s=Bi();s.resolve(i),s.then((function(i){t++,r[a]=i,t===e.length&&n.resolve(r)}),n.reject)})),n};var Bi="function"==typeof Promise?Ii:Ci;function Ri(){var e=Object.create(null);function t(n,i){var a=n.id,s=n.name,o=n.dependencies;void 0===o&&(o=[]);var l=n.init;void 0===l&&(l=function(){});var c=n.getTransferables;if(void 0===c&&(c=null),!e[a])try{o=o.map((function(r){return r&&r.isWorkerModule&&(t(r,(function(e){if(e instanceof Error)throw e})),r=e[r.id].value),r})),l=r("<"+s+">.init",l),c&&(c=r("<"+s+">.getTransferables",c));var u=null;"function"==typeof l?u=l.apply(void 0,o):console.error("worker module init function failed to rehydrate"),e[a]={id:a,value:u,getTransferables:c},i(u)}catch(h){h&&h.noLog||console.error(h),i(h)}}function r(e,t){var r=void 0;self.troikaDefine=function(e){return r=e};var n=URL.createObjectURL(new Blob(["/** "+e.replace(/\*/g,"")+" **/\n\ntroikaDefine(\n"+t+"\n)"],{type:"application/javascript"}));try{importScripts(n)}catch(i){console.error(i)}return URL.revokeObjectURL(n),delete self.troikaDefine,r}self.addEventListener("message",(function(r){var n=r.data,i=n.messageId,a=n.action,s=n.data;try{"registerModule"===a&&t(s,(function(e){e instanceof 
Error?postMessage({messageId:i,success:!1,error:e.message}):postMessage({messageId:i,success:!0,result:{isCallable:"function"==typeof e}})})),"callModule"===a&&function(t,r){var n,i=t.id,a=t.args;e[i]&&"function"==typeof e[i].value||r(new Error("Worker module "+i+": not found or its 'init' did not return a function"));try{var s=(n=e[i]).value.apply(n,a);s&&"function"==typeof s.then?s.then(o,(function(e){return r(e instanceof Error?e:new Error(""+e))})):o(s)}catch(l){r(l)}function o(t){try{var n=e[i].getTransferables&&e[i].getTransferables(t);n&&Array.isArray(n)&&n.length||(n=void 0),r(t,n)}catch(l){console.error(l),r(l)}}}(s,(function(e,t){e instanceof Error?postMessage({messageId:i,success:!1,error:e.message}):postMessage({messageId:i,success:!0,result:e},t||void 0)}))}catch(o){postMessage({messageId:i,success:!1,error:o.stack})}}))}var Pi=function(){var e=!1;if("undefined"!=typeof window&&void 0!==window.document)try{new Worker(URL.createObjectURL(new Blob([""],{type:"application/javascript"}))).terminate(),e=!0}catch(t){console.log("Troika createWorkerModule: web workers not allowed; falling back to main thread execution. 
Cause: ["+t.message+"]")}return Pi=function(){return e},e},Fi=0,Li=0,Di=!1,Ui=Object.create(null),Ni=Object.create(null),ki=Object.create(null);function Oi(e){if(!(e&&"function"==typeof e.init||Di))throw new Error("requires `options.init` function");var t=e.dependencies,r=e.init,n=e.getTransferables,i=e.workerId;if(!Pi())return function(e){var t=function(){for(var e=[],r=arguments.length;r--;)e[r]=arguments[r];return t._getInitResult().then((function(t){if("function"==typeof t)return t.apply(void 0,e);throw new Error("Worker module function was called but `init` did not return a callable function")}))};return t._getInitResult=function(){var r=e.dependencies,n=e.init;r=Array.isArray(r)?r.map((function(e){return e&&e._getInitResult?e._getInitResult():e})):[];var i=Bi.all(r).then((function(e){return n.apply(null,e)}));return t._getInitResult=function(){return i},i},t}(e);null==i&&(i="#default");var a="workerModule"+ ++Fi,s=e.name||a,o=null;function l(){for(var e=[],t=arguments.length;t--;)e[t]=arguments[t];if(!o){o=zi(i,"registerModule",l.workerModuleData);var r=function(){o=null,Ni[i].delete(r)};(Ni[i]||(Ni[i]=new Set)).add(r)}return o.then((function(t){if(t.isCallable)return zi(i,"callModule",{id:a,args:e});throw new Error("Worker module function was called but `init` did not return a callable function")}))}return t=t&&t.map((function(e){return"function"!=typeof e||e.workerModuleData||(Di=!0,e=Oi({workerId:i,name:"<"+s+"> function dependency: "+e.name,init:"function(){return (\n"+Gi(e)+"\n)}"}),Di=!1),e&&e.workerModuleData&&(e=e.workerModuleData),e})),l.workerModuleData={isWorkerModule:!0,id:a,name:s,dependencies:t,init:Gi(r),getTransferables:n&&Gi(n)},l}function Gi(e){var t=e.toString();return!/^function/.test(t)&&/^\w+\s*\(/.test(t)&&(t="function "+t),t}function zi(e,t,r){var n=Bi(),i=++Li;return ki[i]=function(e){e.success?n.resolve(e.result):n.reject(new Error("Error in worker "+t+" call: "+e.error))},function(e){var t=Ui[e];if(!t){var r=Gi(Ri);(t=Ui[e]=new 
Worker(URL.createObjectURL(new Blob(["/** Worker Module Bootstrap: "+e.replace(/\*/g,"")+" **/\n\n;("+r+")()"],{type:"application/javascript"})))).onmessage=function(e){var t=e.data,r=t.messageId,n=ki[r];if(!n)throw new Error("WorkerModule response with empty or unknown messageId");delete ki[r],n(t)}}return t}(e).postMessage({messageId:i,action:t,data:r}),n}var Vi=Oi({name:"Thenable",dependencies:[Bi],init:function(e){return e}});function Hi(){var e=function(e){function t(e,t,r,n,i,a,s,o){var l=1-s;o.x=l*l*e+2*l*s*r+s*s*i,o.y=l*l*t+2*l*s*n+s*s*a}function r(e,t,r,n,i,a,s,o,l,c){var u=1-l;c.x=u*u*u*e+3*u*u*l*r+3*u*l*l*i+l*l*l*s,c.y=u*u*u*t+3*u*u*l*n+3*u*l*l*a+l*l*l*o}function n(e,t){for(var r,n,i,a,s,o=/([MLQCZ])([^MLQCZ]*)/g;r=o.exec(e);){var l=r[2].replace(/^\s*|\s*$/g,"").split(/[,\s]+/).map((function(e){return parseFloat(e)}));switch(r[1]){case"M":a=n=l[0],s=i=l[1];break;case"L":l[0]===a&&l[1]===s||t("L",a,s,a=l[0],s=l[1]);break;case"Q":t("Q",a,s,a=l[2],s=l[3],l[0],l[1]);break;case"C":t("C",a,s,a=l[4],s=l[5],l[0],l[1],l[2],l[3]);break;case"Z":a===n&&s===i||t("L",a,s,n,i)}}}function i(e,i,a){void 0===a&&(a=16);var s={x:0,y:0};n(e,(function(e,n,o,l,c,u,h,d,p){switch(e){case"L":i(n,o,l,c);break;case"Q":for(var f=n,m=o,g=1;g0;)n[i]=arguments[i+2];var a=o[t]||(o[t]=r.getUniformLocation(c,t));r["uniform"+e].apply(r,[a].concat(n))},setAttribute:function(e,t,n,a,o){var l=s[e];l||(l=s[e]={buf:r.createBuffer(),loc:r.getAttribLocation(c,e),data:null}),r.bindBuffer(r.ARRAY_BUFFER,l.buf),r.vertexAttribPointer(l.loc,t,r.FLOAT,!1,0,0),r.enableVertexAttribArray(l.loc),i?r.vertexAttribDivisor(l.loc,a):d("ANGLE_instanced_arrays").vertexAttribDivisorANGLE(l.loc,a),o!==l.data&&(r.bufferData(r.ARRAY_BUFFER,o,n),l.data=o)}})}}}l[e].transaction(a)}function m(e,t){u++;try{r.activeTexture(r.TEXTURE0+u);var 
n=c[e];n||(n=c[e]=r.createTexture(),r.bindTexture(r.TEXTURE_2D,n),r.texParameteri(r.TEXTURE_2D,r.TEXTURE_MIN_FILTER,r.NEAREST),r.texParameteri(r.TEXTURE_2D,r.TEXTURE_MAG_FILTER,r.NEAREST)),r.bindTexture(r.TEXTURE_2D,n),t(n,u)}finally{u--}}function g(e,t,n){var i=r.createFramebuffer();h.push(i),r.bindFramebuffer(r.FRAMEBUFFER,i),r.activeTexture(r.TEXTURE0+t),r.bindTexture(r.TEXTURE_2D,e),r.framebufferTexture2D(r.FRAMEBUFFER,r.COLOR_ATTACHMENT0,r.TEXTURE_2D,e,0);try{n(i)}finally{r.deleteFramebuffer(i),r.bindFramebuffer(r.FRAMEBUFFER,h[--h.length-1]||null)}}function v(){a={},l={},c={},u=-1,h.length=0}r.canvas.addEventListener("webglcontextlost",(function(e){v(),e.preventDefault()}),!1),s.set(r,n={gl:r,isWebGL2:i,getExtension:d,withProgram:f,withTexture:m,withTextureFramebuffer:g,handleContextLoss:v})}t(n)}function c(e,t,r,n,i,s,o,c){void 0===o&&(o=15),void 0===c&&(c=null),l(e,(function(e){var l=e.gl,u=e.withProgram;(0,e.withTexture)("copy",(function(e,h){l.texImage2D(l.TEXTURE_2D,0,l.RGBA,i,s,0,l.RGBA,l.UNSIGNED_BYTE,t),u("copy",a,"precision highp float;uniform sampler2D tex;varying vec2 vUV;void main(){gl_FragColor=texture2D(tex,vUV);}",(function(e){var t=e.setUniform;(0,e.setAttribute)("aUV",2,l.STATIC_DRAW,0,new Float32Array([0,0,2,0,0,2])),t("1i","image",h),l.bindFramebuffer(l.FRAMEBUFFER,c||null),l.disable(l.BLEND),l.colorMask(8&o,4&o,2&o,1&o),l.viewport(r,n,i,s),l.scissor(r,n,i,s),l.drawArrays(l.TRIANGLES,0,3)}))}))}))}var u=Object.freeze({__proto__:null,withWebGLContext:l,renderImageData:c,resizeWebGLCanvasWithoutClearing:function(e,t,r){var n=e.width,i=e.height;l(e,(function(a){var s=a.gl,o=new Uint8Array(n*i*4);s.readPixels(0,0,n,i,s.RGBA,s.UNSIGNED_BYTE,o),e.width=t,e.height=r,c(s,o,0,0,n,i)}))}});function h(e,t,r,n,a,s){void 0===s&&(s=1);var o=new Uint8Array(e*t),l=n[2]-n[0],c=n[3]-n[1],u=[];i(r,(function(e,t,r,n){u.push({x1:e,y1:t,x2:r,y2:n,minX:Math.min(e,r),minY:Math.min(t,n),maxX:Math.max(e,r),maxY:Math.max(t,n)})})),u.sort((function(e,t){return 
e.maxX-t.maxX}));for(var h=0;ha.minX&&t-na.minY){var s=f(e,t,a.x1,a.y1,a.x2,a.y2);st!=i.y2>t&&e<(i.x2-i.x1)*(t-i.y1)/(i.y2-i.y1)+i.x1&&(r+=i.y1p.y!=seg.w>p.y)&&(p.x<(seg.z-seg.x)*(p.y-seg.y)/(seg.w-seg.y)+seg.x);bool crossingUp=crossing&&vLineSegment.y"),c=l[0],u=l[1];c=String.fromCodePoint(n+=parseInt(c,36)),u=String.fromCodePoint(n+=parseInt(u,36)),i.set(c,u),t&&a.set(u,c)}})),{map:i,reverseMap:a}}function v(){if(!h){var e=g(f,!0),t=e.map,r=e.reverseMap;h=t,d=r,p=g(m,!1).map}}function A(e){return v(),h.get(e)||null}function y(e){return v(),d.get(e)||null}function x(e){return v(),p.get(e)||null}var b=r.L,w=r.R,E=r.EN,_=r.ES,S=r.ET,M=r.AN,T=r.CS,C=r.B,I=r.S,B=r.ON,R=r.BN,P=r.NSM,F=r.AL,L=r.LRO,D=r.RLO,U=r.LRE,N=r.RLE,k=r.PDF,O=r.LRI,G=r.RLI,z=r.FSI,V=r.PDI;var H;function Q(e){return function(){if(!H){var e=g("14>1,j>2,t>2,u>2,1a>g,2v3>1,1>1,1ge>1,1wd>1,b>1,1j>1,f>1,ai>3,-2>3,+1,8>1k0,-1jq>1y7,-1y6>1hf,-1he>1h6,-1h5>1ha,-1h8>1qi,-1pu>1,6>3u,-3s>7,6>1,1>1,f>1,1>1,+2,3>1,1>1,+13,4>1,1>1,6>1eo,-1ee>1,3>1mg,-1me>1mk,-1mj>1mi,-1mg>1mi,-1md>1,1>1,+2,1>10k,-103>1,1>1,4>1,5>1,1>1,+10,3>1,1>8,-7>8,+1,-6>7,+1,a>1,1>1,u>1,u6>1,1>1,+5,26>1,1>1,2>1,2>2,8>1,7>1,4>1,1>1,+5,b8>1,1>1,+3,1>3,-2>1,2>1,1>1,+2,c>1,3>1,1>1,+2,h>1,3>1,a>1,1>1,2>1,3>1,1>1,d>1,f>1,3>1,1a>1,1>1,6>1,7>1,13>1,k>1,1>1,+19,4>1,1>1,+2,2>1,1>1,+18,m>1,a>1,1>1,lk>1,1>1,4>1,2>1,f>1,3>1,1>1,+3,db>1,1>1,+3,3>1,1>1,+2,14qm>1,1>1,+1,6>1,4j>1,j>2,t>2,u>2,2>1,+1",!0),t=e.map;e.reverseMap.forEach((function(e,r){t.set(r,e)})),H=t}}(),H.get(e)||null}function j(e,t,r,n){var i=e.length;r=Math.max(0,null==r?0:+r),n=Math.min(i-1,null==n?i-1:+n);var a=[];return t.paragraphs.forEach((function(i){var s=Math.max(r,i.start),o=Math.min(n,i.end);if(s=s&&u(e[h])&l;h--)c[h]=i.level;for(var d=i.level,p=1/0,f=0;fd&&(d=m),m=p;g--)for(var v=0;v=g){for(var A=v;v+1=g;)v++;v>A&&a.push([A+r,v+r])}}})),a}function W(e,t,r,n){for(var i=j(e,t,r,n),a=[],s=0;s0)Y--;else if(K>0){for(q=0;!W[W.length-1]._isolate;)W.pop();var 
re=W[W.length-1]._isolInitIndex;null!=re&&(p.set(re,J),p.set(J,re)),W.pop(),K--}X=W[W.length-1],d[J]=X._level,X._override&&h(J,X._override)}else Z&k?(0===Y&&(q>0?q--:!X._isolate&&W.length>1&&(W.pop(),X=W[W.length-1])),d[J]=X._level):Z&C&&(d[J]=m.level);else d[J]=X._level,X._override&&Z!==R&&h(J,X._override)}for(var ne=[],ie=null,ae=m.start;ae<=m.end;ae++){var se=r[ae];if(!(se&o)){var oe=d[ae],le=se&i,ce=se===V;ie&&oe===ie._level?(ie._end=ae,ie._endsWithIsolInit=le):ne.push(ie={_start:ae,_end:ae,_level:oe,_startsWithPDI:ce,_endsWithIsolInit:le})}}for(var ue=[],he=0;he=0;we--)if(!(r[we]&o)){be=d[we];break}var Ee=ge[ge.length-1],_e=d[Ee],Se=m.level;if(!(r[Ee]&i))for(var Me=Ee+1;Me<=m.end;Me++)if(!(r[Me]&o)){Se=d[Me];break}ue.push({_seqIndices:ge,_sosType:Math.max(be,xe)%2?w:b,_eosType:Math.max(Se,_e)%2?w:b})}}for(var Te=0;Te=0;De--)if(!(r[Ie[De]]&o)){Le=r[Ie[De]];break}h(Fe,Le&(i|V)?B:Le)}}if(c.get(E))for(var Ue=0;Ue=-1;ke--){var Oe=-1===ke?Be:r[Ie[ke]];if(Oe&a){Oe===F&&h(Ne,M);break}}}if(c.get(F))for(var Ge=0;Ge=0&&(Qe=r[Ie[We]])&o;We--);for(var Xe=Ve+1;Xe=0&&r[Ie[Ke]]&(S|o);Ke--)h(Ie[Ke],E);for(var Je=Ye+1;Je=0&&r[Ie[et]]&o;et--)h(Ie[et],B);for(var tt=Ze+1;tt=0;pt--){var ft=ct[pt].char;if(ft===dt||ft===y(x(ht))||A(x(ft))===ht){lt.push([ct[pt].seqIndex,ut]),ct.length=pt;break}}}lt.sort((function(e,t){return e[0]-t[0]}));for(var mt=0;mt=0;_t--){var St=Ie[_t];if(r[St]&ot){var Mt=r[St]&st?w:b;xt=Mt!==Ht(St)?Mt:Ht(St);break}}}if(xt){if(r[Ie[vt]]=r[Ie[At]]=xt,xt!==Ht(Ie[vt]))for(var Tt=vt+1;Tt=0;Ft--){if(!(r[Ie[Ft]]&o)){Pt=r[Ie[Ft]]&st?w:b;break}Bt=Ft}for(var Lt=Re,Dt=It+1;Dt=0&&u(e[Gt])&l;Gt--)d[Gt]=m.level}}return{levels:d,paragraphs:f};function zt(t,n){for(var a=t;a/gm,(function(e,t){let r=s.ShaderChunk[t];return r?Wi(r):e}))}const Xi=[];for(let vh=0;vh<256;vh++)Xi[vh]=(vh<16?"0":"")+vh.toString(16);const Yi=Object.assign||function(){let e=arguments[0];for(let t=1,r=arguments.length;t/gm,"\n//!BEGIN_POST_CHUNK $1\n$&\n//!END_POST_CHUNK\n")));if(d){let 
r=d({vertexShader:e,fragmentShader:t});e=r.vertexShader,t=r.fragmentShader}if(h){let e=[];t=t.replace(/^\/\/!BEGIN_POST_CHUNK[^]+?^\/\/!END_POST_CHUNK/gm,(t=>(e.push(t),""))),u=`${h}\n${e.join("\n")}\n${u}`}if(p){const e=`\nuniform float ${p};\n`;i=e+i,l=e+l}o&&(i=`${i}\nvoid troikaVertexTransform${n}(inout vec3 position, inout vec3 normal, inout vec2 uv) {\n ${o}\n}\n`,a=`\ntroika_position_${n} = vec3(position);\ntroika_normal_${n} = vec3(normal);\ntroika_uv_${n} = vec2(uv);\ntroikaVertexTransform${n}(troika_position_${n}, troika_normal_${n}, troika_uv_${n});\n${a}\n`,e=(e=`vec3 troika_position_${n};\nvec3 troika_normal_${n};\nvec2 troika_uv_${n};\n${e}\n`).replace(/\b(position|normal|uv)\b/g,((e,t,r,i)=>/\battribute\s+vec[23]\s+$/.test(i.substr(0,r))?t:`troika_${t}_${n}`)));return e=ea(e,n,i,a,s),t=ea(t,n,l,c,u),{vertexShader:e,fragmentShader:t}}(n,t,r);s=Ji[a]=e}n.vertexShader=s.vertexShader,n.fragmentShader=s.fragmentShader,Yi(n.uniforms,this.uniforms),t.timeUniform&&(n.uniforms[t.timeUniform]={get value(){return Date.now()-qi}}),this[i]&&this[i](n)},o=function(){return l(t.chained?e:e.clone())},l=function(n){const i=Object.create(n,c);return Object.defineProperty(i,"baseMaterial",{value:e}),Object.defineProperty(i,"id",{value:Zi++}),i.uuid=function(){const e=4294967295*Math.random()|0,t=4294967295*Math.random()|0,r=4294967295*Math.random()|0,n=4294967295*Math.random()|0;return(Xi[255&e]+Xi[e>>8&255]+Xi[e>>16&255]+Xi[e>>24&255]+"-"+Xi[255&t]+Xi[t>>8&255]+"-"+Xi[t>>16&15|64]+Xi[t>>24&255]+"-"+Xi[63&r|128]+Xi[r>>8&255]+"-"+Xi[r>>16&255]+Xi[r>>24&255]+Xi[255&n]+Xi[n>>8&255]+Xi[n>>16&255]+Xi[n>>24&255]).toUpperCase()}(),i.uniforms=Yi({},n.uniforms,t.uniforms),i.defines=Yi({},n.defines,t.defines),i.defines[`TROIKA_DERIVED_MATERIAL_${r}`]="",i.extensions=Yi({},n.extensions,t.extensions),i._listeners=void 0,i},c={constructor:{value:o},isDerivedMaterial:{value:!0},customProgramCacheKey:{writable:!0,configurable:!0,value:function(){return 
e.customProgramCacheKey()+"|"+r}},onBeforeCompile:{get:()=>a,set(e){this[i]=e}},copy:{writable:!0,configurable:!0,value:function(t){return e.copy.call(this,t),e.isShaderMaterial||e.isDerivedMaterial||(Yi(this.extensions,t.extensions),Yi(this.defines,t.defines),Yi(this.uniforms,s.UniformsUtils.clone(t.uniforms))),this}},clone:{writable:!0,configurable:!0,value:function(){const t=new e.constructor;return l(t).copy(this)}},getDepthMaterial:{writable:!0,configurable:!0,value:function(){let r=this._depthMaterial;return r||(r=this._depthMaterial=$i(e.isDerivedMaterial?e.getDepthMaterial():new s.MeshDepthMaterial({depthPacking:s.RGBADepthPacking}),t),r.defines.IS_DEPTH_MATERIAL="",r.uniforms=this.uniforms),r}},getDistanceMaterial:{writable:!0,configurable:!0,value:function(){let r=this._distanceMaterial;return r||(r=this._distanceMaterial=$i(e.isDerivedMaterial?e.getDistanceMaterial():new s.MeshDistanceMaterial,t),r.defines.IS_DISTANCE_MATERIAL="",r.uniforms=this.uniforms),r}},dispose:{writable:!0,configurable:!0,value(){const{_depthMaterial:t,_distanceMaterial:r}=this;t&&t.dispose(),r&&r.dispose(),e.dispose.call(this)}}};return n[r]=o,new o}function ea(e,t,r,n,i){return(n||i||r)&&(e=e.replace(ji,`\n${r}\nvoid troikaOrigMain${t}() {`),e+=`\nvoid main() {\n ${n}\n troikaOrigMain${t}();\n ${i}\n}`),e}function ta(e,t){return"uniforms"===e?void 0:"function"==typeof t?t.toString():t}let ra=0;const na=new Map;s.DoubleSide;const ia=()=>(self.performance||Date).now(),aa=Hi();let sa;const oa=function(){const e=[];let t=0;function r(){const n=ia();for(;e.length&&ia()-n<5;)e.shift()();t=e.length?setTimeout(r,0):0}return(...n)=>{const i=Bi();return e.push((()=>{const e=ia();try{aa.webgl.generateIntoCanvas(...n),i.resolve({timing:ia()-e})}catch(t){i.reject(t)}})),t||(t=setTimeout(r,0)),i}}(),la=function(){const e={};let t=0;return function(r,n,i,a,s,o,l,c,u,h){const d="TroikaTextSDFGenerator_JS_"+t++%4;let p=e[d];return 
p||(p=e[d]={workerModule:Oi({name:d,workerId:d,dependencies:[Hi,ia],init(e,t){const r=e().javascript.generate;return function(...e){const n=t();return{textureData:r(...e),timing:t()-n}}},getTransferables:e=>[e.textureData.buffer]}),requests:0,idleTimer:null}),p.requests++,clearTimeout(p.idleTimer),p.workerModule(r,n,i,a,s,o).then((({textureData:e,timing:t})=>{const i=ia(),a=new Uint8Array(4*e.length);for(let r=0;r{!function(e){Ni[e]&&Ni[e].forEach((function(e){e()})),Ui[e]&&(Ui[e].terminate(),delete Ui[e])}(d)}),2e3)),{timing:t}}))}}();const ca=aa.webglUtils.resizeWebGLCanvasWithoutClearing;const ua=Oi({name:"Typr Font Parser",dependencies:[function(){return"undefined"==typeof window&&(self.window=self),function(e){var t={parse:function(e){var r=t._bin,n=new Uint8Array(e);if("ttcf"==r.readASCII(n,0,4)){var i=4;r.readUshort(n,i),i+=2,r.readUshort(n,i),i+=2;var a=r.readUint(n,i);i+=4;for(var s=[],o=0;o>>r&1)&&t++;return t},t._lctf.readClassDef=function(e,r){var n=t._bin,i=[],a=n.readUshort(e,r);if(r+=2,1==a){var s=n.readUshort(e,r);r+=2;var o=n.readUshort(e,r);r+=2;for(var l=0;l0&&(a.featureParams=i+s);var o=n.readUshort(e,r);r+=2,a.tab=[];for(var l=0;l255?-1:t.CFF.glyphByUnicode(e,t.CFF.tableSE[r])},t.CFF.readEncoding=function(e,r,n){t._bin;var i=[".notdef"],a=e[r];if(r++,0!=a)throw"error: unknown encoding format: "+a;var s=e[r];r++;for(var o=0;o>4,m=15&p;if(15!=f&&d.push(f),15!=m&&d.push(m),15==m)break}for(var g="",v=[0,1,2,3,4,5,6,7,8,9,".","e","e-","reserved","-","endOfNumber"],A=0;A=s.xMax||s.yMin>=s.yMax)return null;if(s.noc>0){s.endPts=[];for(var o=0;o=1&&o.fmt<=2){c=a.readUshort(e,n),n+=2;var h=a.readUshort(e,n);n+=2,u=t._lctf.numOfOnes(c);var d=t._lctf.numOfOnes(h);if(1==o.fmt){o.pairsets=[];var p=a.readUshort(e,n);n+=2;for(var f=0;f=1&&o.fmt<=2){if(1==o.fmt)o.delta=a.readShort(e,n),n+=2;else if(2==o.fmt){var c=a.readUshort(e,n);n+=2,o.newg=a.readUshorts(e,n,c),n+=2*o.newg.length}}else if(4==r){o.vals=[],c=a.readUshort(e,n),n+=2;for(var 
u=0;u>>8;if(0!=(h&=15))throw"unknown kern table format: "+h;r=t.kern.readFormat0(e,r,l)}return l},t.kern.parseV1=function(e,r,n,i){var a=t._bin;a.readFixed(e,r),r+=4;var s=a.readUint(e,r);r+=4;for(var o={glyph1:[],rval:[]},l=0;l>>8;if(0!=(u&=15))throw"unknown kern table format: "+u;r=t.kern.readFormat0(e,r,o)}return o},t.kern.readFormat0=function(e,r,n){var i=t._bin,a=-1,s=i.readUshort(e,r);r+=2,i.readUshort(e,r),r+=2,i.readUshort(e,r),r+=2,i.readUshort(e,r),r+=2;for(var o=0;o=i.map.length?0:i.map[t];if(4==i.format){for(var a=-1,s=0;st?0:65535&(0!=i.idRangeOffset[a]?i.glyphIdArray[t-i.startCount[a]+(i.idRangeOffset[a]>>1)-(i.idRangeOffset.length-a)]:t+i.idDelta[a])}if(12==i.format){if(t>i.groups[i.groups.length-1][1])return 0;for(s=0;s-1?t.U._simpleGlyph(i,n):t.U._compoGlyph(i,r,n))},t.U._simpleGlyph=function(e,r){for(var n=0;na)){for(var p=!0,f=0,m=0;ma)){for(p=!0,m=0;m>1,s.length=0,l=!0;else if("o3"==S||"o23"==S)s.length%2!=0&&!l&&(c=s.shift()+i.nominalWidthX),o+=s.length>>1,s.length=0,l=!0;else if("o4"==S)s.length>1&&!l&&(c=s.shift()+i.nominalWidthX,l=!0),u&&t.U.P.closePath(a),p+=s.pop(),t.U.P.moveTo(a,d,p),u=!0;else if("o5"==S)for(;s.length>0;)d+=s.shift(),p+=s.shift(),t.U.P.lineTo(a,d,p);else if("o6"==S||"o7"==S)for(var M=s.length,T="o6"==S,C=0;CMath.abs(b-p)?d=x+s.shift():p=b+s.shift(),t.U.P.curveTo(a,f,m,g,v,w,E),t.U.P.curveTo(a,A,y,x,b,d,p));else if("o14"==S){if(s.length>0&&!l&&(c=s.shift()+n.nominalWidthX,l=!0),4==s.length){var R=s.shift(),P=s.shift(),F=s.shift(),L=s.shift(),D=t.CFF.glyphBySE(n,F),U=t.CFF.glyphBySE(n,L);t.U._drawCFF(n.CharStrings[D],r,n,i,a),r.x=R,r.y=P,t.U._drawCFF(n.CharStrings[U],r,n,i,a)}u&&(t.U.P.closePath(a),u=!1)}else if("o19"==S||"o20"==S)s.length%2!=0&&!l&&(c=s.shift()+i.nominalWidthX),o+=s.length>>1,s.length=0,l=!0,h+=o+7>>3;else if("o21"==S)s.length>2&&!l&&(c=s.shift()+i.nominalWidthX,l=!0),p+=s.pop(),d+=s.pop(),u&&t.U.P.closePath(a),t.U.P.moveTo(a,d,p),u=!0;else 
if("o22"==S)s.length>1&&!l&&(c=s.shift()+i.nominalWidthX,l=!0),d+=s.pop(),u&&t.U.P.closePath(a),t.U.P.moveTo(a,d,p),u=!0;else if("o25"==S){for(;s.length>6;)d+=s.shift(),p+=s.shift(),t.U.P.lineTo(a,d,p);f=d+s.shift(),m=p+s.shift(),g=f+s.shift(),v=m+s.shift(),d=g+s.shift(),p=v+s.shift(),t.U.P.curveTo(a,f,m,g,v,d,p)}else if("o26"==S)for(s.length%2&&(d+=s.shift());s.length>0;)f=d,m=p+s.shift(),d=g=f+s.shift(),p=(v=m+s.shift())+s.shift(),t.U.P.curveTo(a,f,m,g,v,d,p);else if("o27"==S)for(s.length%2&&(p+=s.shift());s.length>0;)m=p,g=(f=d+s.shift())+s.shift(),v=m+s.shift(),d=g+s.shift(),p=v,t.U.P.curveTo(a,f,m,g,v,d,p);else if("o10"==S||"o29"==S){var N="o10"==S?i:n;if(0==s.length)console.debug("error: empty stack");else{var k=s.pop(),O=N.Subrs[k+N.Bias];r.x=d,r.y=p,r.nStems=o,r.haveWidth=l,r.width=c,r.open=u,t.U._drawCFF(O,r,n,i,a),d=r.x,p=r.y,o=r.nStems,l=r.haveWidth,c=r.width,u=r.open}}else if("o30"==S||"o31"==S){var G=s.length,z=(B=0,"o31"==S);for(B+=G-(M=-3&G);B>>1|(21845&p)<<1;f=(61680&(f=(52428&f)>>>2|(13107&f)<<2))>>>4|(3855&f)<<4,d[p]=((65280&f)>>>8|(255&f)<<8)>>>1}var m=function(e,t,n){for(var i=e.length,a=0,s=new r(t);a>>c]=u}else for(o=new r(i),a=0;a>>15-e[a]);return o},g=new t(288);for(p=0;p<144;++p)g[p]=8;for(p=144;p<256;++p)g[p]=9;for(p=256;p<280;++p)g[p]=7;for(p=280;p<288;++p)g[p]=8;var v=new t(32);for(p=0;p<32;++p)v[p]=5;var A=m(g,9,1),y=m(v,5,1),x=function(e){for(var t=e[0],r=1;rt&&(t=e[r]);return t},b=function(e,t,r){var n=t/8|0;return(e[n]|e[n+1]<<8)>>(7&t)&r},w=function(e,t){var r=t/8|0;return(e[r]|e[r+1]<<8|e[r+2]<<16)>>(7&t)},E=["unexpected EOF","invalid block type","invalid length/literal","invalid distance","stream finished","no stream handler",,"no callback","invalid UTF-8 data","extra field too long","date not in range 1980-2099","filename too long","stream finishing","invalid zip data"],_=function(e,t,r){var n=new Error(t||E[e]);if(n.code=e,Error.captureStackTrace&&Error.captureStackTrace(n,_),!r)throw n;return n},S=function(e,o,l){var 
u=e.length;if(!u||l&&!l.l&&u<5)return o||new t(0);var d=!o||l,p=!l||l.i;l||(l={}),o||(o=new t(3*u));var f,g=function(e){var r=o.length;if(e>r){var n=new t(Math.max(2*r,e));n.set(o),o=n}},v=l.f||0,E=l.p||0,S=l.b||0,M=l.l,T=l.d,C=l.m,I=l.n,B=8*u;do{if(!M){l.f=v=b(e,E,1);var R=b(e,E+1,3);if(E+=3,!R){var P=e[(H=((f=E)/8|0)+(7&f&&1)+4)-4]|e[H-3]<<8,F=H+P;if(F>u){p&&_(0);break}d&&g(S+P),o.set(e.subarray(H,F),S),l.b=S+=P,l.p=E=8*F;continue}if(1==R)M=A,T=y,C=9,I=5;else if(2==R){var L=b(e,E,31)+257,D=b(e,E+10,15)+4,U=L+b(e,E+5,31)+1;E+=14;for(var N=new t(U),k=new t(19),O=0;O>>4)<16)N[O++]=H;else{var j=0,W=0;for(16==H?(W=3+b(e,E,3),E+=2,j=N[O-1]):17==H?(W=3+b(e,E,7),E+=3):18==H&&(W=11+b(e,E,127),E+=7);W--;)N[O++]=j}}var X=N.subarray(0,L),Y=N.subarray(L);C=x(X),I=x(Y),M=m(X,C,1),T=m(Y,I,1)}else _(1);if(E>B){p&&_(0);break}}d&&g(S+131072);for(var q=(1<>>4;if((E+=15&j)>B){p&&_(0);break}if(j||_(2),Z<256)o[S++]=Z;else{if(256==Z){J=E,M=null;break}var $=Z-254;if(Z>264){var ee=i[O=Z-257];$=b(e,E,(1<>>4;if(te||_(3),E+=15&te,Y=h[re],re>3&&(ee=a[re],Y+=w(e,E)&(1<B){p&&_(0);break}d&&g(S+131072);for(var ne=S+$;Se.length)&&(a=e.length);var s=new(e instanceof r?r:e instanceof n?n:t)(a-i);return s.set(e.subarray(i,a)),s}(o,0,S)},M=new t(0),T="undefined"!=typeof TextDecoder&&new TextDecoder;try{T.decode(M,{stream:!0})}catch(e){}return e.convert_streams=function(e){var t=new DataView(e),r=0;function n(){var e=t.getUint16(r);return r+=2,e}function i(){var e=t.getUint32(r);return r+=4,e}function a(e){v.setUint16(A,e),A+=2}function s(e){v.setUint32(A,e),A+=4}for(var o={signature:i(),flavor:i(),length:i(),numTables:n(),reserved:n(),totalSfntSize:i(),majorVersion:n(),minorVersion:n(),metaOffset:i(),metaLength:i(),metaOrigLength:i(),privOffset:i(),privLength:i()},l=0;Math.pow(2,l)<=o.numTables;)l++;l--;for(var c=16*Math.pow(2,l),u=16*o.numTables-c,h=12,d=[],p=0;p{let[a,s]=n.split("+");a=parseInt(a,36),s=s?parseInt(s,36):0,i.set(r+=a,e[t]);for(let o=s;o--;)i.set(++r,e[t])}))}}return 
i.get(e)||32}const s=[null,"isol","init","fina","medi"];function o(e){const t=new Uint8Array(e.length);let r=32,n=1,i=-1;for(let s=0;s65535&&s++)}return t}function l(t){const n=Object.create(null),i={unitsPerEm:t.head.unitsPerEm,ascender:t.hhea.ascender,descender:t.hhea.descender,forEachGlyph(a,l,c,u){let h=0;const d=1/i.unitsPerEm*l,p=function(t,r){const n=[];for(let a=0;a65535&&a++,n.push(e.U.codeToGlyph(t,i))}const i=t.GSUB;if(i){const{lookupList:t,featureList:a}=i;let l;const c=/^(rlig|liga|mset|isol|init|fina|medi|half|pres|blws)$/,u=[];a.forEach((i=>{if(c.test(i.tag))for(let a=0;a{if(-1!==i){let a=n[i];if(!a){const{cmds:s,crds:o}=e.U.glyphToPath(t,i);let l,c,u,h,d="",p=0;for(let e=0,t=s.length;e1?",":"")+o[p++]}if(o.length){l=c=1/0,u=h=-1/0;for(let e=0,t=o.length;eu&&(u=t),r>h&&(h=r)}}else l=u=c=h=0;a=n[i]={index:i,advanceWidth:t.hmtx.aWidth[i],xMin:l,yMin:c,xMax:u,yMax:h,path:d,pathCommandCount:s.length}}-1!==m&&(h+=e.U.getPairAdjustment(t,m,i)*d),u.call(null,a,h,f),a.advanceWidth&&(h+=a.advanceWidth*d),c&&(h+=c*l),m=i}f+=a.codePointAt(f)>65535?2:1})),h}};return i}return function(r){const n=new Uint8Array(r,0,4),i=e._bin.readASCII(n,0,4);if("wOFF"===i)r=t(r);else if("wOF2"===i)throw new Error("woff2 fonts not supported");return l(e.parse(r)[0])}}],init:(e,t,r)=>r(e(),t())}),ha={defaultFontURL:"https://fonts.gstatic.com/s/roboto/v18/KFOmCnqEu92Fr1Mu4mxM.woff",sdfGlyphSize:64,sdfMargin:1/16,sdfExponent:9,textureWidth:2048},da=new s.Color;let pa=!1;function fa(){return(self.performance||Date).now()}const ma=Object.create(null);function ga(e,t){pa=!0,e=Aa({},e);const r=fa();if(e.font=function(e){ya||(ya="undefined"==typeof document?{}:document.createElement("a"));return ya.href=e,ya.href}(e.font||ha.defaultFontURL),e.text=""+e.text,e.sdfGlyphSize=e.sdfGlyphSize||ha.sdfGlyphSize,null!=e.colorRanges){let t={};for(let r in e.colorRanges)if(e.colorRanges.hasOwnProperty(r)){let n=e.colorRanges[r];"number"!=typeof 
n&&(n=da.set(n).getHex()),t[r]=n}e.colorRanges=t}Object.freeze(e);const{textureWidth:n,sdfExponent:i}=ha,{sdfGlyphSize:a}=e,o=n/a*4;let l=ma[a];if(!l){const e=document.createElement("canvas");e.width=n,e.height=256*a/o,l=ma[a]={glyphCount:0,sdfGlyphSize:a,sdfCanvas:e,sdfTexture:new s.Texture(e,void 0,void 0,void 0,s.LinearFilter,s.LinearFilter),contextLost:!1,glyphsByFont:new Map},l.sdfTexture.generateMipmaps=!1,function(e){const t=e.sdfCanvas;t.addEventListener("webglcontextlost",(t=>{console.log("Context Lost",t),t.preventDefault(),e.contextLost=!0})),t.addEventListener("webglcontextrestored",(t=>{console.log("Context Restored",t),e.contextLost=!1;const r=[];e.glyphsByFont.forEach((t=>{t.forEach((t=>{r.push(va(t,e,!0))}))})),Bi.all(r).then((()=>{xa(e),e.sdfTexture.needsUpdate=!0}))}))}(l)}const{sdfTexture:c,sdfCanvas:u}=l;let h=l.glyphsByFont.get(e.font);h||l.glyphsByFont.set(e.font,h=new Map),wa(e).then((s=>{const{glyphIds:d,glyphPositions:p,fontSize:f,unitsPerEm:m,timings:g}=s,v=[],A=new Float32Array(4*d.length),y=f/m;let x=0,b=0;const w=fa();d.forEach(((e,t)=>{let r=h.get(e);if(!r){const{path:t,pathBounds:n}=s.glyphData[e],i=Math.max(n[2]-n[0],n[3]-n[1])/a*(ha.sdfMargin*a+.5),o=l.glyphCount++,c=[n[0]-i,n[1]-i,n[2]+i,n[3]+i];h.set(e,r={path:t,atlasIndex:o,sdfViewBox:c}),v.push(r)}const{sdfViewBox:n}=r,i=p[b++],o=p[b++];A[x++]=i+n[0]*y,A[x++]=o+n[1]*y,A[x++]=i+n[2]*y,A[x++]=o+n[3]*y,d[t]=r.atlasIndex})),g.quads=(g.quads||0)+(fa()-w);const E=fa();g.sdf={};const _=u.height,S=Math.ceil(l.glyphCount/o),M=Math.pow(2,Math.ceil(Math.log2(S*a)));M>_&&(console.info(`Increasing SDF texture size 
${_}->${M}`),ca(u,n,M),c.dispose()),Bi.all(v.map((t=>va(t,l,e.gpuAccelerateSDF).then((({timing:e})=>{g.sdf[t.atlasIndex]=e}))))).then((()=>{v.length&&!l.contextLost&&(xa(l),c.needsUpdate=!0),g.sdfTotal=fa()-E,g.total=fa()-r,t(Object.freeze({parameters:e,sdfTexture:c,sdfGlyphSize:a,sdfExponent:i,glyphBounds:A,glyphAtlasIndices:d,glyphColors:s.glyphColors,caretPositions:s.caretPositions,caretHeight:s.caretHeight,chunkedBounds:s.chunkedBounds,ascender:s.ascender,descender:s.descender,lineHeight:s.lineHeight,topBaseline:s.topBaseline,blockBounds:s.blockBounds,visibleBounds:s.visibleBounds,timings:s.timings,get totalBounds(){return console.log("totalBounds deprecated, use blockBounds instead"),s.blockBounds},get totalBlockSize(){console.log("totalBlockSize deprecated, use blockBounds instead");const[e,t,r,n]=s.blockBounds;return[r-e,n-t]}}))}))})),Bi.all([]).then((()=>{var e;l.contextLost||(e=u)._warm||(aa.webgl.isSupported(e),e._warm=!0)}))}function va({path:e,atlasIndex:t,sdfViewBox:r},{sdfGlyphSize:n,sdfCanvas:i,contextLost:a},s){if(a)return Promise.resolve({timing:-1});const{textureWidth:o,sdfExponent:l}=ha,c=Math.max(r[2]-r[0],r[3]-r[1]),u=Math.floor(t/4);return function(e,t,r,n,i,a,s,o,l,c,u=!0){return u?oa(e,t,r,n,i,a,s,o,l,c).then(null,(u=>(sa||(console.warn("WebGL SDF generation failed, falling back to JS",u),sa=!0),la(e,t,r,n,i,a,s,o,l,c)))):la(e,t,r,n,i,a,s,o,l,c)}(n,n,e,r,c,l,i,u%(o/n)*n,Math.floor(u/(o/n))*n,t%4,s)}function Aa(e,t){for(let r in t)t.hasOwnProperty(r)&&(e[r]=t[r]);return e}let ya;function xa(e){if("function"!=typeof createImageBitmap){console.info("Safari<15: applying SDF canvas workaround");const{sdfCanvas:t,sdfTexture:r}=e,{width:n,height:i}=t,a=e.sdfCanvas.getContext("webgl");let s=r.image.data;s&&s.length===n*i*4||(s=new Uint8Array(n*i*4),r.image={width:n,height:i,data:s},r.flipY=!1,r.isDataTexture=!0),a.readPixels(0,0,n,i,a.RGBA,a.UNSIGNED_BYTE,s)}}const 
ba=Oi({name:"Typesetter",dependencies:[ha,ua,function(e,t,r){const{defaultFontURL:n}=r,i=Object.create(null),a=1/0,s=/[\u00AD\u034F\u061C\u115F-\u1160\u17B4-\u17B5\u180B-\u180E\u200B-\u200F\u202A-\u202E\u2060-\u206F\u3164\uFE00-\uFE0F\uFEFF\uFFA0\uFFF0-\uFFF8]/,o=/[\s\-\u007C\u00AD\u2010\u2012-\u2014\u2027\u2056\u2E17\u2E40]/;function l(t,r){t||(t=n);let a=i[t];a?a.pending?a.pending.push(r):r(a):(i[t]={pending:[r]},function(t,r){!function i(){const a=e=>{console.error(`Failure loading font ${t}${t===n?"":"; trying fallback"}`,e),t!==n&&(t=n,i())};try{const n=new XMLHttpRequest;n.open("get",t,!0),n.responseType="arraybuffer",n.onload=function(){if(n.status>=400)a(new Error(n.statusText));else if(n.status>0)try{const t=e(n.response);r(t)}catch(Ha){a(Ha)}},n.onerror=a,n.send()}catch(s){a(s)}}()}(t,(e=>{let r=i[t].pending;i[t]=e,r.forEach((t=>t(e)))})))}function c({text:e="",font:r=n,sdfGlyphSize:i=64,fontSize:c=1,letterSpacing:f=0,lineHeight:m="normal",maxWidth:g=a,direction:v,textAlign:A="left",textIndent:y=0,whiteSpace:x="normal",overflowWrap:b="normal",anchorX:w=0,anchorY:E=0,includeCaretPositions:_=!1,chunkedBoundsSize:S=8192,colorRanges:M=null},T,C=!1){const I=d(),B={fontLoad:0,typesetting:0};e.indexOf("\r")>-1&&(console.info("Typesetter: got text with \\r chars; normalizing to \\n"),e=e.replace(/\r\n/g,"\n").replace(/\r/g,"\n")),c=+c,f=+f,g=+g,m=m||"normal",y=+y,l(r,(r=>{const n=isFinite(g);let i=null,l=null,R=null,P=null,F=null,L=null,D=null,U=0,N=0,k="nowrap"!==x;const{ascender:O,descender:G,unitsPerEm:z}=r;B.fontLoad=d()-I;const V=d(),H=c/z;"normal"===m&&(m=(O-G)/z);const Q=((m*=c)-(O-G)*H)/2,j=-(O*H+Q),W=Math.min(m,(O-G)*H),X=(O+G)/2*H-W/2;let Y=y,q=new p;const K=[q];r.forEachGlyph(e,c,f,((t,r,i)=>{const a=e.charAt(i),l=t.advanceWidth*H,u=q.count;let h;if("isEmpty"in 
t||(t.isWhitespace=!!a&&/\s/.test(a),t.canBreakAfter=!!a&&o.test(a),t.isEmpty=t.xMin===t.xMax||t.yMin===t.yMax||s.test(a)),t.isWhitespace||t.isEmpty||N++,k&&n&&!t.isWhitespace&&r+l+Y>g&&u){if(q.glyphAt(u-1).glyphObj.canBreakAfter)h=new p,Y=-r;else for(let e=u;e--;){if(0===e&&"break-word"===b){h=new p,Y=-r;break}if(q.glyphAt(e).glyphObj.canBreakAfter){h=q.splitAt(e+1);const t=h.glyphAt(0).x;Y-=t;for(let e=h.count;e--;)h.glyphAt(e).x-=t;break}}h&&(q.isSoftWrapped=!0,q=h,K.push(q),U=g)}let d=q.glyphAt(q.count);d.glyphObj=t,d.x=r+Y,d.width=l,d.charIndex=i,"\n"===a&&(q=new p,K.push(q),Y=-(r+l+f*c)+y)})),K.forEach((e=>{for(let t=e.count;t--;){let{glyphObj:r,x:n,width:i}=e.glyphAt(t);if(!r.isWhitespace)return e.width=n+i,void(e.width>U&&(U=e.width))}}));let J=0,Z=0;if(w&&("number"==typeof w?J=-w:"string"==typeof w&&(J=-U*("left"===w?0:"center"===w?.5:"right"===w?1:u(w)))),E)if("number"==typeof E)Z=-E;else if("string"==typeof E){let e=K.length*m;Z="top"===E?0:"top-baseline"===E?-j:"middle"===E?e/2:"bottom"===E?e:"bottom-baseline"===E?e-Q+G*H:u(E)*e}if(!C){const n=t.getEmbeddingLevels(e,v);i=new Uint16Array(N),l=new Float32Array(2*N),R={},L=[a,a,-1/0,-1/0],D=[];let s=j;_&&(F=new Float32Array(3*e.length)),M&&(P=new Uint8Array(3*N));let o,c,u=0,d=-1,p=-1;if(K.forEach(((f,g)=>{let{count:v,width:y}=f;if(v>0){let m=0;for(let e=v;e--&&f.glyphAt(e).glyphObj.isWhitespace;)m++;let g=0,x=0;if("center"===A)g=(U-y)/2;else if("right"===A)g=U-y;else if("justify"===A&&f.isSoftWrapped){let e=0;for(let t=v-m;t--;)f.glyphAt(t).glyphObj.isWhitespace&&e++;x=(U-y)/e}if(x||g){let e=0;for(let t=0;t=t){let t=e,a=e;for(;ar)break;aw=e;for(let A=0;A1&&h(F,d,n),d=e}if(M){const{charIndex:e}=m;for(;e>p;)p++,M.hasOwnProperty(p)&&(c=M[p])}if(!w.isWhitespace&&!w.isEmpty){const e=u++;R[g]||(R[g]={path:w.path,pathBounds:[w.xMin,w.yMin,w.xMax,w.yMax]});const t=m.x+J,r=s+Z;l[2*e]=t,l[2*e+1]=r;const 
n=t+w.xMin*H,h=r+w.yMin*H,d=t+w.xMax*H,p=r+w.yMax*H;nL[2]&&(L[2]=d),p>L[3]&&(L[3]=p),e%S==0&&(o={start:e,end:e,rect:[a,a,-1/0,-1/0]},D.push(o)),o.end++;const f=o.rect;if(nf[2]&&(f[2]=d),p>f[3]&&(f[3]=p),i[e]=g,M){const t=3*e;P[t]=c>>16&255,P[t+1]=c>>8&255,P[t+2]=255&c}}}}s-=m})),F){const t=e.length-d;t>1&&h(F,d,t)}}B.typesetting=d()-V,T({glyphIds:i,glyphPositions:l,glyphData:R,caretPositions:F,caretHeight:W,glyphColors:P,chunkedBounds:D,fontSize:c,unitsPerEm:z,ascender:O*H,descender:G*H,lineHeight:m,topBaseline:j,blockBounds:[J,Z-K.length*m,J+U,Z],visibleBounds:L,timings:B})}))}function u(e){let t=e.match(/^([\d.]+)%$/),r=t?parseFloat(t[1]):NaN;return isNaN(r)?0:r/100}function h(e,t,r){const n=e[3*t],i=e[3*t+1],a=e[3*t+2],s=(i-n)/r;for(let o=0;o(Object.defineProperty(e,t,{get(){return this.data[this.index*f.length+r]},set(e){this.data[this.index*f.length+r]=e}}),e)),{data:null,index:0}),{typeset:c,measure:function(e,t){c(e,(e=>{const[r,n,i,a]=e.blockBounds;t({width:i-r,height:a-n})}),{metricsOnly:!0})},loadFont:l}},Qi],init(e,t,r,n){const{defaultFontURL:i}=e;return r(t,n(),{defaultFontURL:i})}}),wa=Oi({name:"Typesetter",dependencies:[ba,Vi],init:(e,t)=>function(r){const n=new t;return e.typeset(r,n.resolve),n},getTransferables(e){const t=[e.glyphPositions.buffer,e.glyphIds.buffer];return e.caretPositions&&t.push(e.caretPositions.buffer),e.glyphColors&&t.push(e.glyphColors.buffer),t}});const Ea=(()=>{const e={};const t="aTroikaGlyphIndex";class r extends s.InstancedBufferGeometry{constructor(){super(),this.detail=1,this.curveRadius=0,this.groups=[{start:0,count:1/0,materialIndex:0},{start:0,count:1/0,materialIndex:1}],this.boundingSphere=new s.Sphere,this.boundingBox=new s.Box3}computeBoundingSphere(){}computeBoundingBox(){}setSide(e){const t=this.getIndex().count;this.setDrawRange(e===s.BackSide?t/2:0,e===s.DoubleSide?t:t/2)}set detail(t){if(t!==this._detail){this._detail=t,("number"!=typeof t||t<1)&&(t=1);let r=function(t){let r=e[t];if(!r){const n=new 
s.PlaneBufferGeometry(1,1,t,t),i=n.clone(),a=n.attributes,o=i.attributes,l=new s.BufferGeometry,c=a.uv.count;for(let e=0;e{l.setAttribute(e,new s.Float32BufferAttribute([...a[e].array,...o[e].array],a[e].itemSize))})),l.setIndex([...n.index.array,...i.index.array.map((e=>e+c))]),l.translate(.5,.5,0),r=e[t]=l}return r}(t);["position","normal","uv"].forEach((e=>{this.attributes[e]=r.attributes[e].clone()})),this.setIndex(r.getIndex().clone())}}get detail(){return this._detail}set curveRadius(e){e!==this._curveRadius&&(this._curveRadius=e,this._updateBounds())}get curveRadius(){return this._curveRadius}updateGlyphs(e,r,a,s,o){n(this,"aTroikaGlyphBounds",e,4),n(this,t,r,1),n(this,"aTroikaGlyphColor",o,3),this._blockBounds=a,this._chunkedBounds=s,i(this,r.length),this._updateBounds()}_updateBounds(){const e=this._blockBounds;if(e){const{curveRadius:t,boundingBox:r}=this;if(t){const{PI:n,floor:i,min:a,max:s,sin:o,cos:l}=Math,c=n/2,u=2*n,h=Math.abs(t),d=e[0]/h,p=e[2]/h,f=i((d+c)/u)!==i((p+c)/u)?-h:a(o(d)*h,o(p)*h),m=i((d-c)/u)!==i((p-c)/u)?h:s(o(d)*h,o(p)*h),g=i((d+n)/u)!==i((p+n)/u)?2*h:s(h-l(d)*h,h-l(p)*h);r.min.set(f,e[1],t<0?-g:0),r.max.set(m,e[3],t<0?0:g)}else r.min.set(e[0],e[1],0),r.max.set(e[2],e[3],0);r.getBoundingSphere(this.boundingSphere)}}applyClipRect(e){let r=this.getAttribute(t).count,n=this._chunkedBounds;if(n)for(let t=n.length;t--;){r=n[t].end;let i=n[t].rect;if(i[1]e.y&&i[0]e.x)break}i(this,r)}}function n(e,t,r,n){const i=e.getAttribute(t);r?i&&i.array.length===r.length?(i.array.set(r),i.needsUpdate=!0):(e.setAttribute(t,new s.InstancedBufferAttribute(r,n)),delete e._maxInstanceCount,e.dispose()):i&&e.deleteAttribute(t)}function i(e,t){e[e.hasOwnProperty("instanceCount")?"instanceCount":"maxInstancedCount"]=t}return r.prototype.setAttribute||(r.prototype.setAttribute=function(e,t){return this.attributes[e]=t,this}),r})();const _a=(()=>{const e=new s.MeshBasicMaterial({color:16777215,side:s.DoubleSide,transparent:!0}),t=8421504,r=new s.Matrix4,n=new 
s.Vector3,i=new s.Vector3,a=[],o=new s.Vector3,l="+x+y";function c(e){return Array.isArray(e)?e[0]:e}let u=()=>{const t=new s.Mesh(new s.PlaneBufferGeometry(1,1),e);return u=()=>t,t},h=()=>{const t=new s.Mesh(new s.PlaneBufferGeometry(1,1,32,1),e);return h=()=>t,t};const d={type:"syncstart"},p={type:"synccomplete"},f=["font","fontSize","letterSpacing","lineHeight","maxWidth","overflowWrap","text","direction","textAlign","textIndent","whiteSpace","anchorX","anchorY","colorRanges","sdfGlyphSize"],m=f.concat("material","color","depthOffset","clipRect","curveRadius","orientation","glyphGeometryDetail");class g extends s.Mesh{constructor(){super(new Ea,null),this.text="",this.anchorX=0,this.anchorY=0,this.curveRadius=0,this.direction="auto",this.font=null,this.fontSize=.1,this.letterSpacing=0,this.lineHeight="normal",this.maxWidth=1/0,this.overflowWrap="normal",this.textAlign="left",this.textIndent=0,this.whiteSpace="normal",this.material=null,this.color=null,this.colorRanges=null,this.outlineWidth=0,this.outlineColor=0,this.outlineOpacity=1,this.outlineBlur=0,this.outlineOffsetX=0,this.outlineOffsetY=0,this.strokeWidth=0,this.strokeColor=t,this.strokeOpacity=1,this.fillOpacity=1,this.depthOffset=0,this.clipRect=null,this.orientation=l,this.glyphGeometryDetail=1,this.sdfGlyphSize=null,this.gpuAccelerateSDF=!0,this.debugSDF=!1}sync(e){this._needsSync&&(this._needsSync=!1,this._isSyncing?(this._queuedSyncs||(this._queuedSyncs=[])).push(e):(this._isSyncing=!0,this.dispatchEvent(d),ga({text:this.text,font:this.font,fontSize:this.fontSize||.1,letterSpacing:this.letterSpacing||0,lineHeight:this.lineHeight||"normal",maxWidth:this.maxWidth,direction:this.direction||"auto",textAlign:this.textAlign,textIndent:this.textIndent,whiteSpace:this.whiteSpace,overflowWrap:this.overflowWrap,anchorX:this.anchorX,anchorY:this.anchorY,colorRanges:this.colorRanges,includeCaretPositions:!0,sdfGlyphSize:this.sdfGlyphSize,gpuAccelerateSDF:this.gpuAccelerateSDF},(t=>{this._isSyncing=!1,this._textR
enderInfo=t,this.geometry.updateGlyphs(t.glyphBounds,t.glyphAtlasIndices,t.blockBounds,t.chunkedBounds,t.glyphColors);const r=this._queuedSyncs;r&&(this._queuedSyncs=null,this._needsSync=!0,this.sync((()=>{r.forEach((e=>e&&e()))}))),this.dispatchEvent(p),e&&e()}))))}onBeforeRender(e,t,r,n,i,a){this.sync(),i.isTroikaTextMaterial&&this._prepareForRender(i),i._hadOwnSide=i.hasOwnProperty("side"),this.geometry.setSide(i._actualSide=i.side),i.side=s.FrontSide}onAfterRender(e,t,r,n,i,a){i._hadOwnSide?i.side=i._actualSide:delete i.side}dispose(){this.geometry.dispose()}get textRenderInfo(){return this._textRenderInfo||null}get material(){let t=this._derivedMaterial;const r=this._baseMaterial||this._defaultMaterial||(this._defaultMaterial=e.clone());if(t&&t.baseMaterial===r||(t=this._derivedMaterial=function(e){const t=$i(e,{chained:!0,extensions:{derivatives:!0},uniforms:{uTroikaSDFTexture:{value:null},uTroikaSDFTextureSize:{value:new s.Vector2},uTroikaSDFGlyphSize:{value:0},uTroikaSDFExponent:{value:0},uTroikaTotalBounds:{value:new s.Vector4(0,0,0,0)},uTroikaClipRect:{value:new s.Vector4(0,0,0,0)},uTroikaDistanceOffset:{value:0},uTroikaOutlineOpacity:{value:0},uTroikaFillOpacity:{value:1},uTroikaPositionOffset:{value:new s.Vector2},uTroikaCurveRadius:{value:0},uTroikaBlurRadius:{value:0},uTroikaStrokeWidth:{value:0},uTroikaStrokeColor:{value:new s.Color},uTroikaStrokeOpacity:{value:1},uTroikaOrient:{value:new s.Matrix3},uTroikaUseGlyphColors:{value:!0},uTroikaSDFDebug:{value:!1}},vertexDefs:"\nuniform vec2 uTroikaSDFTextureSize;\nuniform float uTroikaSDFGlyphSize;\nuniform vec4 uTroikaTotalBounds;\nuniform vec4 uTroikaClipRect;\nuniform mat3 uTroikaOrient;\nuniform bool uTroikaUseGlyphColors;\nuniform float uTroikaDistanceOffset;\nuniform float uTroikaBlurRadius;\nuniform vec2 uTroikaPositionOffset;\nuniform float uTroikaCurveRadius;\nattribute vec4 aTroikaGlyphBounds;\nattribute float aTroikaGlyphIndex;\nattribute vec3 aTroikaGlyphColor;\nvarying vec2 
vTroikaGlyphUV;\nvarying vec4 vTroikaTextureUVBounds;\nvarying float vTroikaTextureChannel;\nvarying vec3 vTroikaGlyphColor;\nvarying vec2 vTroikaGlyphDimensions;\n",vertexTransform:"\nvec4 bounds = aTroikaGlyphBounds;\nbounds.xz += uTroikaPositionOffset.x;\nbounds.yw -= uTroikaPositionOffset.y;\n\nvec4 outlineBounds = vec4(\n bounds.xy - uTroikaDistanceOffset - uTroikaBlurRadius,\n bounds.zw + uTroikaDistanceOffset + uTroikaBlurRadius\n);\nvec4 clippedBounds = vec4(\n clamp(outlineBounds.xy, uTroikaClipRect.xy, uTroikaClipRect.zw),\n clamp(outlineBounds.zw, uTroikaClipRect.xy, uTroikaClipRect.zw)\n);\n\nvec2 clippedXY = (mix(clippedBounds.xy, clippedBounds.zw, position.xy) - bounds.xy) / (bounds.zw - bounds.xy);\n\nposition.xy = mix(bounds.xy, bounds.zw, clippedXY);\n\nuv = (position.xy - uTroikaTotalBounds.xy) / (uTroikaTotalBounds.zw - uTroikaTotalBounds.xy);\n\nfloat rad = uTroikaCurveRadius;\nif (rad != 0.0) {\n float angle = position.x / rad;\n position.xz = vec2(sin(angle) * rad, rad - cos(angle) * rad);\n normal.xz = vec2(sin(angle), cos(angle));\n}\n \nposition = uTroikaOrient * position;\nnormal = uTroikaOrient * normal;\n\nvTroikaGlyphUV = clippedXY.xy;\nvTroikaGlyphDimensions = vec2(bounds[2] - bounds[0], bounds[3] - bounds[1]);\n\n\nfloat txCols = uTroikaSDFTextureSize.x / uTroikaSDFGlyphSize;\nvec2 txUvPerSquare = uTroikaSDFGlyphSize / uTroikaSDFTextureSize;\nvec2 txStartUV = txUvPerSquare * vec2(\n mod(floor(aTroikaGlyphIndex / 4.0), txCols),\n floor(floor(aTroikaGlyphIndex / 4.0) / txCols)\n);\nvTroikaTextureUVBounds = vec4(txStartUV, vec2(txStartUV) + txUvPerSquare);\nvTroikaTextureChannel = mod(aTroikaGlyphIndex, 4.0);\n",fragmentDefs:"\nuniform sampler2D uTroikaSDFTexture;\nuniform vec2 uTroikaSDFTextureSize;\nuniform float uTroikaSDFGlyphSize;\nuniform float uTroikaSDFExponent;\nuniform float uTroikaDistanceOffset;\nuniform float uTroikaFillOpacity;\nuniform float uTroikaOutlineOpacity;\nuniform float uTroikaBlurRadius;\nuniform vec3 
uTroikaStrokeColor;\nuniform float uTroikaStrokeWidth;\nuniform float uTroikaStrokeOpacity;\nuniform bool uTroikaSDFDebug;\nvarying vec2 vTroikaGlyphUV;\nvarying vec4 vTroikaTextureUVBounds;\nvarying float vTroikaTextureChannel;\nvarying vec2 vTroikaGlyphDimensions;\n\nfloat troikaSdfValueToSignedDistance(float alpha) {\n // Inverse of exponential encoding in webgl-sdf-generator\n \n float maxDimension = max(vTroikaGlyphDimensions.x, vTroikaGlyphDimensions.y);\n float absDist = (1.0 - pow(2.0 * (alpha > 0.5 ? 1.0 - alpha : alpha), 1.0 / uTroikaSDFExponent)) * maxDimension;\n float signedDist = absDist * (alpha > 0.5 ? -1.0 : 1.0);\n return signedDist;\n}\n\nfloat troikaGlyphUvToSdfValue(vec2 glyphUV) {\n vec2 textureUV = mix(vTroikaTextureUVBounds.xy, vTroikaTextureUVBounds.zw, glyphUV);\n vec4 rgba = texture2D(uTroikaSDFTexture, textureUV);\n float ch = floor(vTroikaTextureChannel + 0.5); //NOTE: can't use round() in WebGL1\n return ch == 0.0 ? rgba.r : ch == 1.0 ? rgba.g : ch == 2.0 ? rgba.b : rgba.a;\n}\n\nfloat troikaGlyphUvToDistance(vec2 uv) {\n return troikaSdfValueToSignedDistance(troikaGlyphUvToSdfValue(uv));\n}\n\nfloat troikaGetAADist() {\n \n #if defined(GL_OES_standard_derivatives) || __VERSION__ >= 300\n return length(fwidth(vTroikaGlyphUV * vTroikaGlyphDimensions)) * 0.5;\n #else\n return vTroikaGlyphDimensions.x / 64.0;\n #endif\n}\n\nfloat troikaGetFragDistValue() {\n vec2 clampedGlyphUV = clamp(vTroikaGlyphUV, 0.5 / uTroikaSDFGlyphSize, 1.0 - 0.5 / uTroikaSDFGlyphSize);\n float distance = troikaGlyphUvToDistance(clampedGlyphUV);\n \n // Extrapolate distance when outside bounds:\n distance += clampedGlyphUV == vTroikaGlyphUV ? 
0.0 : \n length((vTroikaGlyphUV - clampedGlyphUV) * vTroikaGlyphDimensions);\n\n \n\n return distance;\n}\n\nfloat troikaGetEdgeAlpha(float distance, float distanceOffset, float aaDist) {\n #if defined(IS_DEPTH_MATERIAL) || defined(IS_DISTANCE_MATERIAL)\n float alpha = step(-distanceOffset, -distance);\n #else\n\n float alpha = smoothstep(\n distanceOffset + aaDist,\n distanceOffset - aaDist,\n distance\n );\n #endif\n\n return alpha;\n}\n",fragmentColorTransform:"\nfloat aaDist = troikaGetAADist();\nfloat fragDistance = troikaGetFragDistValue();\nfloat edgeAlpha = uTroikaSDFDebug ?\n troikaGlyphUvToSdfValue(vTroikaGlyphUV) :\n troikaGetEdgeAlpha(fragDistance, uTroikaDistanceOffset, max(aaDist, uTroikaBlurRadius));\n\n#if !defined(IS_DEPTH_MATERIAL) && !defined(IS_DISTANCE_MATERIAL)\nvec4 fillRGBA = gl_FragColor;\nfillRGBA.a *= uTroikaFillOpacity;\nvec4 strokeRGBA = uTroikaStrokeWidth == 0.0 ? fillRGBA : vec4(uTroikaStrokeColor, uTroikaStrokeOpacity);\nif (fillRGBA.a == 0.0) fillRGBA.rgb = strokeRGBA.rgb;\ngl_FragColor = mix(fillRGBA, strokeRGBA, smoothstep(\n -uTroikaStrokeWidth - aaDist,\n -uTroikaStrokeWidth + aaDist,\n fragDistance\n));\ngl_FragColor.a *= edgeAlpha;\n#endif\n\nif (edgeAlpha == 0.0) {\n discard;\n}\n",customRewriter({vertexShader:e,fragmentShader:t}){let r=/\buniform\s+vec3\s+diffuse\b/;return r.test(t)&&(t=t.replace(r,"varying vec3 vTroikaGlyphColor").replace(/\bdiffuse\b/g,"vTroikaGlyphColor"),r.test(e)||(e=e.replace(ji,"uniform vec3 diffuse;\n$&\nvTroikaGlyphColor = uTroikaUseGlyphColors ? 
aTroikaGlyphColor / 255.0 : diffuse;\n"))),{vertexShader:e,fragmentShader:t}}});return t.transparent=!0,Object.defineProperties(t,{isTroikaTextMaterial:{value:!0},shadowSide:{get(){return this.side},set(){}}}),t}(r),r.addEventListener("dispose",(function e(){r.removeEventListener("dispose",e),t.dispose()}))),this.outlineWidth||this.outlineBlur||this.outlineOffsetX||this.outlineOffsetY){let e=t._outlineMtl;return e||(e=t._outlineMtl=Object.create(t,{id:{value:t.id+.1}}),e.isTextOutlineMaterial=!0,e.depthWrite=!1,e.map=null,t.addEventListener("dispose",(function r(){t.removeEventListener("dispose",r),e.dispose()}))),[e,t]}return t}set material(e){e&&e.isTroikaTextMaterial?(this._derivedMaterial=e,this._baseMaterial=e.baseMaterial):this._baseMaterial=e}get glyphGeometryDetail(){return this.geometry.detail}set glyphGeometryDetail(e){this.geometry.detail=e}get curveRadius(){return this.geometry.curveRadius}set curveRadius(e){this.geometry.curveRadius=e}get customDepthMaterial(){return c(this.material).getDepthMaterial()}get customDistanceMaterial(){return c(this.material).getDistanceMaterial()}_prepareForRender(e){const a=e.isTextOutlineMaterial,c=e.uniforms,u=this.textRenderInfo;if(u){const{sdfTexture:e,blockBounds:r}=u;c.uTroikaSDFTexture.value=e,c.uTroikaSDFTextureSize.value.set(e.image.width,e.image.height),c.uTroikaSDFGlyphSize.value=u.sdfGlyphSize,c.uTroikaSDFExponent.value=u.sdfExponent,c.uTroikaTotalBounds.value.fromArray(r),c.uTroikaUseGlyphColors.value=!a&&!!u.glyphColors;let n,i,s,o=0,l=0,h=0,d=0,p=0;if(a){let{outlineWidth:e,outlineOffsetX:t,outlineOffsetY:r,outlineBlur:i,outlineOpacity:a}=this;o=this._parsePercent(e)||0,l=Math.max(0,this._parsePercent(i)||0),n=a,d=this._parsePercent(t)||0,p=this._parsePercent(r)||0}else 
h=Math.max(0,this._parsePercent(this.strokeWidth)||0),h&&(s=this.strokeColor,c.uTroikaStrokeColor.value.set(null==s?t:s),i=this.strokeOpacity,null==i&&(i=1)),n=this.fillOpacity;c.uTroikaDistanceOffset.value=o,c.uTroikaPositionOffset.value.set(d,p),c.uTroikaBlurRadius.value=l,c.uTroikaStrokeWidth.value=h,c.uTroikaStrokeOpacity.value=i,c.uTroikaFillOpacity.value=null==n?1:n,c.uTroikaCurveRadius.value=this.curveRadius||0;let f=this.clipRect;if(f&&Array.isArray(f)&&4===f.length)c.uTroikaClipRect.value.fromArray(f);else{const e=100*(this.fontSize||.1);c.uTroikaClipRect.value.set(r[0]-e,r[1]-e,r[2]+e,r[3]+e)}this.geometry.applyClipRect(c.uTroikaClipRect.value)}c.uTroikaSDFDebug.value=!!this.debugSDF,e.polygonOffset=!!this.depthOffset,e.polygonOffsetFactor=e.polygonOffsetUnits=this.depthOffset||0;const h=a?this.outlineColor||0:this.color;if(null==h)delete e.color;else{const t=e.hasOwnProperty("color")?e.color:e.color=new s.Color;h===t._input&&"object"!=typeof h||t.set(t._input=h)}let d=this.orientation||l;if(d!==e._orientation){let t=c.uTroikaOrient.value;d=d.replace(/[^-+xyz]/g,"");let a=d!==l&&d.match(/^([-+])([xyz])([-+])([xyz])$/);if(a){let[,e,s,l,c]=a;n.set(0,0,0)[s]="-"===e?1:-1,i.set(0,0,0)[c]="-"===l?-1:1,r.lookAt(o,n.cross(i),i),t.setFromMatrix4(r)}else t.identity();e._orientation=d}}_parsePercent(e){if("string"==typeof e){let t=e.match(/^(-?[\d.]+)%$/),r=t?parseFloat(t[1]):NaN;e=(isNaN(r)?0:r/100)*this.fontSize}return e}localPositionToTextCoords(e,t=new s.Vector2){t.copy(e);const r=this.curveRadius;return r&&(t.x=Math.atan2(e.x,Math.abs(r)-Math.abs(e.z))*Math.abs(r)),t}worldPositionToTextCoords(e,t=new s.Vector2){return n.copy(e),this.localPositionToTextCoords(this.worldToLocal(n),t)}raycast(e,t){const{textRenderInfo:r,curveRadius:n}=this;if(r){const i=r.blockBounds,s=n?h():u(),o=s.geometry,{position:l,uv:c}=o.attributes;for(let e=0;e{this[t]=e[t]})),this}clone(){return(new this.constructor).copy(this)}}f.forEach((e=>{const 
t="_private_"+e;Object.defineProperty(g.prototype,e,{get(){return this[t]},set(e){e!==this[t]&&(this[t]=e,this._needsSync=!0)}})}));let v=!1;return Object.defineProperty(g.prototype,"anchor",{get(){return this._deprecated_anchor},set(e){this._deprecated_anchor=e,v||(console.warn("TextMesh: `anchor` has been deprecated; use `anchorX` and `anchorY` instead."),v=!0),Array.isArray(e)?(this.anchorX=100*(+e[0]||0)+"%",this.anchorY=100*(+e[1]||0)+"%"):this.anchorX=this.anchorY=0}}),g})();new WeakMap;new WeakMap;function Sa(e,t,r=((e,t)=>e===t)){if(e===t)return!0;if(!e||!t)return!1;const n=e.length;if(t.length!==n)return!1;for(let i=0;ii.response=e)).then((()=>{n.lifespan&&n.lifespan>0&&setTimeout((()=>{const e=Ma.indexOf(i);-1!==e&&Ma.splice(e,1)}),n.lifespan)})).catch((e=>i.error=e))};if(Ma.push(i),!r)throw i.promise}const Ca=(e,t,r)=>Ta(e,t,!1,r),Ia=i.forwardRef((({anchorX:e="center",anchorY:t="middle",font:r,children:a,characters:s,onSync:l,...c},u)=>{const h=(0,o.useThree)((({invalidate:e})=>e)),[d]=i.useState((()=>new _a)),[p,f]=i.useMemo((()=>{const e=[];let t="";return i.Children.forEach(a,(r=>{"string"==typeof r||"number"==typeof r?t+=r:e.push(r)})),[e,t]}),[a]);return Ca((()=>new Promise((e=>function({font:e,characters:t,sdfGlyphSize:r},n){ga({font:e,sdfGlyphSize:r,text:Array.isArray(t)?t.join("\n"):""+t},n)}({font:r,characters:s},e)))),["troika-text",r,s]),i.useLayoutEffect((()=>{d.sync((()=>{h(),l&&l(d)}))})),i.useEffect((()=>()=>d.dispose()),[d]),i.createElement("primitive",n({object:d,ref:u,font:r,text:f,anchorX:e,anchorY:t},c),p)}));var Ba=r(8064),Ra=r(48197),Pa=r(6073),Fa=r(60809);(0,o.extend)({EffectComposer:Ba.x,RenderPass:Ra.C,ShaderPass:Pa.T});const La=()=>{try{var e=document.createElement("canvas");return!(!window.WebGL2RenderingContext||!e.getContext("webgl2"))}catch(Ha){return!1}},Da=i.forwardRef((({children:e,multisamping:t=8,renderIndex:r=1,disableGamma:a=!1,disableRenderPass:l=!1,...c},u)=>{const 
h=i.useRef(),d=(0,o.useThree)((({scene:e})=>e)),p=(0,o.useThree)((({camera:e})=>e)),f=(0,o.useThree)((({gl:e})=>e)),m=(0,o.useThree)((({size:e})=>e)),[g]=i.useState((()=>{if(La()&&t>0){const e=new s.WebGLRenderTarget(m.width,m.height,{format:s.RGBAFormat,encoding:s.sRGBEncoding});return e.samples=8,e}}));return i.useEffect((()=>{var e,t;null==(e=h.current)||e.setSize(m.width,m.height),null==(t=h.current)||t.setPixelRatio(f.getPixelRatio())}),[f,m]),(0,o.useFrame)((()=>{var e;return null==(e=h.current)?void 0:e.render()}),r),i.createElement("effectComposer",n({ref:M([u,h]),args:[f,g]},c),!l&&i.createElement("renderPass",{attachArray:"passes",args:[d,p]}),!a&&i.createElement("shaderPass",{attachArray:"passes",args:[Fa.Y]}),e)}));function Ua({stops:e,colors:t,size:r=1024,...a}){const l=(0,o.useThree)((e=>e.gl)),c=i.useMemo((()=>{const n=document.createElement("canvas"),i=n.getContext("2d");n.width=16,n.height=r;const a=i.createLinearGradient(0,0,0,r);let o=e.length;for(;o--;)a.addColorStop(e[o],t[o]);i.fillStyle=a,i.fillRect(0,0,16,r);const l=new s.Texture(n);return l.needsUpdate=!0,l}),[e]);return i.useEffect((()=>()=>{c.dispose()}),[c]),i.createElement("primitive",n({object:c,attach:"map",encoding:l.outputEncoding},a))}function Na(e,t,r,n){return class extends s.ShaderMaterial{constructor(){const i=Object.entries(e);super({uniforms:i.reduce(((e,[t,r])=>({...e,...s.UniformsUtils.clone({[t]:{value:r}})})),{}),vertexShader:t,fragmentShader:r}),i.forEach((([e])=>Object.defineProperty(this,e,{get:()=>this.uniforms[e].value,set:t=>this.uniforms[e].value=t}))),n&&n(this)}}}const ka=e=>e===Object(e)&&!Array.isArray(e)&&"function"!=typeof e;function Oa(e){const t=(0,o.useThree)((e=>e.gl)),r=(0,o.useLoader)(s.TextureLoader,ka(e)?Object.values(e):e);if((0,i.useEffect)((()=>{(Array.isArray(r)?r:[r]).forEach(t.initTexture)}),[t,r]),ka(e)){const t=Object.keys(e),n={};return t.forEach((e=>Object.assign(n,{[e]:r[t.indexOf(e)]}))),n}return 
r}Oa.preload=e=>o.useLoader.preload(s.TextureLoader,e),Oa.clear=e=>o.useLoader.clear(s.TextureLoader,e);const Ga=Na({color:new s.Color("white"),scale:[1,1],imageBounds:[1,1],map:null,zoom:1,grayscale:0},"\n varying vec2 vUv;\n void main() {\n gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(position, 1.);\n vUv = uv;\n }\n","\n // mostly from https://gist.github.com/statico/df64c5d167362ecf7b34fca0b1459a44\n varying vec2 vUv;\n uniform vec2 scale;\n uniform vec2 imageBounds;\n uniform vec3 color;\n uniform sampler2D map;\n uniform float zoom;\n uniform float grayscale;\n const vec3 luma = vec3(.299, 0.587, 0.114);\n vec4 toGrayscale(vec4 color, float intensity) {\n return vec4(mix(color.rgb, vec3(dot(color.rgb, luma)), intensity), color.a);\n }\n vec2 aspect(vec2 size) {\n return size / min(size.x, size.y);\n }\n void main() {\n vec2 s = aspect(scale);\n vec2 i = aspect(imageBounds);\n float rs = s.x / s.y;\n float ri = i.x / i.y;\n vec2 new = rs < ri ? vec2(i.x * s.y / i.y, s.y) : vec2(s.x, i.y * s.x / i.x);\n vec2 offset = (rs < ri ? 
vec2((new.x - s.x) / 2.0, 0.0) : vec2(0.0, (new.y - s.y) / 2.0)) / new;\n vec2 uv = vUv * s / new + offset;\n vec2 zUv = (uv - vec2(0.5, 0.5)) / zoom + vec2(0.5, 0.5);\n gl_FragColor = toGrayscale(texture2D(map, zUv) * vec4(color, 1.0), grayscale);\n \n #include \n #include \n }\n"),za=i.forwardRef((({children:e,color:t,segments:r=1,scale:a=1,zoom:s=1,grayscale:l=0,url:c,toneMapped:u,...h},d)=>{(0,o.extend)({ImageMaterial:Ga});const p=Oa(c),f=Array.isArray(a)?[a[0],a[1]]:[a,a],m=[p.image.width,p.image.height];return i.createElement("mesh",n({ref:d,scale:a},h),i.createElement("planeGeometry",{args:[1,1,r,r]}),i.createElement("imageMaterial",{color:t,map:p,zoom:s,grayscale:l,scale:f,imageBounds:m,toneMapped:u}),e)}));function Va({userData:e,children:t,geometry:r,threshold:a=15,color:o="black",...l}){const c=i.useRef(null);return i.useLayoutEffect((()=>{const e=c.current.parent;if(e){const t=r||e.geometry;t===c.current.userData.currentGeom&&a===c.current.userData.currentThreshold||(c.current.userData.currentGeom=t,c.current.userData.currentThreshold=a,c.current.geometry=new s.EdgesGeometry(t,a))}})),i.createElement("lineSegments",n({ref:c,raycast:()=>null},l),t||i.createElement("lineBasicMaterial",{color:o}))}function Ha(e,t){const r=new s.Matrix4,n=new s.Ray,i=new s.Sphere,a=new s.Vector3,o=this.geometry;if(i.copy(o.boundingSphere),i.applyMatrix4(this.matrixWorld),!1===e.ray.intersectSphere(i,a))return;r.copy(this.matrixWorld).invert(),n.copy(e.ray).applyMatrix4(r);const l=new s.Vector3,c=new s.Vector3,u=new s.Vector3,h=this instanceof s.LineSegments?2:1,d=o.index,p=o.attributes;if(null!==d){const r=d.array,i=p.position.array,s=p.width.array;for(let o=0,d=r.length-1;og)continue;a.applyMatrix4(this.matrixWorld);const v=e.ray.origin.distanceTo(a);ve.far||(t.push({distance:v,point:u.clone().applyMatrix4(this.matrixWorld),index:o,face:null,faceIndex:null,object:this}),o=d)}}}function Qa(e,t,r,n,i){let 
a;if(e=e.subarray||e.slice?e:e.buffer,r=r.subarray||r.slice?r:r.buffer,e=t?e.subarray?e.subarray(t,i&&t+i):e.slice(t,i&&t+i):e,r.set)r.set(e,n);else for(a=0;a0&&(r=this.copyV3(n),this.next.push(r[0],r[1],r[2]),this.next.push(r[0],r[1],r[2]))}r=this.compareV3(e-1,0)?this.copyV3(1):this.copyV3(e-1),this.next.push(r[0],r[1],r[2]),this.next.push(r[0],r[1],r[2]),this._attributes&&this._attributes.position.count===this.positions.length?(this._attributes.position.copyArray(new Float32Array(this.positions)),this._attributes.position.needsUpdate=!0,this._attributes.previous.copyArray(new Float32Array(this.previous)),this._attributes.previous.needsUpdate=!0,this._attributes.next.copyArray(new Float32Array(this.next)),this._attributes.next.needsUpdate=!0,this._attributes.side.copyArray(new Float32Array(this.side)),this._attributes.side.needsUpdate=!0,this._attributes.width.copyArray(new Float32Array(this.width)),this._attributes.width.needsUpdate=!0,this._attributes.uv.copyArray(new Float32Array(this.uvs)),this._attributes.uv.needsUpdate=!0,this._attributes.index.copyArray(new Uint16Array(this.indices_array)),this._attributes.index.needsUpdate=!0):this._attributes={position:new s.BufferAttribute(new Float32Array(this.positions),3),previous:new s.BufferAttribute(new Float32Array(this.previous),3),next:new s.BufferAttribute(new Float32Array(this.next),3),side:new s.BufferAttribute(new Float32Array(this.side),1),width:new s.BufferAttribute(new Float32Array(this.width),1),uv:new s.BufferAttribute(new Float32Array(this.uvs),2),index:new s.BufferAttribute(new Uint16Array(this.indices_array),1),counters:new s.BufferAttribute(new 
Float32Array(this.counters),1)},this.setAttribute("position",this._attributes.position),this.setAttribute("previous",this._attributes.previous),this.setAttribute("next",this._attributes.next),this.setAttribute("side",this._attributes.side),this.setAttribute("width",this._attributes.width),this.setAttribute("uv",this._attributes.uv),this.setAttribute("counters",this._attributes.counters),this.setIndex(this._attributes.index),this.computeBoundingSphere(),this.computeBoundingBox()}advance({x:e,y:t,z:r}){const n=this._attributes.position.array,i=this._attributes.previous.array,a=this._attributes.next.array,s=n.length;Qa(n,0,i,0,s),Qa(n,6,n,0,s-6),n[s-6]=e,n[s-5]=t,n[s-4]=r,n[s-3]=e,n[s-2]=t,n[s-1]=r,Qa(n,6,a,0,s-6),a[s-6]=e,a[s-5]=t,a[s-4]=r,a[s-3]=e,a[s-2]=t,a[s-1]=r,this._attributes.position.needsUpdate=!0,this._attributes.previous.needsUpdate=!0,this._attributes.next.needsUpdate=!0}}s.ShaderChunk.meshline_vert=["","#include ","",s.ShaderChunk.logdepthbuf_pars_vertex,s.ShaderChunk.fog_pars_vertex,"","attribute vec3 previous;","attribute vec3 next;","attribute float side;","attribute float width;","attribute float counters;","","uniform vec2 resolution;","uniform float lineWidth;","uniform vec3 color;","uniform float opacity;","uniform float sizeAttenuation;","","varying vec2 vUV;","varying vec4 vColor;","varying float vCounters;","","vec2 fix( vec4 i, float aspect ) {",""," vec2 res = i.xy / i.w;"," res.x *= aspect;","\t vCounters = counters;"," return res;","","}","","void main() {",""," float aspect = resolution.x / resolution.y;",""," vColor = vec4( color, opacity );"," vUV = uv;",""," mat4 m = projectionMatrix * modelViewMatrix;"," vec4 finalPosition = m * vec4( position, 1.0 );"," vec4 prevPos = m * vec4( previous, 1.0 );"," vec4 nextPos = m * vec4( next, 1.0 );",""," vec2 currentP = fix( finalPosition, aspect );"," vec2 prevP = fix( prevPos, aspect );"," vec2 nextP = fix( nextPos, aspect );",""," float w = lineWidth * width;",""," vec2 dir;"," if( nextP == 
currentP ) dir = normalize( currentP - prevP );"," else if( prevP == currentP ) dir = normalize( nextP - currentP );"," else {"," vec2 dir1 = normalize( currentP - prevP );"," vec2 dir2 = normalize( nextP - currentP );"," dir = normalize( dir1 + dir2 );",""," vec2 perp = vec2( -dir1.y, dir1.x );"," vec2 miter = vec2( -dir.y, dir.x );"," //w = clamp( w / dot( miter, perp ), 0., 4. * lineWidth * width );",""," }",""," //vec2 normal = ( cross( vec3( dir, 0. ), vec3( 0., 0., 1. ) ) ).xy;"," vec4 normal = vec4( -dir.y, dir.x, 0., 1. );"," normal.xy *= .5 * w;"," normal *= projectionMatrix;"," if( sizeAttenuation == 0. ) {"," normal.xy *= finalPosition.w;"," normal.xy /= ( vec4( resolution, 0., 1. ) * projectionMatrix ).xy;"," }",""," finalPosition.xy += normal.xy * side;",""," gl_Position = finalPosition;","",s.ShaderChunk.logdepthbuf_vertex,s.ShaderChunk.fog_vertex&&" vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );",s.ShaderChunk.fog_vertex,"}"].join("\n"),s.ShaderChunk.meshline_frag=["",s.ShaderChunk.fog_pars_fragment,s.ShaderChunk.logdepthbuf_pars_fragment,"","uniform sampler2D map;","uniform sampler2D alphaMap;","uniform float useMap;","uniform float useAlphaMap;","uniform float useDash;","uniform float dashArray;","uniform float dashOffset;","uniform float dashRatio;","uniform float visibility;","uniform float alphaTest;","uniform vec2 repeat;","","varying vec2 vUV;","varying vec4 vColor;","varying float vCounters;","","void main() {","",s.ShaderChunk.logdepthbuf_fragment,""," vec4 c = vColor;"," if( useMap == 1. ) c *= texture2D( map, vUV * repeat );"," if( useAlphaMap == 1. ) c.a *= texture2D( alphaMap, vUV * repeat ).a;"," if( c.a < alphaTest ) discard;"," if( useDash == 1. 
){"," c.a *= ceil(mod(vCounters + dashOffset, dashArray) - (dashArray * dashRatio));"," }"," gl_FragColor = c;"," gl_FragColor.a *= step(vCounters, visibility);","",s.ShaderChunk.fog_fragment,"}"].join("\n");class Wa extends s.ShaderMaterial{constructor(e){super({uniforms:Object.assign({},s.UniformsLib.fog,{lineWidth:{value:1},map:{value:null},useMap:{value:0},alphaMap:{value:null},useAlphaMap:{value:0},color:{value:new s.Color(16777215)},opacity:{value:1},resolution:{value:new s.Vector2(1,1)},sizeAttenuation:{value:1},dashArray:{value:0},dashOffset:{value:0},dashRatio:{value:.5},useDash:{value:0},visibility:{value:1},alphaTest:{value:0},repeat:{value:new s.Vector2(1,1)}}),vertexShader:s.ShaderChunk.meshline_vert,fragmentShader:s.ShaderChunk.meshline_frag}),this.type="MeshLineMaterial",Object.defineProperties(this,{lineWidth:{enumerable:!0,get(){return this.uniforms.lineWidth.value},set(e){this.uniforms.lineWidth.value=e}},map:{enumerable:!0,get(){return this.uniforms.map.value},set(e){this.uniforms.map.value=e}},useMap:{enumerable:!0,get(){return this.uniforms.useMap.value},set(e){this.uniforms.useMap.value=e}},alphaMap:{enumerable:!0,get(){return this.uniforms.alphaMap.value},set(e){this.uniforms.alphaMap.value=e}},useAlphaMap:{enumerable:!0,get(){return this.uniforms.useAlphaMap.value},set(e){this.uniforms.useAlphaMap.value=e}},color:{enumerable:!0,get(){return this.uniforms.color.value},set(e){this.uniforms.color.value=e}},opacity:{enumerable:!0,get(){return this.uniforms.opacity.value},set(e){this.uniforms.opacity.value=e}},resolution:{enumerable:!0,get(){return this.uniforms.resolution.value},set(e){this.uniforms.resolution.value.copy(e)}},sizeAttenuation:{enumerable:!0,get(){return this.uniforms.sizeAttenuation.value},set(e){this.uniforms.sizeAttenuation.value=e}},dashArray:{enumerable:!0,get(){return this.uniforms.dashArray.value},set(e){this.uniforms.dashArray.value=e,this.useDash=0!==e?1:0}},dashOffset:{enumerable:!0,get(){return 
this.uniforms.dashOffset.value},set(e){this.uniforms.dashOffset.value=e}},dashRatio:{enumerable:!0,get(){return this.uniforms.dashRatio.value},set(e){this.uniforms.dashRatio.value=e}},useDash:{enumerable:!0,get(){return this.uniforms.useDash.value},set(e){this.uniforms.useDash.value=e}},visibility:{enumerable:!0,get(){return this.uniforms.visibility.value},set(e){this.uniforms.visibility.value=e}},alphaTest:{enumerable:!0,get(){return this.uniforms.alphaTest.value},set(e){this.uniforms.alphaTest.value=e}},repeat:{enumerable:!0,get(){return this.uniforms.repeat.value},set(e){this.uniforms.repeat.value.copy(e)}}}),this.setValues(e)}copy(e){return super.copy(e),this.lineWidth=e.lineWidth,this.map=e.map,this.useMap=e.useMap,this.alphaMap=e.alphaMap,this.useAlphaMap=e.useAlphaMap,this.color.copy(e.color),this.opacity=e.opacity,this.resolution.copy(e.resolution),this.sizeAttenuation=e.sizeAttenuation,this.dashArray.copy(e.dashArray),this.dashOffset.copy(e.dashOffset),this.dashRatio.copy(e.dashRatio),this.useDash=e.useDash,this.visibility=e.visibility,this.alphaTest=e.alphaTest,this.repeat.copy(e.repeat),this}}const Xa={width:.2,length:1,decay:1,local:!1,stride:0,interval:1},Ya=(e,t=1)=>(e.set(e.subarray(t)),e.fill(-1/0,-t),e);function qa(e,t){const{length:r,local:n,decay:a,interval:l,stride:c}={...Xa,...t},u=i.useRef(),[h]=i.useState((()=>new s.Vector3));i.useLayoutEffect((()=>{e&&(u.current=Float32Array.from({length:10*r*3},((t,r)=>e.position.getComponent(r%3))))}),[r,e]);const d=i.useRef(new s.Vector3),p=i.useRef(0);return(0,o.useFrame)((()=>{if(e&&u.current){if(0===p.current){let t;n?t=e.position:(e.getWorldPosition(h),t=h);const r=1*a;for(let e=0;e{const{children:r}=e,{width:n,length:a,decay:l,local:c,stride:u,interval:h}={...Xa,...e},{color:d="hotpink",attenuation:p,target:f}=e,m=(0,o.useThree)((e=>e.size)),g=i.useRef(null),[v,A]=i.useState(null),y=qa(v,{length:a,decay:l,local:c,stride:u,interval:h});i.useEffect((()=>{const e=(null==f?void 
0:f.current)||g.current.children.find((e=>e instanceof s.Object3D));e&&A(e)}),[y,f]);const x=i.useMemo((()=>new ja),[]),b=i.useMemo((()=>{var e;const t=new Wa({lineWidth:.1*n,color:d,sizeAttenuation:1,resolution:new s.Vector2(m.width,m.height)});let i;if(r)if(Array.isArray(r))i=r.find((e=>{const t=e;return"string"==typeof t.type&&"meshLineMaterial"===t.type}));else{const e=r;"string"==typeof e.type&&"meshLineMaterial"===e.type&&(i=e)}return"object"==typeof(null==(e=i)?void 0:e.props)&&t.setValues(i.props),t}),[n,d,m,r]);return i.useEffect((()=>{b.uniforms.resolution.value.set(m.width,m.height)}),[m]),(0,o.useFrame)((()=>{y.current&&x.setPoints(y.current,p)})),i.createElement("group",null,i.createElement("mesh",{ref:t,geometry:x,material:b}),i.createElement("group",{ref:g},r))}));var Ja=r(96272);const Za=({children:e,weight:t,transform:r,instances:a,mesh:o,...l})=>{const c=i.useRef(null),u=i.useRef(null),h=i.useRef(null);return i.useEffect((()=>{var e,t;u.current=null!==(e=null==a?void 0:a.current)&&void 0!==e?e:c.current.children.find((e=>e.hasOwnProperty("instanceMatrix"))),h.current=null!==(t=null==o?void 0:o.current)&&void 0!==t?t:c.current.children.find((e=>"Mesh"===e.type))}),[e,null==o?void 0:o.current,null==a?void 0:a.current]),i.useEffect((()=>{if(void 0===h.current)return;if(void 0===u.current)return;const e=new Ja.a(h.current);t&&e.setWeightAttribute(t),e.build();const n=new s.Vector3,i=new s.Vector3,a=new s.Color,o=new s.Object3D;h.current.updateMatrixWorld(!0);for(let t=0;t{const[a]=i.useState((()=>new s.BufferAttribute(new Float32Array(0),1))),o=i.useRef(null);return i.useLayoutEffect((()=>{if(o.current){var t;const r=null!==(t=o.current.parent)&&void 0!==t?t:o.current.__r3f.parent,n=e(r);o.current.copy(n)}}),[e]),i.createElement("primitive",n({ref:o,object:a,attachObject:["attributes",t]},r))};var es=r(25030),ts=r.n(es);function 
rs(e,{keys:t=["near","far","color","distance","decay","penumbra","angle","intensity","skeleton","visible","castShadow","receiveShadow","morphTargetDictionary","morphTargetInfluences","name","geometry","material","position","rotation","scale","up","userData"],deep:r,inject:n,castShadow:a,receiveShadow:s}){let o=ts()(e,t);return r&&(o.geometry&&"materialsOnly"!==r&&(o.geometry=o.geometry.clone()),o.material&&"geometriesOnly"!==r&&(o.material=o.material.clone())),n&&(o="function"==typeof n?{...o,children:n(e)}:i.isValidElement(n)?{...o,children:n}:{...o,...n}),"Mesh"===e.type&&(a&&(o.castShadow=!0),s&&(o.receiveShadow=!0)),o}const ns=i.forwardRef((({object:e,children:t,deep:r,castShadow:a,receiveShadow:s,inject:o,keys:l,...c},u)=>{const h={keys:l,deep:r,inject:o,castShadow:a,receiveShadow:s};if(Array.isArray(e))return i.createElement("group",n({},c,{ref:u}),e.map((e=>i.createElement(ns,n({key:e.uuid,object:e},h)))),t);const{children:d,...p}=rs(e,h),f=e.type[0].toLowerCase()+e.type.slice(1);return i.createElement(f,n({},p,c,{ref:u}),(null==e?void 0:e.children).map((e=>{let t={},r=e.type[0].toLowerCase()+e.type.slice(1);return"group"===r||"object3D"===r?(r=ns,t={object:e,...h}):t=rs(e,h),i.createElement(r,n({key:e.uuid},t))})),t,d)})),is=i.forwardRef((({makeDefault:e,...t},r)=>{const a=(0,o.useThree)((({set:e})=>e)),s=(0,o.useThree)((({camera:e})=>e)),l=(0,o.useThree)((({size:e})=>e)),c=i.useRef();return i.useLayoutEffect((()=>{c.current&&!t.manual&&c.current.updateProjectionMatrix()}),[l,t]),i.useLayoutEffect((()=>{if(e&&c.current){const e=s;return a((()=>({camera:c.current}))),()=>a((()=>({camera:e})))}}),[s,c,e,a]),i.createElement("orthographicCamera",n({left:l.width/-2,right:l.width/2,top:l.height/2,bottom:l.height/-2,ref:M([c,r])},t))})),as=i.forwardRef((({makeDefault:e,...t},r)=>{const a=(0,o.useThree)((({set:e})=>e)),s=(0,o.useThree)((({camera:e})=>e)),l=(0,o.useThree)((({size:e})=>e)),c=i.useRef();return 
i.useLayoutEffect((()=>{const{current:e}=c;e&&!t.manual&&(e.aspect=l.width/l.height,e.updateProjectionMatrix())}),[l,t]),i.useLayoutEffect((()=>{if(e&&c.current){const e=s;return a((()=>({camera:c.current}))),()=>a((()=>({camera:e})))}}),[s,c,e,a]),i.createElement("perspectiveCamera",n({ref:M([c,r])},t))}));function ss({children:e,fog:t,frames:r=1/0,resolution:n=256,near:a=1,far:l=1e3,...c}){const u=i.useRef(),[h,d]=i.useState(),p=(0,o.useThree)((({scene:e})=>e)),f=(0,o.useThree)((({gl:e})=>e)),m=i.useMemo((()=>{const e=new s.WebGLCubeRenderTarget(n);return e.texture.type=s.HalfFloatType,e}),[n]);let g=0;return(0,o.useFrame)((()=>{if(h&&u.current&&(r===1/0||ge.visible=!1));const e=p.fog;p.fog=t||e,h.update(f,p),p.fog=e,u.current.traverse((e=>e.visible=!0)),g++}})),i.createElement("group",c,i.createElement("cubeCamera",{ref:d,args:[a,l,m]}),i.createElement("group",{ref:u},e(m.texture)))}var os=r(41743);const ls=i.forwardRef(((e,t)=>{const{camera:r,onChange:a,...s}=e,l=(0,o.useThree)((e=>e.camera)),c=(0,o.useThree)((e=>e.invalidate)),u=r||l,[h]=i.useState((()=>new os.W(u)));return i.useEffect((()=>{const e=e=>{c(),a&&a(e)};return null==h||null==h.addEventListener||h.addEventListener("change",e),()=>null==h||null==h.removeEventListener?void 0:h.removeEventListener("change",e)}),[a,h,c]),(0,o.useFrame)((()=>null==h?void 0:h.update())),i.useEffect((()=>{const e=h;return null==e||e.connect(),()=>null==e?void 0:e.dispose()}),[h]),h?i.createElement("primitive",n({ref:t,dispose:void 0,object:h},s)):null}));var cs=r(32044);const us=i.forwardRef(((e,t)=>{const{onChange:r,...a}=e,s=(0,o.useThree)((({invalidate:e})=>e)),l=(0,o.useThree)((({camera:e})=>e)),c=(0,o.useThree)((({gl:e})=>e)),[u]=i.useState((()=>new cs.m(l,c.domElement)));return i.useEffect((()=>{const e=e=>{s(),r&&r(e)};return null==u||null==u.addEventListener||u.addEventListener("change",e),()=>null==u||null==u.removeEventListener?void 
0:u.removeEventListener("change",e)}),[r,u,s]),(0,o.useFrame)(((e,t)=>null==u?void 0:u.update(t))),u?i.createElement("primitive",n({ref:t,dispose:void 0,object:u},a)):null}));var hs=r(22227);const ds=i.forwardRef(((e={enableDamping:!0},t)=>{const{camera:r,onChange:a,onStart:s,onEnd:l,...c}=e,u=(0,o.useThree)((({invalidate:e})=>e)),h=(0,o.useThree)((({camera:e})=>e)),d=(0,o.useThree)((({gl:e})=>e.domElement)),p=r||h,f=i.useMemo((()=>new hs.o(p)),[p]);return i.useEffect((()=>{f.connect(d);const e=e=>{u(),a&&a(e)};return f.addEventListener("change",e),s&&f.addEventListener("start",s),l&&f.addEventListener("end",l),()=>{f.dispose(),f.removeEventListener("change",e),s&&f.removeEventListener("start",s),l&&f.removeEventListener("end",l)}}),[a,s,l,f,u,d]),(0,o.useFrame)((()=>f.update())),i.createElement("primitive",n({ref:t,dispose:void 0,object:f,enableDamping:!0},c))})),ps=i.forwardRef((({makeDefault:e,camera:t,regress:r,domElement:a,enableDamping:s=!0,onChange:l,onStart:c,onEnd:u,...h},d)=>{const p=(0,o.useThree)((e=>e.invalidate)),f=(0,o.useThree)((e=>e.camera)),m=(0,o.useThree)((e=>e.gl)),g=(0,o.useThree)((e=>e.events)),v=(0,o.useThree)((e=>e.set)),A=(0,o.useThree)((e=>e.get)),y=(0,o.useThree)((e=>e.performance)),x=t||f,b=a||g.connected||m.domElement,w=i.useMemo((()=>new hs.z(x)),[x]);return(0,o.useFrame)((()=>{w.enabled&&w.update()})),i.useEffect((()=>(w.connect(b),()=>{w.dispose()})),[b,r,w,p]),i.useEffect((()=>{const e=e=>{p(),r&&y.regress(),l&&l(e)};return w.addEventListener("change",e),c&&w.addEventListener("start",c),u&&w.addEventListener("end",u),()=>{c&&w.removeEventListener("start",c),u&&w.removeEventListener("end",u),w.removeEventListener("change",e)}}),[l,c,u]),i.useEffect((()=>{if(e){const e=A().controls;return v({controls:w}),()=>v({controls:e})}}),[e,w]),i.createElement("primitive",n({ref:d,object:w,enableDamping:s},h))}));var fs=r(7703);const 
ms=i.forwardRef((({makeDefault:e,camera:t,domElement:r,regress:a,onChange:s,onStart:l,onEnd:c,...u},h)=>{const{invalidate:d,camera:p,gl:f,events:m,set:g,get:v,performance:A,viewport:y}=(0,o.useThree)(),x=t||p,b=r||m.connected||f.domElement,w=i.useMemo((()=>new fs.$(x)),[x]);return(0,o.useFrame)((()=>{w.enabled&&w.update()})),i.useEffect((()=>(w.connect(b),()=>{w.dispose()})),[b,a,w,d]),i.useEffect((()=>{const e=e=>{d(),a&&A.regress(),s&&s(e)};return w.addEventListener("change",e),l&&w.addEventListener("start",l),c&&w.addEventListener("end",c),()=>{l&&w.removeEventListener("start",l),c&&w.removeEventListener("end",c),w.removeEventListener("change",e)}}),[s,l,c]),i.useEffect((()=>{w.handleResize()}),[y]),i.useEffect((()=>{if(e){const e=v().controls;return g({controls:w}),()=>g({controls:e})}}),[e,w]),i.createElement("primitive",n({ref:h,object:w},u))}));var gs=r(1687);const vs=(0,i.forwardRef)((({camera:e,makeDefault:t,regress:r,domElement:a,onChange:s,onStart:l,onEnd:c,...u},h)=>{const d=(0,o.useThree)((e=>e.invalidate)),p=(0,o.useThree)((e=>e.camera)),f=(0,o.useThree)((e=>e.gl)),m=(0,o.useThree)((e=>e.events)),g=(0,o.useThree)((e=>e.set)),v=(0,o.useThree)((e=>e.get)),A=(0,o.useThree)((e=>e.performance)),y=e||p,x=a||m.connected||f.domElement,b=(0,i.useMemo)((()=>new gs.A(y)),[y]);return(0,o.useFrame)((()=>{b.enabled&&b.update()})),(0,i.useEffect)((()=>(b.connect(x),()=>{b.dispose()})),[x,r,b,d]),(0,i.useEffect)((()=>{const e=e=>{d(),r&&A.regress(),s&&s(e)};return b.addEventListener("change",e),l&&b.addEventListener("start",l),c&&b.addEventListener("end",c),()=>{b.removeEventListener("change",e),l&&b.removeEventListener("start",l),c&&b.removeEventListener("end",c)}}),[s,l,c]),(0,i.useEffect)((()=>{if(t){const e=v().controls;return g({controls:b}),()=>g({controls:e})}}),[t,b]),i.createElement("primitive",n({ref:h,object:b},u))}));var As=r(76427),ys=r.n(As),xs=r(35041);const 
bs=i.forwardRef((({children:e,domElement:t,onChange:r,onMouseDown:a,onMouseUp:l,onObjectChange:c,object:u,...h},d)=>{const p=["enabled","axis","mode","translationSnap","rotationSnap","scaleSnap","space","size","showX","showY","showZ"],{camera:f,...m}=h,g=ts()(m,p),v=ys()(m,p),A=(0,o.useThree)((e=>e.controls)),y=(0,o.useThree)((e=>e.gl)),x=(0,o.useThree)((e=>e.events)),b=(0,o.useThree)((e=>e.camera)),w=(0,o.useThree)((e=>e.invalidate)),E=f||b,_=t||x.connected||y.domElement,S=i.useMemo((()=>new xs.Ys(E,_)),[E,_]),M=i.useRef();return i.useLayoutEffect((()=>(u?S.attach(u instanceof s.Object3D?u:u.current):M.current instanceof s.Object3D&&S.attach(M.current),()=>{S.detach()})),[u,e,S]),i.useEffect((()=>{if(A){const e=e=>A.enabled=!e.value;return S.addEventListener("dragging-changed",e),()=>S.removeEventListener("dragging-changed",e)}}),[S,A]),i.useEffect((()=>{const e=e=>{w(),r&&r(e)};return null==S||null==S.addEventListener||S.addEventListener("change",e),a&&(null==S||null==S.addEventListener||S.addEventListener("mouseDown",a)),l&&(null==S||null==S.addEventListener||S.addEventListener("mouseUp",l)),c&&(null==S||null==S.addEventListener||S.addEventListener("objectChange",c)),()=>{null==S||null==S.removeEventListener||S.removeEventListener("change",e),a&&(null==S||null==S.removeEventListener||S.removeEventListener("mouseDown",a)),l&&(null==S||null==S.removeEventListener||S.removeEventListener("mouseUp",l)),c&&(null==S||null==S.removeEventListener||S.removeEventListener("objectChange",c))}}),[r,a,l,c,S,w]),S?i.createElement(i.Fragment,null,i.createElement("primitive",n({ref:d,dispose:void 0,object:S},g)),i.createElement("group",n({ref:M},v),e)):null}));var ws=r(2679);const 
Es=i.forwardRef((({domElement:e,selector:t,onChange:r,onLock:a,onUnlock:s,enabled:l=!0,...c},u)=>{const{camera:h,...d}=c,p=(0,o.useThree)((({gl:e})=>e)),f=(0,o.useThree)((e=>e.camera)),m=(0,o.useThree)((e=>e.invalidate)),g=(0,o.useThree)((e=>e.raycaster)),v=(0,o.useThree)((e=>e.events)),A=h||f,y=e||v.connected||p.domElement,[x]=i.useState((()=>new ws.q(A)));return i.useEffect((()=>{if(l){x.connect(y);const e=g.computeOffsets;return g.computeOffsets=e=>({offsetX:e.target.width/2,offsetY:e.target.height/2}),()=>{x.disconnect(),g.computeOffsets=e}}}),[l,x]),i.useEffect((()=>{const e=e=>{m(),r&&r(e)};x.addEventListener("change",e),a&&x.addEventListener("lock",a),s&&x.addEventListener("unlock",s);const n=()=>x.lock(),i=t?Array.from(document.querySelectorAll(t)):[document];return i.forEach((e=>e&&e.addEventListener("click",n))),()=>{x.removeEventListener("change",e),a&&x.addEventListener("lock",a),s&&x.addEventListener("unlock",s),i.forEach((e=>e?e.removeEventListener("click",n):void 0))}}),[r,a,s,t]),i.createElement("primitive",n({ref:u,object:x},d))}));var _s=r(55259);const Ss=i.forwardRef(((e,t)=>{const r=(0,o.useThree)((e=>e.camera)),a=(0,o.useThree)((e=>e.gl)),[s]=i.useState((()=>new _s.o(r,a.domElement)));return(0,o.useFrame)(((e,t)=>{s.update(t)})),s?i.createElement("primitive",n({ref:t,object:s},e)):null}));function Ms(e,t){const r=(0,o.useThree)((e=>e.mouse)),[n]=i.useState((()=>{const n=new s.Raycaster;return t&&(0,o.applyProps)(n,t,{}),function(t,i){n.setFromCamera(r,e instanceof s.Camera?e:e.current);const a=this.constructor.prototype.raycast.bind(this);a&&a(n,i)}}));return n}const Ts=i.createContext({}),Cs=()=>i.useContext(Ts),Is=2*Math.PI,Bs=new s.Object3D,Rs=new s.Matrix4,[Ps,Fs]=[new s.Quaternion,new s.Quaternion],Ls=new s.Vector3,Ds=new s.Vector3,Us=({alignment:e="bottom-right",margin:t=[80,80],renderPriority:r=0,autoClear:n=!0,onUpdate:a,onTarget:l,children:c})=>{const 
u=(0,o.useThree)((({size:e})=>e)),h=(0,o.useThree)((({camera:e})=>e)),d=(0,o.useThree)((({controls:e})=>e)),p=(0,o.useThree)((({gl:e})=>e)),f=(0,o.useThree)((({scene:e})=>e)),m=(0,o.useThree)((({invalidate:e})=>e)),g=i.useRef(),v=i.useRef(),A=i.useRef(null),[y]=i.useState((()=>new s.Scene)),x=i.useRef(!1),b=i.useRef(0),w=i.useRef(new s.Vector3(0,0,0)),E=i.useCallback((e=>{x.current=!0,(d||l)&&(w.current=(null==d?void 0:d.target)||(null==l?void 0:l())),b.current=h.position.distanceTo(Ls),Ps.copy(h.quaternion),Ds.copy(e).multiplyScalar(b.current).add(Ls),Bs.lookAt(Ds),Fs.copy(Bs.quaternion),m()}),[d,h,l,m]);i.useEffect((()=>(f.background&&(g.current=f.background,f.background=null,y.background=g.current),()=>{g.current&&(f.background=g.current)})),[]),(0,o.useFrame)(((e,t)=>{if(A.current&&v.current){var r;if(x.current)if(Ps.angleTo(Fs)<.01)x.current=!1;else{const e=t*Is;Ps.rotateTowards(Fs,e),h.position.set(0,0,1).applyQuaternion(Ps).multiplyScalar(b.current).add(w.current),h.up.set(0,1,0).applyQuaternion(Ps).normalize(),h.quaternion.copy(Ps),a?a():d&&d.update(),m()}Rs.copy(h.matrix).invert(),null==(r=v.current)||r.quaternion.setFromRotationMatrix(Rs),n&&(p.autoClear=!1),p.clearDepth(),p.render(y,A.current)}}),r);const _=Ms(A),S=i.useMemo((()=>({tweenCamera:E,raycast:_})),[E]),[M,T]=t,C=e.endsWith("-left")?-u.width/2+M:u.width/2-M,I=e.startsWith("top-")?u.height/2-T:-u.height/2+T;return(0,o.createPortal)(i.createElement(Ts.Provider,{value:S},i.createElement(is,{ref:A,position:[0,0,200]}),i.createElement("group",{ref:v,position:[C,I,0]},c)),y)},Ns="#f0f0f0",ks="#999",Os="black",Gs="black",zs=["Right","Left","Top","Bottom","Front","Back"],Vs=e=>new 
s.Vector3(...e).multiplyScalar(.38),Hs=[[1,1,1],[1,1,-1],[1,-1,1],[1,-1,-1],[-1,1,1],[-1,1,-1],[-1,-1,1],[-1,-1,-1]].map(Vs),Qs=[.25,.25,.25],js=[[1,1,0],[1,0,1],[1,0,-1],[1,-1,0],[0,1,1],[0,1,-1],[0,-1,1],[0,-1,-1],[-1,1,0],[-1,0,1],[-1,0,-1],[-1,-1,0]].map(Vs),Ws=js.map((e=>e.toArray().map((e=>0==e?.5:.25)))),Xs=({hover:e,index:t,font:r="20px Inter var, Arial, sans-serif",faces:n=zs,color:a=Ns,hoverColor:l=ks,textColor:c=Os,strokeColor:u=Gs,opacity:h=1})=>{const d=(0,o.useThree)((e=>e.gl)),p=i.useMemo((()=>{const e=document.createElement("canvas");e.width=128,e.height=128;const i=e.getContext("2d");return i.fillStyle=a,i.fillRect(0,0,e.width,e.height),i.strokeStyle=u,i.strokeRect(0,0,e.width,e.height),i.font=r,i.textAlign="center",i.fillStyle=c,i.fillText(n[t].toUpperCase(),64,76),new s.CanvasTexture(e)}),[t,n,r,a,c,u]);return i.createElement("meshLambertMaterial",{map:p,"map-anisotropy":d.capabilities.getMaxAnisotropy()||1,attachArray:"material",color:e?l:"white",transparent:!0,opacity:h})},Ys=e=>{const{tweenCamera:t,raycast:r}=Cs(),[a,s]=i.useState(null);return i.createElement("mesh",{raycast:r,onPointerOut:e=>{e.stopPropagation(),s(null)},onPointerMove:e=>{e.stopPropagation(),s(Math.floor(e.faceIndex/2))},onClick:e.onClick||(e=>{e.stopPropagation(),t(e.face.normal)})},[...Array(6)].map(((t,r)=>i.createElement(Xs,n({key:r,index:r,hover:a===r},e)))),i.createElement("boxGeometry",null))},qs=({onClick:e,dimensions:t,position:r,hoverColor:n=ks})=>{const{tweenCamera:a,raycast:s}=Cs(),[o,l]=i.useState(!1);return 
i.createElement("mesh",{scale:1.01,position:r,raycast:s,onPointerOver:e=>{e.stopPropagation(),l(!0)},onPointerOut:e=>{e.stopPropagation(),l(!1)},onClick:e||(e=>{e.stopPropagation(),a(r)})},i.createElement("meshBasicMaterial",{color:o?n:"white",transparent:!0,opacity:.6,visible:o}),i.createElement("boxGeometry",{args:t}))},Ks=e=>i.createElement("group",{scale:[60,60,60]},i.createElement(Ys,e),js.map(((t,r)=>i.createElement(qs,n({key:r,position:t,dimensions:Ws[r]},e)))),Hs.map(((t,r)=>i.createElement(qs,n({key:r,position:t,dimensions:Qs},e)))),i.createElement("ambientLight",{intensity:.5}),i.createElement("pointLight",{position:[10,10,10],intensity:.5}));function Js({scale:e=[.8,.05,.05],color:t,rotation:r}){return i.createElement("group",{rotation:r},i.createElement("mesh",{position:[.4,0,0]},i.createElement("boxGeometry",{args:e}),i.createElement("meshBasicMaterial",{color:t,toneMapped:!1})))}function Zs({onClick:e,font:t,disabled:r,arcStyle:a,label:l,labelColor:c,axisHeadScale:u=1,...h}){const d=(0,o.useThree)((e=>e.gl)),p=i.useMemo((()=>{const e=document.createElement("canvas");e.width=64,e.height=64;const r=e.getContext("2d");return r.beginPath(),r.arc(32,32,16,0,2*Math.PI),r.closePath(),r.fillStyle=a,r.fill(),l&&(r.font=t,r.textAlign="center",r.fillStyle=c,r.fillText(l,32,41)),new s.CanvasTexture(e)}),[a,l,c,t]),[f,m]=i.useState(!1),g=(l?1:.75)*(f?1.2:1)*u;return i.createElement("sprite",n({scale:g,onPointerOver:r?void 0:e=>{e.stopPropagation(),m(!0)},onPointerOut:r?void 0:e||(e=>{e.stopPropagation(),m(!1)})},h),i.createElement("spriteMaterial",{map:p,"map-anisotropy":d.capabilities.getMaxAnisotropy()||1,alphaTest:.3,opacity:l?1:.75,toneMapped:!1}))}const $s=({hideNegativeAxes:e,hideAxisHeads:t,disabled:r,font:a="18px Inter var, Arial, 
sans-serif",axisColors:s=["#ff3653","#0adb50","#2c8fdf"],axisHeadScale:o=1,axisScale:l,labels:c=["X","Y","Z"],labelColor:u="#000",onClick:h,...d})=>{const[p,f,m]=s,{tweenCamera:g,raycast:v}=Cs(),A={font:a,disabled:r,labelColor:u,raycast:v,onClick:h,axisHeadScale:o,onPointerDown:r?void 0:e=>{g(e.object.position),e.stopPropagation()}};return i.createElement("group",n({scale:40},d),i.createElement(Js,{color:p,rotation:[0,0,0],scale:l}),i.createElement(Js,{color:f,rotation:[0,0,Math.PI/2],scale:l}),i.createElement(Js,{color:m,rotation:[0,-Math.PI/2,0],scale:l}),!t&&i.createElement(i.Fragment,null,i.createElement(Zs,n({arcStyle:p,position:[1,0,0],label:c[0]},A)),i.createElement(Zs,n({arcStyle:f,position:[0,1,0],label:c[1]},A)),i.createElement(Zs,n({arcStyle:m,position:[0,0,1],label:c[2]},A)),!e&&i.createElement(i.Fragment,null,i.createElement(Zs,n({arcStyle:p,position:[-1,0,0]},A)),i.createElement(Zs,n({arcStyle:f,position:[0,-1,0]},A)),i.createElement(Zs,n({arcStyle:m,position:[0,0,-1]},A)))),i.createElement("ambientLight",{intensity:.5}),i.createElement("pointLight",{position:[10,10,10],intensity:.5}))};function eo(e,{path:t}){const[r]=(0,o.useLoader)(s.CubeTextureLoader,[e],(e=>e.setPath(t)));return r}eo.preload=(e,{path:t})=>o.useLoader.preload(s.CubeTextureLoader,[e],(e=>e.setPath(t)));var to=r(43953);function ro(e){return(0,o.useLoader)(to.y,e)}ro.preload=e=>o.useLoader.preload(to.y,e),ro.clear=e=>o.useLoader.clear(to.y,e);var no=r(44976),io=r(46295),ao=r(17607);let so=null;function oo(e,t,r){return n=>{r&&r(n),e&&(so||(so=new no._),so.setDecoderPath("string"==typeof e?e:"https://www.gstatic.com/draco/versioned/decoders/1.4.3/"),n.setDRACOLoader(so)),t&&n.setMeshoptDecoder("function"==typeof io.z?(0,io.z)():io.z)}}function lo(e,t=!0,r=!0,n){return(0,o.useLoader)(ao.E,e,oo(t,r,n))}lo.preload=(e,t=!0,r=!0,n)=>o.useLoader.preload(ao.E,e,oo(t,r,n)),lo.clear=e=>o.useLoader.clear(ao.E,e);var co=r(28934);const 
uo="https://cdn.jsdelivr.net/gh/pmndrs/drei-assets@master";function ho(e,t=`${uo}/basis/`){const r=(0,o.useThree)((e=>e.gl)),n=(0,o.useLoader)(co.a,ka(e)?Object.values(e):e,(e=>{e.detectSupport(r),e.setTranscoderPath(t)}));if((0,i.useEffect)((()=>{(Array.isArray(n)?n:[n]).forEach(r.initTexture)}),[r,n]),ka(e)){const t=Object.keys(e),r={};return t.forEach((e=>Object.assign(r,{[e]:n[t.indexOf(e)]}))),r}return n}ho.preload=(e,t=`${uo}/basis/`)=>o.useLoader.preload(co.a,e,(e=>{e.setTranscoderPath(t)})),ho.clear=e=>o.useLoader.clear(co.a,e);var po=r(73466),fo=r.n(po);function mo(e,t){"function"==typeof e?e(t):null!=e&&(e.current=t)}function go({showPanel:e=0,className:t,parent:r}){const n=function(e,t=[],r){const[n,a]=i.useState();return i.useLayoutEffect((()=>{const t=e();return a(t),mo(r,t),()=>mo(r,null)}),t),n}((()=>new(fo())),[]);return i.useEffect((()=>{if(n){const i=r&&r.current||document.body;n.showPanel(e),null==i||i.appendChild(n.dom),t&&n.dom.classList.add(...t.split(" ").filter((e=>e)));const a=(0,o.addEffect)((()=>n.begin())),s=(0,o.addAfterEffect)((()=>n.end()));return()=>{null==i||i.removeChild(n.dom),a(),s()}}}),[r,n,t,e]),null}function vo(e,t,r){const{gl:n,size:a}=(0,o.useThree)(),l=i.useMemo((()=>n.getPixelRatio()),[n]),c="number"==typeof e?e:a.width*l,u="number"==typeof t?t:a.height*l,h=("number"==typeof e?r:e)||{},{samples:d,...p}=h,f=i.useMemo((()=>{let e;return e=new s.WebGLRenderTarget(c,u,{minFilter:s.LinearFilter,magFilter:s.LinearFilter,encoding:n.outputEncoding,type:s.HalfFloatType,...p}),e.samples=d,e}),[]);return i.useLayoutEffect((()=>{f.setSize(c,u),d&&(f.samples=d)}),[d,f,c,u]),i.useEffect((()=>()=>f.dispose()),[]),f}function Ao({size:e=256,frames:t=1/0}={}){const r=(0,o.useThree)((e=>e.viewport.dpr)),{width:n,height:a}=(0,o.useThree)((e=>e.size)),l=e||n*r,c=e||a*r,u=i.useMemo((()=>{const e=new s.DepthTexture(l,c);return e.format=s.DepthFormat,e.type=s.UnsignedShortType,{depthTexture:e}}),[l,c]);let h=0;const 
d=vo(l,c,u);return(0,o.useFrame)((e=>{(t===1/0||he.viewport)),i=t*(n.aspect>e/t?n.width/e:n.height/t);return[e*(n.aspect>e/t?n.width/e:n.height/t)*r,i*r,1]}function xo(e,t,r,n){return new(r||(r=Promise))((function(i,a){function s(e){try{l(n.next(e))}catch(e){a(e)}}function o(e){try{l(n.throw(e))}catch(e){a(e)}}function l(e){var t;e.done?i(e.value):(t=e.value,t instanceof r?t:new r((function(e){e(t)}))).then(s,o)}l((n=n.apply(e,t||[])).next())}))}const bo=["geforce 320m","geforce 8600","geforce 8600m gt","geforce 8800 gs","geforce 8800 gt","geforce 9400","geforce 9400m g","geforce 9400m","geforce 9600m gt","geforce 9600m","geforce fx go5200","geforce gt 120","geforce gt 130","geforce gt 330m","geforce gtx 285","google swiftshader","intel g41","intel g45","intel gma 4500mhd","intel gma x3100","intel hd 3000","intel q45","legacy","mali-2","mali-3","mali-4","quadro fx 1500","quadro fx 4","quadro fx 5","radeon hd 2400","radeon hd 2600","radeon hd 4670","radeon hd 4850","radeon hd 4870","radeon hd 5670","radeon hd 5750","radeon hd 6290","radeon hd 6300","radeon hd 6310","radeon hd 6320","radeon hd 6490m","radeon hd 6630m","radeon hd 6750m","radeon hd 6770m","radeon hd 6970m","sgx 543","sgx543"];function wo(e){return e.toLowerCase().replace(/^angle ?\((.+)\)*$/,"$1").replace(/\s(\d{1,2}gb|direct3d.+$)|\(r\)| \([^)]+\)$/g,"").replace(/(?:vulkan|opengl) \d+\.\d+(?:\.\d+)?(?: \((.*)\))?/,"$1")}const Eo="undefined"==typeof window,_o=(()=>{if(Eo)return;const{userAgent:e,platform:t,maxTouchPoints:r}=window.navigator,n=/(iphone|ipod|ipad)/i.test(e),i="iPad"===t||"MacIntel"===t&&r>0&&!window.MSStream;return{isIpad:i,isMobile:/android/i.test(e)||n||i,isSafari12:/Version\/12.+Safari/.test(e)}})();const So=[],Mo=[];function To(e,t){if(e===t)return 0;const r=e;e.length>t.length&&(e=t,t=r);let n=e.length,i=t.length;for(;n>0&&e.charCodeAt(~-n)===t.charCodeAt(~-i);)n--,i--;let a,s=0;for(;sc?l>c?c+1:l:l>o?o+1:l;return c}function Co(e){return null!=e}class Io extends 
Error{constructor(e){super(e),Object.setPrototypeOf(this,new.target.prototype)}}const Bo=({mobileTiers:e=[0,15,30,60],desktopTiers:t=[0,15,30,60],override:r={},glContext:n,failIfMajorPerformanceCaveat:i=!1,benchmarksURL:a="https://unpkg.com/detect-gpu@4.0.50/dist/benchmarks"}={})=>xo(void 0,void 0,void 0,(function*(){const s={};if(Eo)return{tier:0,type:"SSR"};const{isIpad:o=!!(null==_o?void 0:_o.isIpad),isMobile:l=!!(null==_o?void 0:_o.isMobile),screenSize:c=window.screen,loadBenchmarks:u=(e=>xo(void 0,void 0,void 0,(function*(){const t=yield fetch(`${a}/${e}`).then((e=>e.json()));if(parseInt(t.shift().split(".")[0],10)<4)throw new Io("Detect GPU benchmark data is out of date. Please update to version 4x");return t})))}=r;let{renderer:h}=r;const d=(e,t,r,n,i)=>({device:i,fps:n,gpu:r,isMobile:l,tier:e,type:t});let p,f="";if(h)h=wo(h),p=[h];else{const e=n||function(e,t=!1){const r={alpha:!1,antialias:!1,depth:!1,failIfMajorPerformanceCaveat:t,powerPreference:"high-performance",stencil:!1};e&&delete r.powerPreference;const n=window.document.createElement("canvas"),i=n.getContext("webgl",r)||n.getContext("experimental-webgl",r);return null!=i?i:void 0}(null==_o?void 0:_o.isSafari12,i);if(!e)return d(0,"WEBGL_UNSUPPORTED");const t=e.getExtension("WEBGL_debug_renderer_info");if(t&&(h=e.getParameter(t.UNMASKED_RENDERER_WEBGL)),!h)return d(1,"FALLBACK");f=h,h=wo(h),p=function(e,t,r){return"apple gpu"===t?function(e,t,r){if(!r)return[t];const n=function(e){const t=e.createShader(35633),r=e.createShader(35632),n=e.createProgram();if(!(r&&t&&n))return;e.shaderSource(t,"\n precision highp float;\n attribute vec3 aPosition;\n varying float vvv;\n void main() {\n vvv = 0.31622776601683794;\n gl_Position = vec4(aPosition, 1.0);\n }\n "),e.shaderSource(r,"\n precision highp float;\n varying float vvv;\n void main() {\n vec4 enc = vec4(1.0, 255.0, 65025.0, 16581375.0) * vvv;\n enc = fract(enc);\n enc -= enc.yzww * vec4(1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0, 0.0);\n gl_FragColor = 
enc;\n }\n "),e.compileShader(t),e.compileShader(r),e.attachShader(n,t),e.attachShader(n,r),e.linkProgram(n),e.detachShader(n,t),e.detachShader(n,r),e.deleteShader(t),e.deleteShader(r),e.useProgram(n);const i=e.createBuffer();e.bindBuffer(34962,i),e.bufferData(34962,new Float32Array([-1,-1,0,3,-1,0,-1,3,0]),35044);const a=e.getAttribLocation(n,"aPosition");e.vertexAttribPointer(a,3,5126,!1,0,0),e.enableVertexAttribArray(a),e.clearColor(1,1,1,1),e.clear(16384),e.viewport(0,0,1,1),e.drawArrays(4,0,3);const s=new Uint8Array(4);return e.readPixels(0,0,1,1,6408,5121,s),e.deleteProgram(n),e.deleteBuffer(i),s.join("")}(e),i="801621810",a="8016218135",s="80162181161",o=(null==_o?void 0:_o.isIpad)?[["a7",s,12],["a8",a,15],["a8x",a,15],["a9",a,15],["a9x",a,15],["a10",a,15],["a10x",a,15],["a12",i,15],["a12x",i,15],["a12z",i,15],["a14",i,15],["m1",i,15]]:[["a7",s,12],["a8",a,12],["a9",a,15],["a10",a,15],["a11",i,15],["a12",i,15],["a13",i,15],["a14",i,15]];let l;return"80162181255"===n?l=o.filter((([,,e])=>e>=14)):(l=o.filter((([,e])=>e===n)),l.length||(l=o)),l.map((([e])=>`apple ${e} gpu`))}(e,t,r):[t]}(e,h,l)}const m=(yield Promise.all(p.map((function(e){var t;return xo(this,void 0,void 0,(function*(){const r=(e=>{const t=l?["adreno","apple","mali-t","mali","nvidia","powervr"]:["intel","apple","amd","radeon","nvidia","geforce"];for(const r of t)if(e.includes(r))return r})(e);if(!r)return;const n=`${l?"m":"d"}-${r}${o?"-ipad":""}.json`,i=s[n]=null!==(t=s[n])&&void 0!==t?t:u(n);let a;try{a=yield i}catch(r){if(r instanceof Io)throw r;return}const h=function(e){var t;const r=(e=e.replace(/\([^)]+\)/,"")).match(/\d+/)||e.match(/(\W|^)([A-Za-z]{1,3})(\W|$)/g);return null!==(t=null==r?void 0:r.join("").replace(/\W|amd/g,""))&&void 0!==t?t:""}(e);let d=a.filter((([,e])=>e===h));d.length||(d=a.filter((([t])=>t.includes(e))));const p=d.length;if(0===p)return;let 
f,[m,,,g]=p>1?d.map((t=>[t,To(e,t[0])])).sort((([,e],[,t])=>e-t))[0][0]:d[0],v=Number.MAX_VALUE;const{devicePixelRatio:A}=window,y=c.width*A*c.height*A;for(const e of g){const[t,r]=e,n=t*r,i=Math.abs(y-n);ie===r?t-n:e-r));if(!m.length){const e=bo.find((e=>h.includes(e)));return e?d(0,"BLOCKLISTED",e):d(1,"FALLBACK",`${h} (${f})`)}const[,g,v,A]=m[0];if(-1===g)return d(0,"BLOCKLISTED",v,g,A);const y=l?e:t;let x=0;for(let e=0;e=y[e]&&(x=e);return d(x,"BENCHMARK",v,g,A)})),Ro=e=>Ca((()=>Bo(e)),["useDetectGPU"]);function Po(e,t,...r){const n=i.useRef(),a=(0,o.useThree)((e=>e.scene));return i.useEffect((()=>(e&&t&&null!=e&&e.current&&(n.current=new t(e.current,...r),n.current&&a.add(n.current)),!e&&n.current&&a.remove(n.current),()=>{n.current&&a.remove(n.current)})),[a,t,e,r]),(0,o.useFrame)((()=>{var e;null!=(e=n.current)&&e.update&&n.current.update()})),n}const Fo=new s.Vector3,Lo=new s.Vector3,Do=new s.Vector3,Uo=new s.Vector2,No=new s.Vector2,ko=new s.Vector2,Oo=new s.Vector3;function Go(e,t,r,n,i,a,o){Fo.fromBufferAttribute(t,n),Lo.fromBufferAttribute(t,i),Do.fromBufferAttribute(t,a);const l=function(e,t,r,n,i,a){let o;return o=a===s.BackSide?e.intersectTriangle(n,r,t,!0,i):e.intersectTriangle(t,r,n,a!==s.DoubleSide,i),null===o?null:{distance:e.origin.distanceTo(i),point:i.clone()}}(e,Fo,Lo,Do,Oo,o);if(l){r&&(Uo.fromBufferAttribute(r,n),No.fromBufferAttribute(r,i),ko.fromBufferAttribute(r,a),l.uv=s.Triangle.getUV(Oo,Fo,Lo,Do,Uo,No,ko,new s.Vector2));const e={a:n,b:i,c:a,normal:new s.Vector3,materialIndex:0};s.Triangle.getNormal(Fo,Lo,Do,e.normal),l.face=e,l.faceIndex=n}return l}function zo(e,t,r,n,i){const a=3*n,s=e.index.getX(a),o=e.index.getX(a+1),l=e.index.getX(a+2),c=Go(r,e.attributes.position,e.attributes.uv,s,o,l,t);return c?(c.faceIndex=n,i&&i.push(c),c):null}function Vo(e,t,r){return null===e?null:(e.point.applyMatrix4(t.matrixWorld),e.distance=e.point.distanceTo(r.ray.origin),e.object=t,e.distancer.far?null:e)}const 
Ho=1.25,Qo=65535,jo=Math.pow(2,-24);class Wo{constructor(){}}function Xo(e,t,r){return r.min.x=t[e],r.min.y=t[e+1],r.min.z=t[e+2],r.max.x=t[e+3],r.max.y=t[e+4],r.max.z=t[e+5],r}function Yo(e){let t=-1,r=-1/0;for(let n=0;n<3;n++){const i=e[n+3]-e[n];i>r&&(r=i,t=n)}return t}function qo(e,t){t.set(e)}function Ko(e,t,r){let n,i;for(let a=0;a<3;a++){const s=a+3;n=e[a],i=t[a],r[a]=ni?n:i}}function Jo(e,t,r){for(let n=0;n<3;n++){const i=t[e+2*n],a=t[e+2*n+1],s=i-a,o=i+a;sr[n+3]&&(r[n+3]=o)}}function Zo(e){const t=e[3]-e[0],r=e[4]-e[1],n=e[5]-e[2];return 2*(t*r+r*n+n*t)}function $o(e,t,r,n,i=null){let a=1/0,s=1/0,o=1/0,l=-1/0,c=-1/0,u=-1/0,h=1/0,d=1/0,p=1/0,f=-1/0,m=-1/0,g=-1/0;const v=null!==i;for(let A=6*t,y=6*(t+r);Al&&(l=i),v&&tf&&(f=t);const y=e[A+2],x=e[A+3],b=y-x,w=y+x;bc&&(c=w),v&&ym&&(m=y);const E=e[A+4],_=e[A+5],S=E-_,M=E+_;Su&&(u=M),v&&Eg&&(g=E)}n[0]=a,n[1]=s,n[2]=o,n[3]=l,n[4]=c,n[5]=u,v&&(i[0]=h,i[1]=d,i[2]=p,i[3]=f,i[4]=m,i[5]=g)}const el=32,tl=(e,t)=>e.candidate-t.candidate,rl=new Array(el).fill().map((()=>({count:0,bounds:new Float32Array(6),rightCacheBounds:new Float32Array(6),leftCacheBounds:new Float32Array(6),candidate:0}))),nl=new Float32Array(6);function il(e,t){function r(e){p&&p(e/f)}function n(t,i,s,p=null,f=0){if(!m&&f>=c&&(m=!0,u&&(console.warn(`MeshBVH: Max depth of ${c} reached when generating BVH. 
Consider increasing maxDepth.`),console.warn(e))),s<=h||f>=c)return r(i+s),t.offset=i,t.count=s,t;const g=function(e,t,r,n,i,a){let s=-1,o=0;if(0===a)s=Yo(t),-1!==s&&(o=(t[s]+t[s+3])/2);else if(1===a)s=Yo(e),-1!==s&&(o=function(e,t,r,n){let i=0;for(let a=t,s=t+r;a=a.candidate?Jo(i,r,a.rightCacheBounds):(Jo(i,r,a.leftCacheBounds),a.count++)}}for(let r=0;r=el&&(t=31);const a=rl[t];a.count++,Jo(i,r,a.bounds)}const t=rl[31];qo(t.bounds,t.rightCacheBounds);for(let e=30;e>=0;e--){const t=rl[e],r=rl[e+1];Ko(t.bounds,r.rightCacheBounds,t.rightCacheBounds)}let d=0;for(let r=0;r<31;r++){const t=rl[r],n=t.count,c=t.bounds,u=rl[r+1].rightCacheBounds;0!==n&&(0===d?qo(c,nl):Ko(c,nl,nl)),d+=n;let h=0,p=0;0!==d&&(h=Zo(nl)/a);const f=i-d;0!==f&&(p=Zo(u)/a);const m=1+Ho*(h*d+p*f);m=o;)s--;if(!(a65535?new Uint32Array(new n(4*r)):new Uint16Array(new n(2*r)),e.setIndex(new s.BufferAttribute(i,1));for(let e=0;em&&(m=l),c>m&&(m=c);const g=(m-h)/2,v=2*n;a[i+v+0]=h+g,a[i+v+1]=g+(Math.abs(h)+g)*jo,ht[n+3]&&(t[n+3]=m)}}return a}(e,i),l=e.index.array,c=t.maxDepth,u=t.verbose,h=t.maxLeafTris,d=t.strategy,p=t.onProgress,f=e.index.count/3;let m=!1;const g=[],v=function(e){if(!e.groups||!e.groups.length)return[{offset:0,count:e.index.count/3}];const t=[],r=new Set;for(const i of e.groups)r.add(i.start),r.add(i.start+i.count);const n=Array.from(r.values()).sort(((e,t)=>e-t));for(let i=0;io&&(o=t);const r=e[u+2];rl&&(l=r);const n=e[u+4];nc&&(c=n)}n[0]=i,n[1]=a,n[2]=s,n[3]=o,n[4]=l,n[5]=c}(o,e.offset,e.count,a),n(t,e.offset,e.count,a),g.push(t)}else for(let s of v){const e=new Wo;e.boundingData=new Float32Array(6),$o(o,s.offset,s.count,e.boundingData,a),n(e,s.offset,s.count,a),g.push(e)}return g}class al{constructor(){this.min=1/0,this.max=-1/0}setFromPointsField(e,t){let r=1/0,n=-1/0;for(let i=0,a=e.length;in?a:n}this.min=r,this.max=n}setFromPoints(e,t){let r=1/0,n=-1/0;for(let i=0,a=t.length;in?s:n}this.min=r,this.max=n}isSeparated(e){return 
this.min>e.max||e.min>this.max}}al.prototype.setFromBox=function(){const e=new s.Vector3;return function(t,r){const n=r.min,i=r.max;let a=1/0,s=-1/0;for(let o=0;o<=1;o++)for(let r=0;r<=1;r++)for(let l=0;l<=1;l++){e.x=n.x*o+i.x*(1-o),e.y=n.y*r+i.y*(1-r),e.z=n.z*l+i.z*(1-l);const c=t.dot(e);a=Math.min(c,a),s=Math.max(c,s)}this.min=a,this.max=s}}();!function(){const e=new al}();const sl=function(){const e=new s.Vector3,t=new s.Vector3,r=new s.Vector3;return function(n,i,a){const s=n.start,o=e,l=i.start,c=t;r.subVectors(s,l),e.subVectors(n.end,n.start),t.subVectors(i.end,i.start);const u=r.dot(c),h=c.dot(o),d=c.dot(c),p=r.dot(o),f=o.dot(o)*d-h*h;let m,g;m=0!==f?(u*h-p*d)/f:0,g=(u+m*h)/d,a.x=m,a.y=g}}(),ol=function(){const e=new s.Vector2,t=new s.Vector3,r=new s.Vector3;return function(n,i,a,s){sl(n,i,e);let o=e.x,l=e.y;if(o>=0&&o<=1&&l>=0&&l<=1)return n.at(o,a),void i.at(l,s);if(o>=0&&o<=1)return l<0?i.at(0,s):i.at(1,s),void n.closestPointToPoint(s,!0,a);if(l>=0&&l<=1)return o<0?n.at(0,a):n.at(1,a),void i.closestPointToPoint(a,!0,s);{let e,c;e=o<0?n.start:n.end,c=l<0?i.start:i.end;const u=t,h=r;return n.closestPointToPoint(c,!0,t),i.closestPointToPoint(e,!0,r),u.distanceToSquared(c)<=h.distanceToSquared(e)?(a.copy(u),void s.copy(c)):(a.copy(e),void s.copy(h))}}}(),ll=function(){const e=new s.Vector3,t=new s.Vector3,r=new s.Plane,n=new s.Line3;return function(i,a){const{radius:s,center:o}=i,{a:l,b:c,c:u}=a;n.start=l,n.end=c;if(n.closestPointToPoint(o,!0,e).distanceTo(o)<=s)return!0;n.start=l,n.end=u;if(n.closestPointToPoint(o,!0,e).distanceTo(o)<=s)return!0;n.start=c,n.end=u;if(n.closestPointToPoint(o,!0,e).distanceTo(o)<=s)return!0;const h=a.getPlane(r);if(Math.abs(h.distanceToPoint(o))<=s){const e=h.projectPoint(o,t);if(a.containsPoint(e))return!0}return!1}}();function cl(e){return Math.abs(e)<1e-15}class ul extends s.Triangle{constructor(...e){super(...e),this.isExtendedTriangle=!0,this.satAxes=new Array(4).fill().map((()=>new s.Vector3)),this.satBounds=new 
Array(4).fill().map((()=>new al)),this.points=[this.a,this.b,this.c],this.sphere=new s.Sphere,this.plane=new s.Plane,this.needsUpdate=!0}intersectsSphere(e){return ll(e,this)}update(){const e=this.a,t=this.b,r=this.c,n=this.points,i=this.satAxes,a=this.satBounds,s=i[0],o=a[0];this.getNormal(s),o.setFromPoints(s,n);const l=i[1],c=a[1];l.subVectors(e,t),c.setFromPoints(l,n);const u=i[2],h=a[2];u.subVectors(t,r),h.setFromPoints(u,n);const d=i[3],p=a[3];d.subVectors(r,e),p.setFromPoints(d,n),this.sphere.setFromPoints(this.points),this.plane.setFromNormalAndCoplanarPoint(s,e),this.needsUpdate=!1}}ul.prototype.closestPointToSegment=function(){const e=new s.Vector3,t=new s.Vector3,r=new s.Line3;return function(n,i=null,a=null){const{start:s,end:o}=n,l=this.points;let c,u=1/0;for(let h=0;h<3;h++){const s=(h+1)%3;r.start.copy(l[h]),r.end.copy(l[s]),ol(r,n,e,t),c=e.distanceToSquared(t),c1-1e-10){const e=this.satBounds,o=this.satAxes;r[0]=s.a,r[1]=s.b,r[2]=s.c;for(let t=0;t<4;t++){const i=e[t],a=o[t];if(n.setFromPoints(a,r),i.isSeparated(n))return!1}const l=s.satBounds,c=s.satAxes;t[0]=this.a,t[1]=this.b,t[2]=this.c;for(let r=0;r<4;r++){const e=l[r],i=c[r];if(n.setFromPoints(i,t),e.isSeparated(n))return!1}for(let s=0;s<4;s++){const e=o[s];for(let s=0;s<4;s++){const o=c[s];if(a.crossVectors(e,o),n.setFromPoints(a,t),i.setFromPoints(a,r),n.isSeparated(i))return!1}}return p&&(f||console.warn("ExtendedTriangle.intersectsTriangle: Triangles are coplanar which does not support an output edge. 
Setting edge to 0, 0, 0."),p.start.set(0,0,0),p.end.set(0,0,0)),!0}{const e=this.points;let t=!1,r=0;for(let s=0;s<3;s++){const n=e[s],i=e[(s+1)%3];u.start.copy(n),u.end.copy(i),u.delta(o);const a=t?h.start:h.end,l=cl(g.distanceToPoint(n));if(cl(g.normal.dot(o))&&l){h.copy(u),r=2;break}if((g.intersectLine(u,a)||l)&&!cl(a.distanceTo(i))){if(r++,t)break;t=!0}}if(1===r&&s.containsPoint(h.end))return p&&(p.start.copy(h.end),p.end.copy(h.end)),!0;if(2!==r)return!1;const n=s.points;let i=!1,a=0;for(let s=0;s<3;s++){const e=n[s],t=n[(s+1)%3];u.start.copy(e),u.end.copy(t),u.delta(l);const r=i?d.start:d.end,o=cl(m.distanceToPoint(e));if(cl(m.normal.dot(l))&&o){d.copy(u),a=2;break}if((m.intersectLine(u,r)||o)&&!cl(r.distanceTo(t))){if(a++,i)break;i=!0}}if(1===a&&this.containsPoint(d.end))return p&&(p.start.copy(d.end),p.end.copy(d.end)),!0;if(2!==a)return!1;if(h.delta(o),d.delta(l),o.dot(l)<0){let e=d.start;d.start=d.end,d.end=e}const f=h.start.dot(o),v=h.end.dot(o),A=d.start.dot(o),y=d.end.dot(o),x=v0?p.start.copy(h.start):p.start.copy(d.start),c.subVectors(h.end,d.end),c.dot(o)<0?p.end.copy(h.end):p.end.copy(d.end)),!0)}}}(),ul.prototype.distanceToPoint=function(){const e=new s.Vector3;return function(t){return this.closestPointToPoint(t,e),t.distanceTo(e)}}(),ul.prototype.distanceToTriangle=function(){const e=new s.Vector3,t=new s.Vector3,r=["a","b","c"],n=new s.Line3,i=new s.Line3;return function(a,s=null,o=null){const l=s||o?n:null;if(this.intersectsTriangle(a,l))return(s||o)&&(s&&l.getCenter(s),o&&l.getCenter(o)),0;let c=1/0;for(let t=0;t<3;t++){let n;const i=r[t],l=a[i];this.closestPointToPoint(l,e),n=l.distanceToSquared(e),nnew s.Vector3)),this.satAxes=new Array(3).fill().map((()=>new s.Vector3)),this.satBounds=new Array(3).fill().map((()=>new al)),this.alignedSatBounds=new Array(3).fill().map((()=>new 
al)),this.needsUpdate=!1,e&&this.min.copy(e),t&&this.max.copy(t),r&&this.matrix.copy(r)}set(e,t,r){this.min.copy(e),this.max.copy(t),this.matrix.copy(r),this.needsUpdate=!0}copy(e){this.min.copy(e.min),this.max.copy(e.max),this.matrix.copy(e.matrix),this.needsUpdate=!0}}function dl(e,t,r,n){const i=e.a,a=e.b,s=e.c;let o=t,l=t+1,c=t+2;r&&(o=r.getX(t),l=r.getX(t+1),c=r.getX(t+2)),i.x=n.getX(o),i.y=n.getY(o),i.z=n.getZ(o),a.x=n.getX(l),a.y=n.getY(l),a.z=n.getZ(l),s.x=n.getX(c),s.y=n.getY(c),s.z=n.getZ(c)}function pl(e,t,r,n,i,a,s){const o=r.index,l=r.attributes.position;for(let c=e,u=t+e;cnew s.Line3)),r=new Array(12).fill().map((()=>new s.Line3)),n=new s.Vector3,i=new s.Vector3;return function(a,s=0,o=null,l=null){if(this.needsUpdate&&this.update(),this.intersectsBox(a))return(o||l)&&(a.getCenter(i),this.closestPointToPoint(i,n),a.closestPointToPoint(n,i),o&&o.copy(n),l&&l.copy(i)),0;const c=s*s,u=a.min,h=a.max,d=this.points;let p=1/0;for(let e=0;e<8;e++){const t=d[e];i.copy(t).clamp(u,h);const r=t.distanceToSquared(i);if(r=0;let c,u;l?(c=Al(e),u=yl(e,o)):(c=yl(e,o),u=Al(e));const h=Tl(c,a,n,bl)?_l(c,t,r,n):null;if(h){const e=h.point[s];if(l?e<=a[u+i]:e>=a[u+i+3])return h}const d=Tl(u,a,n,bl)?_l(u,t,r,n):null;return h&&d?h.distance<=d.distance?h:d:h||d||null}}const Sl=function(){let e,t;const r=[],n=new fl((()=>new s.Box3));return function(...a){e=n.getPrimitive(),t=n.getPrimitive(),r.push(e,t);const s=i(...a);n.releasePrimitive(e),n.releasePrimitive(t),r.pop(),r.pop();const o=r.length;return o>0&&(t=r[o-1],e=r[o-2]),s};function i(r,n,a,s,o=null,l=0,c=0){function u(e){let t=2*e,r=Rl,n=Pl;for(;!ml(t,r);)t=2*(e=Al(e));return gl(e,n)}function h(e){let t=2*e,r=Rl,n=Pl;for(;!ml(t,r);)t=2*(e=yl(e,n));return gl(e,n)+vl(t,r)}let d=2*r,p=Bl,f=Rl,m=Pl;if(ml(d,f)){const t=gl(r,m),n=vl(d,f);return Xo(r,p,e),s(t,n,!1,c,l+r,e)}{const d=Al(r),g=yl(r,m);let 
v,A,y,x,b=d,w=g;if(o&&(y=e,x=t,Xo(b,p,y),Xo(w,p,x),v=o(y),A=o(x),Ai.intersectsBox(e),intersectsTriangle:e=>{e.a.applyMatrix4(c),e.b.applyMatrix4(c),e.c.applyMatrix4(c),e.needsUpdate=!0;for(let r=3*v,n=3*(A+v);rnew ul));class Yl{static serialize(e,t={}){if(t.isBufferGeometry)return console.warn("MeshBVH.serialize: The arguments for the function have changed. See documentation for new signature."),Yl.serialize(arguments[0],{cloneBuffers:void 0===arguments[2]||arguments[2]});t={cloneBuffers:!0,...t};const r=e.geometry,n=e._roots,i=r.getIndex();let a;return a=t.cloneBuffers?{roots:n.map((e=>e.slice())),index:i.array.slice()}:{roots:n,index:i.array},a}static deserialize(e,t,r={}){if("boolean"==typeof r)return console.warn("MeshBVH.deserialize: The arguments for the function have changed. See documentation for new signature."),Yl.deserialize(arguments[0],arguments[1],{setIndex:void 0===arguments[2]||arguments[2]});r={setIndex:!0,...r};const{index:n,roots:i}=e,a=new Yl(t,{...r,[Dl]:!0});if(a._roots=i,r.setIndex){const r=t.getIndex();if(null===r){const r=new s.BufferAttribute(e.index,1,!1);t.setIndex(r)}else r.array!==n&&(r.array.set(n),r.needsUpdate=!0)}return a}constructor(e,t={}){if(!e.isBufferGeometry)throw new Error("MeshBVH: Only BufferGeometries are supported.");if(e.index&&e.index.isInterleavedBufferAttribute)throw new Error("MeshBVH: InterleavedBufferAttribute is not supported for the index attribute.");if((t=Object.assign({strategy:0,maxDepth:40,maxLeafTris:10,verbose:!0,useSharedArrayBuffer:!1,setBoundingBox:!0,onProgress:null,[Dl]:!1},t)).useSharedArrayBuffer&&"undefined"==typeof SharedArrayBuffer)throw new Error("MeshBVH: SharedArrayBuffer is not available.");this._roots=null,t[Dl]||(this._roots=function(e,t){const r=il(e,t);let n,i,a;const s=[],o=t.useSharedArrayBuffer?SharedArrayBuffer:ArrayBuffer;for(let u=0;uMath.pow(2,32))throw new Error("MeshBVH: Cannot store child pointer greater than 32 bits.");return 
i[r+6]=o/4,o=c(o,a),i[r+7]=s,o}}}(e,t),!e.boundingBox&&t.setBoundingBox&&(e.boundingBox=this.getBoundingBox(new s.Box3))),this.geometry=e}refit(e=null){e&&Array.isArray(e)&&(e=new Set(e));const t=this.geometry,r=t.index.array,n=t.attributes.position;let i,a,s,o,l=0;const c=this._roots;for(let h=0,d=c.length;hh&&(h=a),sd&&(d=s),op&&(p=o)}return(o[t+0]!==i||o[t+1]!==l||o[t+2]!==u||o[t+3]!==h||o[t+4]!==d||o[t+5]!==p)&&(o[t+0]=i,o[t+1]=l,o[t+2]=u,o[t+3]=h,o[t+4]=d,o[t+5]=p,!0)}{const r=t+8,n=a[t+6],s=r+i,c=n+i;let h=l,d=!1,p=!1;e?h||(d=e.has(s),p=e.has(c),h=!d&&!p):(d=!0,p=!0);const f=h||p;let m=!1;(h||d)&&(m=u(r,i,h));let g=!1;f&&(g=u(n,i,h));const v=m||g;if(v)for(let e=0;e<3;e++){const i=r+e,a=n+e,s=o[i],l=o[i+3],c=o[a],u=o[a+3];o[t+e]=su?l:u}return v}}}traverse(e,t=0){const r=this._roots[t],n=new Uint32Array(r),i=new Uint16Array(r);!function t(a,s=0){const o=2*a,l=i[o+15]===Qo;if(l){const t=n[a+6],c=i[o+14];e(s,l,new Float32Array(r,4*a,6),t,c)}else{const i=a+8,o=n[a+6],c=n[a+7];e(s,l,new Float32Array(r,4*a,6),c)||(t(i,s+1),t(o,s+1))}}(0)}raycast(e,t=s.FrontSide){const r=this._roots,n=this.geometry,i=[],a=t.isMaterial,o=Array.isArray(t),l=n.groups,c=a?t.side:t;for(let s=0,u=r.length;s{const a=3*r;return e(t,a,a+1,a+2,n,i)}}e={boundsTraverseOrder:r,intersectsBounds:e,intersectsTriangle:t,intersectsRange:null},console.warn("MeshBVH: Shapecast function signature has changed and now takes an object of callbacks as a second argument. 
See docs for new signature.")}const i=Xl.getPrimitive();let{boundsTraverseOrder:a,intersectsBounds:s,intersectsRange:o,intersectsTriangle:l}=e;if(o&&l){const e=o;o=(t,r,a,s,o)=>!!e(t,r,a,s,o)||pl(t,r,n,l,a,s,i)}else o||(o=l?(e,t,r,a)=>pl(e,t,n,l,r,a,i):(e,t,r)=>r);let c=!1,u=0;for(const h of this._roots){if(Fl(h),c=Sl(0,n,s,o,a,u),Ll(),c)break;u+=h.byteLength}return Xl.releasePrimitive(i),c}bvhcast(e,t,r){let{intersectsRanges:n,intersectsTriangles:i}=r;const a=this.geometry.index,s=this.geometry.attributes.position,o=e.geometry.index,l=e.geometry.attributes.position;kl.copy(t).invert();const c=Xl.getPrimitive(),u=Xl.getPrimitive();if(i){function d(e,r,n,h,d,p,f,m){for(let g=n,v=n+h;gNl.intersectsBox(e),intersectsRange:(t,r,i,a,s,o)=>(Ul.copy(o),Ul.applyMatrix4(kl),e.shapecast({intersectsBounds:e=>Ul.intersectsBox(e),intersectsRange:(e,i,o,l,c)=>n(t,r,e,i,a,s,l,c)}))});return Xl.releasePrimitive(c),Xl.releasePrimitive(u),h}intersectsBox(e,t){return Ol.set(e.min,e.max,t),Ol.needsUpdate=!0,this.shapecast({intersectsBounds:e=>Ol.intersectsBox(e),intersectsTriangle:e=>Ol.intersectsTriangle(e)})}intersectsSphere(e){return this.shapecast({intersectsBounds:t=>e.intersectsBox(t),intersectsTriangle:t=>t.intersectsSphere(e)})}closestPointToGeometry(e,t,r={},n={},i=0,a=1/0){e.boundingBox||e.computeBoundingBox(),Ol.set(e.boundingBox.min,e.boundingBox.max,t),Ol.needsUpdate=!0;const s=this.geometry,o=s.attributes.position,l=s.index,c=e.attributes.position,u=e.index,h=Xl.getPrimitive(),d=Xl.getPrimitive();let p=Vl,f=Hl,m=null,g=null;n&&(m=Ql,g=jl);let v=1/0,A=null,y=null;return kl.copy(t).invert(),Gl.matrix.copy(kl),this.shapecast({boundsTraverseOrder:e=>Ol.distanceToBox(e),intersectsBounds:(e,t,r)=>r{if(e.boundsTree)return e.boundsTree.shapecast({boundsTraverseOrder:e=>Gl.distanceToBox(e),intersectsBounds:(e,t,r)=>r{for(let s=3*e,x=3*(e+a);s(zl.copy(e).clamp(t.min,t.max),zl.distanceToSquared(e)),intersectsBounds:(e,t,r)=>r{t.closestPointToPoint(e,zl);const 
n=e.distanceToSquared(zl);return n{Xo(0,new Float32Array(t),Wl),e.union(Wl)})),e}}const ql=new s.Ray,Kl=new s.Matrix4,Jl=s.Mesh.prototype.raycast;function Zl(e,t){if(this.geometry.boundsTree){if(void 0===this.material)return;Kl.copy(this.matrixWorld).invert(),ql.copy(e.ray).applyMatrix4(Kl);const r=this.geometry.boundsTree;if(!0===e.firstHitOnly){const n=Vo(r.raycastFirst(ql,this.material),this,e);n&&t.push(n)}else{const n=r.raycast(ql,this.material);for(let r=0,i=n.length;r{if(e.current){e.current.raycast=Zl;const r=e.current.geometry;return r.computeBoundsTree=$l,r.disposeBoundsTree=ec,r.computeBoundsTree(t),()=>{r.boundsTree&&r.disposeBoundsTree()}}}),[e,t])}function rc(...e){const t=i.useRef([]);return t.current=e.map((e=>i.useContext(e))),i.useMemo((()=>({children:r})=>e.reduceRight(((e,r,n)=>i.createElement(r.Provider,{value:t.current[n],children:e})),r)),[])}function nc(e,t){const r=i.useRef(),[n]=i.useState((()=>t?t instanceof s.Object3D?{current:t}:t:r)),[a]=i.useState((()=>new s.AnimationMixer(void 0))),l=i.useRef({}),[c]=i.useState((()=>{const t={};return e.forEach((e=>Object.defineProperty(t,e.name,{enumerable:!0,get(){if(n.current)return l.current[e.name]||(l.current[e.name]=a.clipAction(e,n.current))}}))),{ref:n,clips:e,actions:t,names:e.map((e=>e.name)),mixer:a}}));return(0,o.useFrame)(((e,t)=>a.update(t))),i.useEffect((()=>{const e=n.current;return()=>{l.current={},Object.values(c.actions).forEach((t=>{e&&a.uncacheAction(t,e)}))}}),[e]),c}function ic(e){const t=i.useRef(null),r=i.useRef(!1),n=i.useRef(!1);return i.useEffect((()=>{const i=t.current;if(i){const t=(0,o.addEffect)((()=>(r.current=!1,!0))),a=i.onBeforeRender;i.onBeforeRender=()=>r.current=!0;const s=(0,o.addAfterEffect)((()=>(r.current!==n.current&&e(n.current=r.current),!0)));return()=>{i.onBeforeRender=a,t(),s()}}}),[]),t}function ac(e=new s.Vector3,t=new s.Vector3){const[r]=i.useState((()=>({position:new s.Vector3,size:new s.Vector3})));(0,o.applyProps)(r,{position:e,size:t});const 
n=i.useRef(null),a=i.useMemo((()=>({ref:n,onBeforeCompile:e=>function(e,t,r){e.defines.BOX_PROJECTED_ENV_MAP=!0,e.uniforms.envMapPosition={value:t},e.uniforms.envMapSize={value:r},e.vertexShader=`\n varying vec3 vWorldPosition;\n ${e.vertexShader.replace("#include ","\n#if defined( USE_ENVMAP ) || defined( DISTANCE ) || defined ( USE_SHADOWMAP )\n vec4 worldPosition = modelMatrix * vec4( transformed, 1.0 );\n #ifdef BOX_PROJECTED_ENV_MAP\n vWorldPosition = worldPosition.xyz;\n #endif\n#endif\n")}`,e.fragmentShader=`\n \n#ifdef BOX_PROJECTED_ENV_MAP\n uniform vec3 envMapSize;\n uniform vec3 envMapPosition;\n varying vec3 vWorldPosition;\n \n vec3 parallaxCorrectNormal( vec3 v, vec3 cubeSize, vec3 cubePos ) {\n vec3 nDir = normalize( v );\n vec3 rbmax = ( .5 * cubeSize + cubePos - vWorldPosition ) / nDir;\n vec3 rbmin = ( -.5 * cubeSize + cubePos - vWorldPosition ) / nDir;\n vec3 rbminmax;\n rbminmax.x = ( nDir.x > 0. ) ? rbmax.x : rbmin.x;\n rbminmax.y = ( nDir.y > 0. ) ? rbmax.y : rbmin.y;\n rbminmax.z = ( nDir.z > 0. ) ? 
rbmax.z : rbmin.z;\n float correction = min( min( rbminmax.x, rbminmax.y ), rbminmax.z );\n vec3 boxIntersection = vWorldPosition + nDir * correction; \n return boxIntersection - cubePos;\n }\n#endif\n\n ${e.fragmentShader.replace("#include ",s.ShaderChunk.envmap_physical_pars_fragment).replace("vec3 worldNormal = inverseTransformDirection( normal, viewMatrix );","vec3 worldNormal = inverseTransformDirection( normal, viewMatrix );\n \n#ifdef BOX_PROJECTED_ENV_MAP\n worldNormal = parallaxCorrectNormal( worldNormal, envMapSize, envMapPosition );\n#endif\n\n ").replace("reflectVec = inverseTransformDirection( reflectVec, viewMatrix );","reflectVec = inverseTransformDirection( reflectVec, viewMatrix );\n \n#ifdef BOX_PROJECTED_ENV_MAP\n reflectVec = parallaxCorrectNormal( reflectVec, envMapSize, envMapPosition );\n#endif\n\n ")}`}(e,r.position,r.size),customProgramCacheKey:()=>JSON.stringify(r.position.toArray())+JSON.stringify(r.size.toArray())})),[...r.position.toArray(),...r.size.toArray()]);return i.useLayoutEffect((()=>{n.current.needsUpdate=!0}),[r]),a}var sc=r(53615);const oc=i.forwardRef((({children:e,curve:t},r)=>{const[n]=i.useState((()=>new s.Scene)),[a,l]=i.useState(),c=i.useRef();return i.useEffect((()=>{c.current=new sc.Zw(n.children[0]),l(c.current.object3D)}),[e]),i.useEffect((()=>{var e;t&&(null==(e=c.current)||e.updateCurve(0,t))}),[t]),i.useImperativeHandle(r,(()=>({moveAlongCurve:e=>{var t;null==(t=c.current)||t.moveAlongCurve(e)}}))),i.createElement(i.Fragment,null,(0,o.createPortal)(e,n),a&&i.createElement("primitive",{object:a}))}));class lc extends s.MeshPhysicalMaterial{constructor(e={}){super(e),this.setValues(e),this._time={value:0},this._distort={value:.4},this._radius={value:1}}onBeforeCompile(e){e.uniforms.time=this._time,e.uniforms.radius=this._radius,e.uniforms.distort=this._distort,e.vertexShader=`\n uniform float time;\n uniform float radius;\n uniform float distort;\n #define GLSLIFY 1\nvec3 mod289(vec3 x){return 
x-floor(x*(1.0/289.0))*289.0;}vec4 mod289(vec4 x){return x-floor(x*(1.0/289.0))*289.0;}vec4 permute(vec4 x){return mod289(((x*34.0)+1.0)*x);}vec4 taylorInvSqrt(vec4 r){return 1.79284291400159-0.85373472095314*r;}float snoise(vec3 v){const vec2 C=vec2(1.0/6.0,1.0/3.0);const vec4 D=vec4(0.0,0.5,1.0,2.0);vec3 i=floor(v+dot(v,C.yyy));vec3 x0=v-i+dot(i,C.xxx);vec3 g=step(x0.yzx,x0.xyz);vec3 l=1.0-g;vec3 i1=min(g.xyz,l.zxy);vec3 i2=max(g.xyz,l.zxy);vec3 x1=x0-i1+C.xxx;vec3 x2=x0-i2+C.yyy;vec3 x3=x0-D.yyy;i=mod289(i);vec4 p=permute(permute(permute(i.z+vec4(0.0,i1.z,i2.z,1.0))+i.y+vec4(0.0,i1.y,i2.y,1.0))+i.x+vec4(0.0,i1.x,i2.x,1.0));float n_=0.142857142857;vec3 ns=n_*D.wyz-D.xzx;vec4 j=p-49.0*floor(p*ns.z*ns.z);vec4 x_=floor(j*ns.z);vec4 y_=floor(j-7.0*x_);vec4 x=x_*ns.x+ns.yyyy;vec4 y=y_*ns.x+ns.yyyy;vec4 h=1.0-abs(x)-abs(y);vec4 b0=vec4(x.xy,y.xy);vec4 b1=vec4(x.zw,y.zw);vec4 s0=floor(b0)*2.0+1.0;vec4 s1=floor(b1)*2.0+1.0;vec4 sh=-step(h,vec4(0.0));vec4 a0=b0.xzyw+s0.xzyw*sh.xxyy;vec4 a1=b1.xzyw+s1.xzyw*sh.zzww;vec3 p0=vec3(a0.xy,h.x);vec3 p1=vec3(a0.zw,h.y);vec3 p2=vec3(a1.xy,h.z);vec3 p3=vec3(a1.zw,h.w);vec4 norm=taylorInvSqrt(vec4(dot(p0,p0),dot(p1,p1),dot(p2,p2),dot(p3,p3)));p0*=norm.x;p1*=norm.y;p2*=norm.z;p3*=norm.w;vec4 m=max(0.6-vec4(dot(x0,x0),dot(x1,x1),dot(x2,x2),dot(x3,x3)),0.0);m=m*m;return 42.0*dot(m*m,vec4(dot(p0,x0),dot(p1,x1),dot(p2,x2),dot(p3,x3)));}\n ${e.vertexShader}\n `,e.vertexShader=e.vertexShader.replace("#include ","\n float updateTime = time / 50.0;\n float noise = snoise(vec3(position / 2.0 + updateTime * 5.0));\n vec3 transformed = vec3(position * (noise * pow(distort, 2.0) + radius));\n ")}get time(){return this._time.value}set time(e){this._time.value=e}get distort(){return this._distort.value}set distort(e){this._distort.value=e}get radius(){return this._radius.value}set radius(e){this._radius.value=e}}const cc=i.forwardRef((({speed:e=1,...t},r)=>{const[a]=i.useState((()=>new 
lc));return(0,o.useFrame)((t=>a&&(a.time=t.clock.getElapsedTime()*e))),i.createElement("primitive",n({dispose:void 0,object:a,ref:r,attach:"material"},t))}));class uc extends s.MeshStandardMaterial{constructor(e={}){super(e),this.setValues(e),this._time={value:0},this._factor={value:1}}onBeforeCompile(e){e.uniforms.time=this._time,e.uniforms.factor=this._factor,e.vertexShader=`\n uniform float time;\n uniform float factor;\n ${e.vertexShader}\n `,e.vertexShader=e.vertexShader.replace("#include ","float theta = sin( time + position.y ) / 2.0 * factor;\n float c = cos( theta );\n float s = sin( theta );\n mat3 m = mat3( c, 0, s, 0, 1, 0, -s, 0, c );\n vec3 transformed = vec3( position ) * m;\n vNormal = vNormal * m;")}get time(){return this._time.value}set time(e){this._time.value=e}get factor(){return this._factor.value}set factor(e){this._factor.value=e}}const hc=i.forwardRef((({speed:e=1,...t},r)=>{const[a]=i.useState((()=>new uc));return(0,o.useFrame)((t=>a&&(a.time=t.clock.getElapsedTime()*e))),i.createElement("primitive",n({dispose:void 0,object:a,ref:r,attach:"material"},t))}));class dc extends s.ShaderMaterial{constructor(e=new s.Vector2){super({uniforms:{inputBuffer:new s.Uniform(null),depthBuffer:new s.Uniform(null),resolution:new s.Uniform(new s.Vector2),texelSize:new s.Uniform(new s.Vector2),halfTexelSize:new s.Uniform(new s.Vector2),kernel:new s.Uniform(0),scale:new s.Uniform(1),cameraNear:new s.Uniform(0),cameraFar:new s.Uniform(1),minDepthThreshold:new s.Uniform(0),maxDepthThreshold:new s.Uniform(1),depthScale:new s.Uniform(0),depthToBlurRatioBias:new s.Uniform(.25)},fragmentShader:"#include \n #include \n uniform sampler2D inputBuffer;\n uniform sampler2D depthBuffer;\n uniform float cameraNear;\n uniform float cameraFar;\n uniform float minDepthThreshold;\n uniform float maxDepthThreshold;\n uniform float depthScale;\n uniform float depthToBlurRatioBias;\n varying vec2 vUv;\n varying vec2 vUv0;\n varying vec2 vUv1;\n varying vec2 vUv2;\n varying vec2 
vUv3;\n\n void main() {\n float depthFactor = 0.0;\n \n #ifdef USE_DEPTH\n vec4 depth = texture2D(depthBuffer, vUv);\n depthFactor = smoothstep(minDepthThreshold, maxDepthThreshold, 1.0-(depth.r * depth.a));\n depthFactor *= depthScale;\n depthFactor = max(0.0, min(1.0, depthFactor + 0.25));\n #endif\n \n vec4 sum = texture2D(inputBuffer, mix(vUv0, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv1, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv2, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv3, vUv, depthFactor));\n gl_FragColor = sum * 0.25 ;\n\n #include \n #include \n #include \n }",vertexShader:"uniform vec2 texelSize;\n uniform vec2 halfTexelSize;\n uniform float kernel;\n uniform float scale;\n varying vec2 vUv;\n varying vec2 vUv0;\n varying vec2 vUv1;\n varying vec2 vUv2;\n varying vec2 vUv3;\n\n void main() {\n vec2 uv = position.xy * 0.5 + 0.5;\n vUv = uv;\n\n vec2 dUv = (texelSize * vec2(kernel) + halfTexelSize) * scale;\n vUv0 = vec2(uv.x - dUv.x, uv.y + dUv.y);\n vUv1 = vec2(uv.x + dUv.x, uv.y + dUv.y);\n vUv2 = vec2(uv.x + dUv.x, uv.y - dUv.y);\n vUv3 = vec2(uv.x - dUv.x, uv.y - dUv.y);\n\n gl_Position = vec4(position.xy, 1.0, 1.0);\n }",blending:s.NoBlending,depthWrite:!1,depthTest:!1}),this.toneMapped=!1,this.setTexelSize(e.x,e.y),this.kernel=new Float32Array([0,1,2,2,3])}setTexelSize(e,t){this.uniforms.texelSize.value.set(e,t),this.uniforms.halfTexelSize.value.set(e,t).multiplyScalar(.5)}setResolution(e){this.uniforms.resolution.value.copy(e)}}class pc{constructor({gl:e,resolution:t,width:r=500,height:n=500,minDepthThreshold:i=0,maxDepthThreshold:a=1,depthScale:o=0,depthToBlurRatioBias:l=.25}){this.renderToScreen=!1,this.renderTargetA=new s.WebGLRenderTarget(t,t,{minFilter:s.LinearFilter,magFilter:s.LinearFilter,stencilBuffer:!1,depthBuffer:!1,encoding:e.outputEncoding}),this.renderTargetB=this.renderTargetA.clone(),this.convolutionMaterial=new 
dc,this.convolutionMaterial.setTexelSize(1/r,1/n),this.convolutionMaterial.setResolution(new s.Vector2(r,n)),this.scene=new s.Scene,this.camera=new s.Camera,this.convolutionMaterial.uniforms.minDepthThreshold.value=i,this.convolutionMaterial.uniforms.maxDepthThreshold.value=a,this.convolutionMaterial.uniforms.depthScale.value=o,this.convolutionMaterial.uniforms.depthToBlurRatioBias.value=l,this.convolutionMaterial.defines.USE_DEPTH=o>0;const c=new Float32Array([-1,-1,0,3,-1,0,-1,3,0]),u=new Float32Array([0,0,2,0,0,2]),h=new s.BufferGeometry;h.setAttribute("position",new s.BufferAttribute(c,3)),h.setAttribute("uv",new s.BufferAttribute(u,2)),this.screen=new s.Mesh(h,this.convolutionMaterial),this.screen.frustumCulled=!1,this.scene.add(this.screen)}render(e,t,r){const n=this.scene,i=this.camera,a=this.renderTargetA,s=this.renderTargetB;let o=this.convolutionMaterial,l=o.uniforms;l.depthBuffer.value=t.depthTexture;const c=o.kernel;let u,h,d,p=t;for(h=0,d=c.length-1;h","#include \n my_vUv = textureMatrix * vec4( position, 1.0 );\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );"),e.fragmentShader=`\n uniform sampler2D tDiffuse;\n uniform sampler2D tDiffuseBlur;\n uniform sampler2D tDepth;\n uniform sampler2D distortionMap;\n uniform float distortion;\n uniform float cameraNear;\n\t\t\t uniform float cameraFar;\n uniform bool hasBlur;\n uniform float mixBlur;\n uniform float mirror;\n uniform float mixStrength;\n uniform float minDepthThreshold;\n uniform float maxDepthThreshold;\n uniform float mixContrast;\n uniform float depthScale;\n uniform float depthToBlurRatioBias;\n varying vec4 my_vUv; \n ${e.fragmentShader}`,e.fragmentShader=e.fragmentShader.replace("#include ","#include \n \n float distortionFactor = 0.0;\n #ifdef USE_DISTORTION\n distortionFactor = texture2D(distortionMap, vUv).r * distortion;\n #endif\n\n vec4 new_vUv = my_vUv;\n new_vUv.x += distortionFactor;\n new_vUv.y += distortionFactor;\n\n vec4 base = 
texture2DProj(tDiffuse, new_vUv);\n vec4 blur = texture2DProj(tDiffuseBlur, new_vUv);\n \n vec4 merge = base;\n \n #ifdef USE_NORMALMAP\n vec2 normal_uv = vec2(0.0);\n vec4 normalColor = texture2D(normalMap, vUv * normalScale);\n vec3 my_normal = normalize( vec3( normalColor.r * 2.0 - 1.0, normalColor.b, normalColor.g * 2.0 - 1.0 ) );\n vec3 coord = new_vUv.xyz / new_vUv.w;\n normal_uv = coord.xy + coord.z * my_normal.xz * 0.05;\n vec4 base_normal = texture2D(tDiffuse, normal_uv);\n vec4 blur_normal = texture2D(tDiffuseBlur, normal_uv);\n merge = base_normal;\n blur = blur_normal;\n #endif\n\n float depthFactor = 0.0001;\n float blurFactor = 0.0;\n\n #ifdef USE_DEPTH\n vec4 depth = texture2DProj(tDepth, new_vUv);\n depthFactor = smoothstep(minDepthThreshold, maxDepthThreshold, 1.0-(depth.r * depth.a));\n depthFactor *= depthScale;\n depthFactor = max(0.0001, min(1.0, depthFactor));\n\n #ifdef USE_BLUR\n blur = blur * min(1.0, depthFactor + depthToBlurRatioBias);\n merge = merge * min(1.0, depthFactor + 0.5);\n #else\n merge = merge * depthFactor;\n #endif\n \n #endif\n\n float reflectorRoughnessFactor = roughness;\n #ifdef USE_ROUGHNESSMAP\n vec4 reflectorTexelRoughness = texture2D( roughnessMap, vUv );\n reflectorRoughnessFactor *= reflectorTexelRoughness.g;\n #endif\n \n #ifdef USE_BLUR\n blurFactor = min(1.0, mixBlur * reflectorRoughnessFactor);\n merge = mix(merge, blur, blurFactor);\n #endif\n\n vec4 newMerge = vec4(0.0, 0.0, 0.0, 1.0);\n newMerge.r = (merge.r - 0.5) * mixContrast + 0.5;\n newMerge.g = (merge.g - 0.5) * mixContrast + 0.5;\n newMerge.b = (merge.b - 0.5) * mixContrast + 0.5;\n\n diffuseColor.rgb = diffuseColor.rgb * ((1.0 - min(1.0, mirror)) + newMerge.rgb * mixStrength);\n ")}get tDiffuse(){return this._tDiffuse.value}set tDiffuse(e){this._tDiffuse.value=e}get tDepth(){return this._tDepth.value}set tDepth(e){this._tDepth.value=e}get distortionMap(){return this._distortionMap.value}set distortionMap(e){this._distortionMap.value=e}get 
tDiffuseBlur(){return this._tDiffuseBlur.value}set tDiffuseBlur(e){this._tDiffuseBlur.value=e}get textureMatrix(){return this._textureMatrix.value}set textureMatrix(e){this._textureMatrix.value=e}get hasBlur(){return this._hasBlur.value}set hasBlur(e){this._hasBlur.value=e}get mirror(){return this._mirror.value}set mirror(e){this._mirror.value=e}get mixBlur(){return this._mixBlur.value}set mixBlur(e){this._mixBlur.value=e}get mixStrength(){return this._blurStrength.value}set mixStrength(e){this._blurStrength.value=e}get minDepthThreshold(){return this._minDepthThreshold.value}set minDepthThreshold(e){this._minDepthThreshold.value=e}get maxDepthThreshold(){return this._maxDepthThreshold.value}set maxDepthThreshold(e){this._maxDepthThreshold.value=e}get depthScale(){return this._depthScale.value}set depthScale(e){this._depthScale.value=e}get depthToBlurRatioBias(){return this._depthToBlurRatioBias.value}set depthToBlurRatioBias(e){this._depthToBlurRatioBias.value=e}get distortion(){return this._distortion.value}set distortion(e){this._distortion.value=e}get mixContrast(){return this._mixContrast.value}set mixContrast(e){this._mixContrast.value=e}}(0,o.extend)({MeshReflectorMaterialImpl:fc});const mc=i.forwardRef((({mixBlur:e=0,mixStrength:t=1,resolution:r=256,blur:a=[0,0],minDepthThreshold:l=.9,maxDepthThreshold:c=1,depthScale:u=0,depthToBlurRatioBias:h=.25,mirror:d=0,distortion:p=1,mixContrast:f=1,distortionMap:m,reflectorOffset:g=0,...v},A)=>{const y=(0,o.useThree)((({gl:e})=>e)),x=(0,o.useThree)((({camera:e})=>e)),b=(0,o.useThree)((({scene:e})=>e)),w=(a=Array.isArray(a)?a:[a,a])[0]+a[1]>0,E=i.useRef(null),[_]=i.useState((()=>new s.Plane)),[S]=i.useState((()=>new s.Vector3)),[T]=i.useState((()=>new s.Vector3)),[C]=i.useState((()=>new s.Vector3)),[I]=i.useState((()=>new s.Matrix4)),[B]=i.useState((()=>new s.Vector3(0,0,-1))),[R]=i.useState((()=>new s.Vector4)),[P]=i.useState((()=>new s.Vector3)),[F]=i.useState((()=>new s.Vector3)),[L]=i.useState((()=>new 
s.Vector4)),[D]=i.useState((()=>new s.Matrix4)),[U]=i.useState((()=>new s.PerspectiveCamera)),N=i.useCallback((()=>{var e;const t=E.current.parent||(null==(e=E.current)?void 0:e.__r3f.parent);if(!t)return;if(T.setFromMatrixPosition(t.matrixWorld),C.setFromMatrixPosition(x.matrixWorld),I.extractRotation(t.matrixWorld),S.set(0,0,1),S.applyMatrix4(I),T.addScaledVector(S,g),P.subVectors(T,C),P.dot(S)>0)return;P.reflect(S).negate(),P.add(T),I.extractRotation(x.matrixWorld),B.set(0,0,-1),B.applyMatrix4(I),B.add(C),F.subVectors(T,B),F.reflect(S).negate(),F.add(T),U.position.copy(P),U.up.set(0,1,0),U.up.applyMatrix4(I),U.up.reflect(S),U.lookAt(F),U.far=x.far,U.updateMatrixWorld(),U.projectionMatrix.copy(x.projectionMatrix),D.set(.5,0,0,.5,0,.5,0,.5,0,0,.5,.5,0,0,0,1),D.multiply(U.projectionMatrix),D.multiply(U.matrixWorldInverse),D.multiply(t.matrixWorld),_.setFromNormalAndCoplanarPoint(S,T),_.applyMatrix4(U.matrixWorldInverse),R.set(_.normal.x,_.normal.y,_.normal.z,_.constant);const r=U.projectionMatrix;L.x=(Math.sign(R.x)+r.elements[8])/r.elements[0],L.y=(Math.sign(R.y)+r.elements[9])/r.elements[5],L.z=-1,L.w=(1+r.elements[10])/r.elements[14],R.multiplyScalar(2/R.dot(L)),r.elements[2]=R.x,r.elements[6]=R.y,r.elements[10]=R.z+1,r.elements[14]=R.w}),[x,g]),[k,O,G,z]=i.useMemo((()=>{const n={minFilter:s.LinearFilter,magFilter:s.LinearFilter,encoding:y.outputEncoding,type:s.HalfFloatType},i=new s.WebGLRenderTarget(r,r,n);i.depthBuffer=!0,i.depthTexture=new s.DepthTexture(r,r),i.depthTexture.format=s.DepthFormat,i.depthTexture.type=s.UnsignedShortType;const o=new s.WebGLRenderTarget(r,r,n);return[i,o,new 
pc({gl:y,resolution:r,width:a[0],height:a[1],minDepthThreshold:l,maxDepthThreshold:c,depthScale:u,depthToBlurRatioBias:h}),{mirror:d,textureMatrix:D,mixBlur:e,tDiffuse:i.texture,tDepth:i.depthTexture,tDiffuseBlur:o.texture,hasBlur:w,mixStrength:t,minDepthThreshold:l,maxDepthThreshold:c,depthScale:u,depthToBlurRatioBias:h,transparent:!0,distortion:p,distortionMap:m,mixContrast:f,"defines-USE_BLUR":w?"":void 0,"defines-USE_DEPTH":u>0?"":void 0,"defines-USE_DISTORTION":m?"":void 0}]}),[y,a,D,r,d,w,e,t,l,c,u,h,p,m,f]);return(0,o.useFrame)((()=>{var e;const t=E.current.parent||(null==(e=E.current)?void 0:e.__r3f.parent);if(!t)return;t.visible=!1;const r=y.xr.enabled,n=y.shadowMap.autoUpdate;N(),y.xr.enabled=!1,y.shadowMap.autoUpdate=!1,y.setRenderTarget(k),y.state.buffers.depth.setMask(!0),y.autoClear||y.clear(),y.render(b,U),w&&G.render(y,k,O),y.xr.enabled=r,y.shadowMap.autoUpdate=n,t.visible=!0,y.setRenderTarget(null)})),i.createElement("meshReflectorMaterialImpl",n({attach:"material",key:"key"+z["defines-USE_BLUR"]+z["defines-USE_DEPTH"]+z["defines-USE_DISTORTION"],ref:M([E,A])},z,v))}));class gc extends s.PointsMaterial{constructor(e){super(e),this.onBeforeCompile=e=>{e.fragmentShader=e.fragmentShader.replace("#include ","\n #include \n vec2 cxy = 2.0 * gl_PointCoord - 1.0;\n float r = dot(cxy, cxy);\n float delta = fwidth(r); \n float mask = 1.0 - smoothstep(1.0 - delta, 1.0 + delta, r);\n gl_FragColor = vec4(gl_FragColor.rgb, mask * gl_FragColor.a );\n ")}}}const vc=i.forwardRef(((e,t)=>{const[r]=i.useState((()=>new gc(null)));return i.createElement("primitive",n({},e,{object:r,ref:t,attach:"material"}))}));let Ac=!1;const yc=e=>{if(!Ac){Ac=!0;let t=s.ShaderChunk.shadowmap_pars_fragment;t=t.replace("#ifdef USE_SHADOWMAP","#ifdef USE_SHADOWMAP\n"+(({frustum:e=3.75,size:t=.005,near:r=9.5,samples:n=17,rings:i=11}={})=>`#define LIGHT_WORLD_SIZE ${t}\n#define LIGHT_FRUSTUM_WIDTH ${e}\n#define LIGHT_SIZE_UV (LIGHT_WORLD_SIZE / LIGHT_FRUSTUM_WIDTH)\n#define NEAR_PLANE 
${r}\n\n#define NUM_SAMPLES ${n}\n#define NUM_RINGS ${i}\n#define BLOCKER_SEARCH_NUM_SAMPLES NUM_SAMPLES\n#define PCF_NUM_SAMPLES NUM_SAMPLES\n\nvec2 poissonDisk[NUM_SAMPLES];\n\nvoid initPoissonSamples(const in vec2 randomSeed) {\n float ANGLE_STEP = PI2 * float(NUM_RINGS) / float(NUM_SAMPLES);\n float INV_NUM_SAMPLES = 1.0 / float(NUM_SAMPLES);\n float angle = rand(randomSeed) * PI2;\n float radius = INV_NUM_SAMPLES;\n float radiusStep = radius;\n for (int i = 0; i < NUM_SAMPLES; i++) {\n poissonDisk[i] = vec2(cos(angle), sin(angle)) * pow(radius, 0.75);\n radius += radiusStep;\n angle += ANGLE_STEP;\n }\n}\n\nfloat penumbraSize(const in float zReceiver, const in float zBlocker) { // Parallel plane estimation\n return (zReceiver - zBlocker) / zBlocker;\n}\n\nfloat findBlocker(sampler2D shadowMap, const in vec2 uv, const in float zReceiver) {\n float searchRadius = LIGHT_SIZE_UV * (zReceiver - NEAR_PLANE) / zReceiver;\n float blockerDepthSum = 0.0;\n int numBlockers = 0;\n for (int i = 0; i < BLOCKER_SEARCH_NUM_SAMPLES; i++) {\n float shadowMapDepth = unpackRGBAToDepth(texture2D(shadowMap, uv + poissonDisk[i] * searchRadius));\n if (shadowMapDepth < zReceiver) {\n blockerDepthSum += shadowMapDepth;\n numBlockers++;\n }\n }\n if (numBlockers == 0) return -1.0;\n return blockerDepthSum / float(numBlockers);\n}\n\nfloat PCF_Filter(sampler2D shadowMap, vec2 uv, float zReceiver, float filterRadius) {\n float sum = 0.0;\n for (int i = 0; i < PCF_NUM_SAMPLES; i++) {\n float depth = unpackRGBAToDepth(texture2D(shadowMap, uv + poissonDisk[ i ] * filterRadius));\n if (zReceiver <= depth) sum += 1.0;\n }\n for (int i = 0; i < PCF_NUM_SAMPLES; i++) {\n float depth = unpackRGBAToDepth(texture2D(shadowMap, uv + -poissonDisk[ i ].yx * filterRadius));\n if (zReceiver <= depth) sum += 1.0;\n }\n return sum / (2.0 * float(PCF_NUM_SAMPLES));\n}\n\nfloat PCSS(sampler2D shadowMap, vec4 coords) {\n vec2 uv = coords.xy;\n float zReceiver = coords.z; // Assumed to be eye-space z in this 
code\n initPoissonSamples(uv);\n float avgBlockerDepth = findBlocker(shadowMap, uv, zReceiver);\n if (avgBlockerDepth == -1.0) return 1.0;\n float penumbraRatio = penumbraSize(zReceiver, avgBlockerDepth);\n float filterRadius = penumbraRatio * LIGHT_SIZE_UV * NEAR_PLANE / zReceiver;\n return PCF_Filter(shadowMap, uv, zReceiver, filterRadius);\n}`)({...e})),t=t.replace("#if defined( SHADOWMAP_TYPE_PCF )","\nreturn PCSS(shadowMap, shadowCoord);\n#if defined( SHADOWMAP_TYPE_PCF )"),s.ShaderChunk.shadowmap_pars_fragment=t}};function xc(e){const t=e+"BufferGeometry";return i.forwardRef((({args:e,children:r,...a},s)=>i.createElement("mesh",n({ref:s},a),i.createElement(t,{attach:"geometry",args:e}),r)))}const bc=xc("box"),wc=xc("circle"),Ec=xc("cone"),_c=xc("cylinder"),Sc=xc("sphere"),Mc=xc("plane"),Tc=xc("tube"),Cc=xc("torus"),Ic=xc("torusKnot"),Bc=xc("tetrahedron"),Rc=xc("ring"),Pc=xc("polyhedron"),Fc=xc("icosahedron"),Lc=xc("octahedron"),Dc=xc("dodecahedron"),Uc=xc("extrude"),Nc=xc("lathe"),kc=1e-5;const Oc=i.forwardRef((function({args:[e=1,t=1,r=1]=[],radius:a=.05,steps:o=1,smoothness:l=4,children:c,...u},h){const d=i.useMemo((()=>function(e,t,r){const n=new s.Shape,i=r-kc;return n.absarc(kc,kc,kc,-Math.PI/2,-Math.PI,!0),n.absarc(kc,t-2*i,kc,Math.PI,Math.PI/2,!0),n.absarc(e-2*i,t-2*i,kc,Math.PI/2,0,!0),n.absarc(e-2*i,kc,kc,0,-Math.PI/2,!0),n}(e,t,a)),[e,t,a]),p=i.useMemo((()=>({depth:r-2*a,bevelEnabled:!0,bevelSegments:2*l,steps:o,bevelSize:a-kc,bevelThickness:a,curveSegments:l})),[r,a,l]),f=i.useRef();return i.useLayoutEffect((()=>{f.current&&f.current.center()}),[d,p]),i.createElement("mesh",n({ref:h},u),i.createElement("extrudeBufferGeometry",{attach:"geometry",ref:f,args:[d,p]}),c)}));function Gc(){const e=new s.BufferGeometry,t=new Float32Array([-1,-1,3,-1,-1,3]);return e.setAttribute("position",new s.BufferAttribute(t,2)),e}const zc=i.forwardRef((function({children:e,...t},r){const a=i.useMemo(Gc,[]);return 
i.createElement("mesh",n({ref:r,geometry:a,frustumCulled:!1},t),e)})),Vc=i.forwardRef((function({children:e,alignTop:t,...r},a){const o=i.useRef(null),l=i.useRef(null);return i.useLayoutEffect((()=>{o.current.position.set(0,0,0),o.current.updateWorldMatrix(!0,!0);const e=(new s.Box3).setFromObject(l.current),r=new s.Vector3,n=new s.Sphere,i=e.max.y-e.min.y;e.getCenter(r),e.getBoundingSphere(n),o.current.position.set(-r.x,-r.y+(t?i/2:0),-r.z)}),[e]),i.createElement("group",n({ref:a},r),i.createElement("group",{ref:o},i.createElement("group",{ref:l},e)))})),Hc=e=>e&&e.isOrthographicCamera,Qc=i.createContext(null);function jc({children:e,damping:t=6,fit:r,clip:n,observe:a,margin:l=1.2,eps:c=.01,onFit:u}){const h=i.useRef(null),{camera:d,invalidate:p,size:f,controls:m}=(0,o.useThree)(),g=i.useRef(u);function v(e,t){return Math.abs(e.x-t.x)({animating:!1,focus:new s.Vector3,camera:new s.Vector3,zoom:1}))),[x]=i.useState((()=>({focus:new s.Vector3,camera:new s.Vector3,zoom:1}))),[b]=i.useState((()=>new s.Box3)),w=i.useMemo((()=>{function e(){const e=b.getSize(new s.Vector3),t=b.getCenter(new s.Vector3),r=Math.max(e.x,e.y,e.z),n=Hc(d)?4*r:r/(2*Math.atan(Math.PI*d.fov/360)),i=Hc(d)?4*r:n/d.aspect,a=l*Math.max(n,i);return{box:b,size:e,center:t,distance:a}}return{getSize:e,refresh(t){if((r=t)&&r.isBox3)b.copy(t);else{const e=t||h.current;e.updateWorldMatrix(!0,!0),b.setFromObject(e)}var r;if(b.isEmpty()){const e=d.position.length()||10;b.setFromCenterAndSize(new s.Vector3,new s.Vector3(e,e,e))}if("OrthographicTrackballControls"===(null==m?void 0:m.constructor.name)){const{distance:t}=e(),r=d.position.clone().sub(m.target).normalize().multiplyScalar(t),n=m.target.clone().add(r);d.position.copy(n)}return this},clip(){const{distance:t}=e();return 
m&&(m.maxDistance=10*t),d.near=t/100,d.far=100*t,d.updateProjectionMatrix(),m&&m.update(),p(),this},fit(){y.camera.copy(d.position),m&&y.focus.copy(m.target);const{center:r,distance:n}=e(),i=r.clone().sub(d.position).normalize().multiplyScalar(n);if(x.camera.copy(r).sub(i),x.focus.copy(r),Hc(d)){y.zoom=d.zoom;let e=0,n=0;const i=[new s.Vector3(b.min.x,b.min.y,b.min.z),new s.Vector3(b.min.x,b.max.y,b.min.z),new s.Vector3(b.min.x,b.min.y,b.max.z),new s.Vector3(b.min.x,b.max.y,b.max.z),new s.Vector3(b.max.x,b.max.y,b.max.z),new s.Vector3(b.max.x,b.max.y,b.min.z),new s.Vector3(b.max.x,b.min.y,b.max.z),new s.Vector3(b.max.x,b.min.y,b.min.z)];r.applyMatrix4(d.matrixWorldInverse);for(const t of i)t.applyMatrix4(d.matrixWorldInverse),e=Math.max(e,Math.abs(t.y-r.y)),n=Math.max(n,Math.abs(t.x-r.x));e*=2,n*=2;const a=(d.top-d.bottom)/e,o=(d.right-d.left)/n;x.zoom=Math.min(a,o)/l,t||(d.zoom=x.zoom,d.updateProjectionMatrix())}return t?y.animating=!0:(d.position.copy(x.camera),d.lookAt(x.focus),m&&(m.target.copy(x.focus),m.update())),g.current&&g.current(this.getSize()),p(),this}}}),[b,d,m,l,t,p]);i.useLayoutEffect((()=>{if(m){const e=()=>y.animating=!1;return m.addEventListener("start",e),()=>m.removeEventListener("start",e)}}),[m]);const E=i.useRef(0);return i.useLayoutEffect((()=>{(a||0==E.current++)&&(w.refresh(),r&&w.fit(),n&&w.clip())}),[f,n,r,a]),(0,o.useFrame)(((e,r)=>{if(y.animating){if(A(y.focus,x.focus,t,r),A(y.camera,x.camera,t,r),y.zoom=s.MathUtils.damp(y.zoom,x.zoom,t,r),d.position.copy(y.camera),Hc(d)&&(d.zoom=y.zoom,d.updateProjectionMatrix()),m?(m.target.copy(y.focus),m.update()):d.lookAt(y.focus),p(),Hc(d)&&!(Math.abs(y.zoom-x.zoom){const p=(0,o.useThree)((e=>e.camera)),f=(0,o.useThree)((e=>e.controls)),m=i.useRef(e),g=i.useRef(p.rotation.clone()),[v]=i.useState((()=>new Xc.L)),[A]=i.useState((()=>new Xc.L)),[y]=i.useState((()=>new Xc.L)),x=()=>{(m.current<0||m.current>1)&&(m.current=m.current<0?0:1)};return 
i.useImperativeHandle(d,(()=>({getIntensity:()=>m.current,setIntensity:e=>{m.current=e,x()}})),[]),i.useEffect((()=>{const e=f||(null==h?void 0:h.current),t=()=>{g.current=p.rotation.clone()};return null==e||e.addEventListener("change",t),()=>{null==e||e.removeEventListener("change",t)}}),[h,f]),(0,o.useFrame)((({clock:e},i)=>{const o=Math.pow(m.current,2),h=n*o*v.noise(e.elapsedTime*l,1),d=a*o*A.noise(e.elapsedTime*c,1),f=s*o*y.noise(e.elapsedTime*u,1);p.rotation.set(g.current.x+d,g.current.y+h,g.current.z+f),t&&m.current>0&&(m.current-=r*i,x())})),null})),qc=i.forwardRef((({children:e,speed:t=1,rotationIntensity:r=1,floatIntensity:n=1,...a},s)=>{const l=i.useRef(null),c=i.useRef(1e4*Math.random());return(0,o.useFrame)((e=>{const i=c.current+e.clock.getElapsedTime();l.current.rotation.x=Math.cos(i/4*t)/8*r,l.current.rotation.y=Math.sin(i/4*t)/8*r,l.current.rotation.z=Math.sin(i/4*t)/20*r,l.current.position.y=Math.sin(i/4*t)/10*n})),i.createElement("group",a,i.createElement("group",{ref:M([l,s])},e))}));var Kc=r(8197);const Jc={sunset:"venice/venice_sunset_1k.hdr",dawn:"kiara/kiara_1_dawn_1k.hdr",night:"dikhololo/dikhololo_night_1k.hdr",warehouse:"empty-wharehouse/empty_warehouse_01_1k.hdr",forest:"forrest-slope/forest_slope_1k.hdr",apartment:"lebombo/lebombo_1k.hdr",studio:"studio-small-3/studio_small_03_1k.hdr",city:"potsdamer-platz/potsdamer_platz_1k.hdr",park:"rooitou/rooitou_park_1k.hdr",lobby:"st-fagans/st_fagans_interior_1k.hdr"},Zc=e=>{return(t=e).current&&t.current.isScene?e.current:e;var t};function $c(e){return e.map?i.createElement(eu,e):e.children?i.createElement(tu,e):i.createElement(ru,e)}function eu({scene:e,background:t=!1,map:r}){const n=(0,o.useThree)((e=>e.scene));return i.useLayoutEffect((()=>{if(r){const i=Zc(e||n),a=i.background,s=i.environment;return"only"!==t&&(i.environment=r),t&&(i.background=r),()=>{"only"!==t&&(i.environment=s),t&&(i.background=a)}}}),[e,r]),null}function 
tu({children:e,near:t=1,far:r=1e3,resolution:n=256,frames:a=1,background:l=!1,scene:c,files:u,path:h,preset:d,extensions:p}){const f=(0,o.useThree)((e=>e.gl)),m=(0,o.useThree)((e=>e.scene)),g=i.useRef(null),[v]=i.useState((()=>new s.Scene)),A=i.useMemo((()=>{const e=new s.WebGLCubeRenderTarget(n);return e.texture.type=s.HalfFloatType,e}),[n]);i.useLayoutEffect((()=>{1===a&&g.current.update(f,v);const e=Zc(c||m),t=e.background,r=e.environment;return"only"!==l&&(e.environment=A.texture),l&&(e.background=A.texture),()=>{"only"!==l&&(e.environment=r),l&&(e.background=t)}}),[e,c]);let y=1;return(0,o.useFrame)((()=>{(a===1/0||ye.scene)),u=Array.isArray(t),h=u?s.CubeTextureLoader:Kc.x,d=(0,o.useLoader)(h,u?[t]:t,(e=>{e.setPath(r),null==e.setDataType||e.setDataType(s.FloatType),l&&l(e)})),p=u?d[0]:d;return p.mapping=u?s.CubeReflectionMapping:s.EquirectangularReflectionMapping,i.useLayoutEffect((()=>{const t=Zc(a||c),r=t.background,n=t.environment;return"only"!==e&&(t.environment=p),e&&(t.background=p),()=>{"only"!==e&&(t.environment=n),e&&(t.background=r)}}),[p,e,a]),null}var nu=r(78466),iu=r(37312);const au=i.forwardRef((({scale:e,frames:t=1/0,opacity:r=1,width:a=1,height:l=1,blur:c=1,far:u=10,resolution:h=256,smooth:d=!0,color:p="#000000",...f},m)=>{const g=(0,o.useThree)((({scene:e})=>e)),v=(0,o.useThree)((({gl:e})=>e)),A=i.useRef(null);a*=Array.isArray(e)?e[0]:e||1,l*=Array.isArray(e)?e[1]:e||1;const[y,x,b,w,E,_,S]=i.useMemo((()=>{const e=new s.WebGLRenderTarget(h,h),t=new s.WebGLRenderTarget(h,h);t.texture.generateMipmaps=e.texture.generateMipmaps=!1;const r=new s.PlaneBufferGeometry(a,l).rotateX(Math.PI/2),n=new s.Mesh(r),i=new s.MeshDepthMaterial;i.depthTest=i.depthWrite=!1,i.onBeforeCompile=e=>{e.uniforms={...e.uniforms,ucolor:{value:new s.Color(p).convertSRGBToLinear()}},e.fragmentShader=e.fragmentShader.replace("void main() {","uniform vec3 ucolor;\n void main() {\n "),e.fragmentShader=e.fragmentShader.replace("vec4( vec3( 1.0 - fragCoordZ ), opacity );","vec4( 
ucolor, ( 1.0 - fragCoordZ ) * 1.0 );")};const o=new s.ShaderMaterial(nu.P),c=new s.ShaderMaterial(iu.Z);return c.depthTest=o.depthTest=!1,[e,r,i,n,o,c,t]}),[h,a,l,e]),M=i.useCallback((e=>{w.visible=!0,w.material=E,E.uniforms.tDiffuse.value=y.texture,E.uniforms.h.value=1*e/256,v.setRenderTarget(S),v.render(w,A.current),w.material=_,_.uniforms.tDiffuse.value=S.texture,_.uniforms.v.value=1*e/256,v.setRenderTarget(y),v.render(w,A.current),w.visible=!1}),[]);let T=0;return(0,o.useFrame)((()=>{if(A.current&&(t===1/0||Te.camera)),g=(0,o.useThree)((e=>e.controls)),v=i.useRef(null),A=i.useRef(null),[{radius:y,width:x,height:b},w]=i.useState({radius:0,width:0,height:0});return i.useLayoutEffect((()=>{v.current.position.set(0,0,0),v.current.updateWorldMatrix(!0,!0);const e=(new s.Box3).setFromObject(A.current),t=new s.Vector3,r=new s.Sphere,n=e.max.y-e.min.y,i=e.max.x-e.min.x;e.getCenter(t),e.getBoundingSphere(r),w({radius:r.radius,width:i,height:n}),v.current.position.set(-t.x,-t.y+n/2,-t.z)}),[e]),i.useLayoutEffect((()=>{if(a){const e=y/(b>x?1.5:2.5);m.position.set(0,.5*y,2.5*y),m.near=.1,m.far=Math.max(5e3,4*y),m.lookAt(0,e,0);const r=g||(null==t?void 0:t.current);r&&(r.target.set(0,e,0),r.update())}}),[g,y,b,x,a]),i.createElement("group",p,i.createElement("group",{ref:v},i.createElement("group",{ref:A},e)),d&&i.createElement(au,n({scale:2*y,far:y/2},d)),l&&i.createElement($c,{preset:l}),i.createElement("ambientLight",{intensity:c/3}),i.createElement("spotLight",{penumbra:1,position:[f.main[0]*y,f.main[1]*y,f.main[2]*y],intensity:2*c,castShadow:r,"shadow-bias":h}),i.createElement("pointLight",{position:[f.fill[0]*y,f.fill[1]*y,f.fill[2]*y],intensity:c}))}const lu=e=>0===e?0:Math.pow(2,10*e-10);function cu({children:e,floor:t=.25,segments:r=20,receiveShadow:n,...a}){const s=i.useRef(null);return i.useLayoutEffect((()=>{let e=0;const n=r/r/2,i=s.current.attributes.position;for(let a=0;a{const c=i.useMemo((()=>{const 
e=document.createElement("canvas");e.width=128,e.height=128;const n=e.getContext("2d"),i=n.createRadialGradient(e.width/2,e.height/2,0,e.width/2,e.height/2,e.width/2);return i.addColorStop(t,new s.Color(r).getStyle()),i.addColorStop(1,"rgba(0,0,0,0)"),n.fillStyle=i,n.fillRect(0,0,e.width,e.height),e}),[r,t]);return i.createElement("mesh",n({ref:l},o),i.createElement("planeBufferGeometry",{attach:"geometry",args:[1,1]}),i.createElement("meshBasicMaterial",{attach:"material",transparent:!0,opacity:a,fog:e},i.createElement("canvasTexture",{attach:"map",args:[c]})))}));(0,o.extend)({MeshReflectorMaterial:fc});const hu=i.forwardRef((({mixBlur:e=0,mixStrength:t=.5,resolution:r=256,blur:a=[0,0],args:l=[1,1],minDepthThreshold:c=.9,maxDepthThreshold:u=1,depthScale:h=0,depthToBlurRatioBias:d=.25,mirror:p=0,children:f,debug:m=0,distortion:g=1,mixContrast:v=1,distortionMap:A,...y},x)=>{i.useEffect((()=>{console.warn("Reflector has been deprecated and will be removed next major. Replace it with !")}),[]);const b=(0,o.useThree)((({gl:e})=>e)),w=(0,o.useThree)((({camera:e})=>e)),E=(0,o.useThree)((({scene:e})=>e)),_=(a=Array.isArray(a)?a:[a,a])[0]+a[1]>0,S=i.useRef(null),[T]=i.useState((()=>new s.Plane)),[C]=i.useState((()=>new s.Vector3)),[I]=i.useState((()=>new s.Vector3)),[B]=i.useState((()=>new s.Vector3)),[R]=i.useState((()=>new s.Matrix4)),[P]=i.useState((()=>new s.Vector3(0,0,-1))),[F]=i.useState((()=>new s.Vector4)),[L]=i.useState((()=>new s.Vector3)),[D]=i.useState((()=>new s.Vector3)),[U]=i.useState((()=>new s.Vector4)),[N]=i.useState((()=>new s.Matrix4)),[k]=i.useState((()=>new 
s.PerspectiveCamera)),O=i.useCallback((()=>{if(I.setFromMatrixPosition(S.current.matrixWorld),B.setFromMatrixPosition(w.matrixWorld),R.extractRotation(S.current.matrixWorld),C.set(0,0,1),C.applyMatrix4(R),L.subVectors(I,B),L.dot(C)>0)return;L.reflect(C).negate(),L.add(I),R.extractRotation(w.matrixWorld),P.set(0,0,-1),P.applyMatrix4(R),P.add(B),D.subVectors(I,P),D.reflect(C).negate(),D.add(I),k.position.copy(L),k.up.set(0,1,0),k.up.applyMatrix4(R),k.up.reflect(C),k.lookAt(D),k.far=w.far,k.updateMatrixWorld(),k.projectionMatrix.copy(w.projectionMatrix),N.set(.5,0,0,.5,0,.5,0,.5,0,0,.5,.5,0,0,0,1),N.multiply(k.projectionMatrix),N.multiply(k.matrixWorldInverse),N.multiply(S.current.matrixWorld),T.setFromNormalAndCoplanarPoint(C,I),T.applyMatrix4(k.matrixWorldInverse),F.set(T.normal.x,T.normal.y,T.normal.z,T.constant);const e=k.projectionMatrix;U.x=(Math.sign(F.x)+e.elements[8])/e.elements[0],U.y=(Math.sign(F.y)+e.elements[9])/e.elements[5],U.z=-1,U.w=(1+e.elements[10])/e.elements[14],F.multiplyScalar(2/F.dot(U)),e.elements[2]=F.x,e.elements[6]=F.y,e.elements[10]=F.z+1,e.elements[14]=F.w}),[]),[G,z,V,H]=i.useMemo((()=>{const n={minFilter:s.LinearFilter,magFilter:s.LinearFilter,encoding:b.outputEncoding},i=new s.WebGLRenderTarget(r,r,n);i.depthBuffer=!0,i.depthTexture=new s.DepthTexture(r,r),i.depthTexture.format=s.DepthFormat,i.depthTexture.type=s.UnsignedShortType;const o=new s.WebGLRenderTarget(r,r,n);return[i,o,new pc({gl:b,resolution:r,width:a[0],height:a[1],minDepthThreshold:c,maxDepthThreshold:u,depthScale:h,depthToBlurRatioBias:d}),{mirror:p,textureMatrix:N,mixBlur:e,tDiffuse:i.texture,tDepth:i.depthTexture,tDiffuseBlur:o.texture,hasBlur:_,mixStrength:t,minDepthThreshold:c,maxDepthThreshold:u,depthScale:h,depthToBlurRatioBias:d,transparent:!0,debug:m,distortion:g,distortionMap:A,mixContrast:v,"defines-USE_BLUR":_?"":void 0,"defines-USE_DEPTH":h>0?"":void 0,"defines-USE_DISTORTION":A?"":void 
0}]}),[b,a,N,r,p,_,e,t,c,u,h,d,m,g,A,v]);return(0,o.useFrame)((()=>{if(null==S||!S.current)return;S.current.visible=!1;const e=b.xr.enabled,t=b.shadowMap.autoUpdate;O(),b.xr.enabled=!1,b.shadowMap.autoUpdate=!1,b.setRenderTarget(G),b.state.buffers.depth.setMask(!0),b.autoClear||b.clear(),b.render(E,k),_&&V.render(b,G,z),b.xr.enabled=e,b.shadowMap.autoUpdate=t,S.current.visible=!0,b.setRenderTarget(null)})),i.createElement("mesh",n({ref:M([S,x])},y),i.createElement("planeBufferGeometry",{args:l}),f?f("meshReflectorMaterial",H):i.createElement("meshReflectorMaterial",H))}));class du extends s.ShaderMaterial{constructor(){super({uniforms:{depth:{value:null},opacity:{value:1},attenuation:{value:2.5},anglePower:{value:12},spotPosition:{value:new s.Vector3(0,0,0)},lightColor:{value:new s.Color("white")},cameraNear:{value:0},cameraFar:{value:1},resolution:{value:new s.Vector2(0,0)}},transparent:!0,depthWrite:!1,vertexShader:"\n varying vec3 vNormal;\n varying vec3 vWorldPosition;\n varying float vViewZ;\n varying float vIntensity;\n uniform vec3 spotPosition;\n uniform float attenuation; \n\n void main() {\n // compute intensity\n vNormal = normalize( normalMatrix * normal );\n vec4 worldPosition\t= modelMatrix * vec4( position, 1.0 );\n vWorldPosition = worldPosition.xyz;\n vec4 viewPosition = viewMatrix * worldPosition;\n vViewZ = viewPosition.z;\n float intensity\t= distance(worldPosition.xyz, spotPosition) / attenuation;\n intensity\t= 1.0 - clamp(intensity, 0.0, 1.0);\n vIntensity = intensity; \n // set gl_Position\n gl_Position\t= projectionMatrix * viewPosition;\n\n }",fragmentShader:"\n #include \n\n varying vec3 vNormal;\n varying vec3 vWorldPosition;\n uniform vec3 lightColor;\n uniform vec3 spotPosition;\n uniform float attenuation;\n uniform float anglePower;\n uniform sampler2D depth;\n uniform vec2 resolution;\n uniform float cameraNear;\n uniform float cameraFar;\n varying float vViewZ;\n varying float vIntensity;\n uniform float opacity;\n\n float 
readDepth( sampler2D depthSampler, vec2 coord ) {\n float fragCoordZ = texture2D( depthSampler, coord ).x;\n float viewZ = perspectiveDepthToViewZ(fragCoordZ, cameraNear, cameraFar);\n return viewZ;\n }\n\n void main() {\n float d = 1.0;\n bool isSoft = resolution[0] > 0.0 && resolution[1] > 0.0;\n if (isSoft) {\n vec2 sUv = gl_FragCoord.xy / resolution;\n d = readDepth(depth, sUv);\n }\n float intensity = vIntensity;\n vec3 normal\t= vec3(vNormal.x, vNormal.y, abs(vNormal.z));\n float angleIntensity\t= pow( dot(normal, vec3(0.0, 0.0, 1.0)), anglePower );\n intensity\t*= angleIntensity;\n // fades when z is close to sampled depth, meaning the cone is intersecting existing geometry\n if (isSoft) {\n intensity\t*= smoothstep(0., 1., vViewZ - d);\n }\n gl_FragColor = vec4(lightColor, intensity * opacity);\n\n #include \n\t #include \n }"})}}const pu=new s.Vector3,fu=i.forwardRef((({opacity:e=1,radiusTop:t,radiusBottom:r,depthBuffer:a,color:l="white",distance:c=5,angle:u=.15,attenuation:h=5,anglePower:d=5,...p},f)=>{const m=i.useRef(null),g=(0,o.useThree)((e=>e.size)),v=(0,o.useThree)((e=>e.camera)),A=(0,o.useThree)((e=>e.viewport.dpr)),[y]=i.useState((()=>new du));t=void 0===t?.1:t,r=void 0===r?7*u:r,(0,o.useFrame)((()=>{y.uniforms.spotPosition.value.copy(m.current.getWorldPosition(pu)),m.current.lookAt(m.current.parent.target.getWorldPosition(pu))}));const x=i.useMemo((()=>{const e=new s.CylinderGeometry(t,r,c,128,64,!0);return e.applyMatrix4((new s.Matrix4).makeTranslation(0,-c/2,0)),e.applyMatrix4((new s.Matrix4).makeRotationX(-Math.PI/2)),e}),[u,c,t,r]);return 
i.createElement("spotLight",n({ref:f,angle:u,color:l,distance:c},p),i.createElement("mesh",{ref:m,geometry:x,raycast:()=>null},i.createElement("primitive",{object:y,attach:"material","uniforms-opacity-value":e,"uniforms-lightColor-value":l,"uniforms-attenuation-value":h,"uniforms-anglePower-value":d,"uniforms-depth-value":a,"uniforms-cameraNear-value":v.near,"uniforms-cameraFar-value":v.far,"uniforms-resolution-value":a?[g.width*A,g.height*A]:[0,0]})))})),mu=i.forwardRef((({args:e,map:t,toneMapped:r=!1,color:a="white",form:l="rect",intensity:c=1,scale:u=1,target:h,children:d,...p},f)=>{const m=i.useRef(null);return i.useLayoutEffect((()=>{d||p.material||((0,o.applyProps)(m.current.material,{color:a}),m.current.material.color.multiplyScalar(c))}),[a,c,d,p.material]),i.useLayoutEffect((()=>{h&&m.current.lookAt(Array.isArray(h)?new s.Vector3(...h):h)}),[h]),u=Array.isArray(u)&&2===u.length?[u[0],u[1],1]:u,i.createElement("mesh",n({ref:M([m,f]),scale:u},p),"circle"===l?i.createElement("ringGeometry",{args:[0,1,64]}):"ring"===l?i.createElement("ringGeometry",{args:[.5,1,64]}):"rect"===l?i.createElement("planeGeometry",null):i.createElement(l,{args:e}),d||(p.material?null:i.createElement("meshBasicMaterial",{toneMapped:r,map:t,side:s.DoubleSide})))}));var gu=r(48756);function vu(e,t,r=new s.Vector3){const n=Math.PI*(e-.5),i=2*Math.PI*(t-.5);return r.x=Math.cos(i),r.y=Math.sin(n),r.z=Math.sin(i),r}const Au=i.forwardRef((({inclination:e=.6,azimuth:t=.1,distance:r=1e3,mieCoefficient:a=.005,mieDirectionalG:o=.8,rayleigh:l=.5,turbidity:c=10,sunPosition:u=vu(e,t),...h},d)=>{const p=i.useMemo((()=>(new s.Vector3).setScalar(r)),[r]),[f]=i.useState((()=>new gu.q));return i.createElement("primitive",n({dispose:void 0,object:f,ref:d,"material-uniforms-mieCoefficient-value":a,"material-uniforms-mieDirectionalG-value":o,"material-uniforms-rayleigh-value":l,"material-uniforms-sunPosition-value":u,"material-uniforms-turbidity-value":c,scale:p},h))}));class yu extends 
s.ShaderMaterial{constructor(){super({uniforms:{time:{value:0},fade:{value:1}},vertexShader:"\n uniform float time;\n attribute float size;\n varying vec3 vColor;\n void main() {\n vColor = color;\n vec4 mvPosition = modelViewMatrix * vec4(position, 0.5);\n gl_PointSize = size * (30.0 / -mvPosition.z) * (3.0 + sin(mvPosition.x + 2.0 * time + 100.0));\n gl_Position = projectionMatrix * mvPosition;\n }",fragmentShader:"\n uniform sampler2D pointTexture;\n uniform float fade;\n varying vec3 vColor;\n void main() {\n float opacity = 1.0;\n if (fade == 1.0) {\n float d = distance(gl_PointCoord, vec2(0.5, 0.5));\n opacity = 1.0 / (1.0 + exp(16.0 * (d - 0.25)));\n }\n gl_FragColor = vec4(vColor, opacity);\n\n #include \n\t #include \n }"})}}const xu=e=>(new s.Vector3).setFromSpherical(new s.Spherical(e,Math.acos(1-2*Math.random()),2*Math.random()*Math.PI)),bu=i.forwardRef((({radius:e=100,depth:t=50,count:r=5e3,saturation:n=0,factor:a=4,fade:l=!1},c)=>{const u=i.useRef(),[h,d,p]=i.useMemo((()=>{const i=[],o=[],l=Array.from({length:r},(()=>(.5+.5*Math.random())*a)),c=new s.Color;let u=e+t;const h=t/r;for(let e=0;eu.current&&(u.current.uniforms.time.value=e.clock.getElapsedTime())));const[f]=i.useState((()=>new yu));return i.createElement("points",{ref:c},i.createElement("bufferGeometry",{attach:"geometry"},i.createElement("bufferAttribute",{attachObject:["attributes","position"],args:[h,3]}),i.createElement("bufferAttribute",{attachObject:["attributes","color"],args:[d,3]}),i.createElement("bufferAttribute",{attachObject:["attributes","size"],args:[p,1]})),i.createElement("primitive",{dispose:void 0,ref:u,object:f,attach:"material",blending:s.AdditiveBlending,"uniforms-fade-value":l,transparent:!0,vertexColors:!0}))}));function wu({opacity:e=.5,speed:t=.4,width:r=10,depth:n=1.5,segments:a=20,texture:s="https://rawcdn.githack.com/pmndrs/drei-assets/9225a9f1fbd449d9411125c2f419b843d0308c9f/cloud.png",color:l="#ffffff",depthTest:c=!0,...u}){const 
h=i.useRef(),d=Oa(s),p=i.useMemo((()=>[...new Array(a)].map(((e,n)=>({x:r/2-Math.random()*r,y:r/2-Math.random()*r,scale:.4+Math.sin((n+1)/a*Math.PI)*(10*(.2+Math.random())),density:Math.max(.2,Math.random()),rotation:Math.max(.002,.005*Math.random())*t})))),[r,a,t]);return(0,o.useFrame)((e=>{var t;return null==(t=h.current)?void 0:t.children.forEach(((t,r)=>{t.children[0].rotation.z+=p[r].rotation,t.children[0].scale.setScalar(p[r].scale+(1+Math.sin(e.clock.getElapsedTime()/10))/2*r/10)}))})),i.createElement("group",u,i.createElement("group",{position:[0,0,a/2*n],ref:h},p.map((({x:t,y:r,scale:a,density:s},o)=>i.createElement(yi,{key:o,position:[t,r,-o*n]},i.createElement(Mc,{scale:a,rotation:[0,0,0]},i.createElement("meshStandardMaterial",{map:d,transparent:!0,opacity:a/6*s*e,depthTest:c,color:l})))))))}function Eu(e=0,t=1024){const r=Ca((()=>fetch("https://cdn.jsdelivr.net/gh/pmndrs/drei-assets@master/matcaps.json").then((e=>e.json()))),["matcapList"]),n=r[0],a=i.useMemo((()=>Object.keys(r).length),[]),s=`${i.useMemo((()=>"string"==typeof e?e:"number"==typeof e?r[e]:null),[e])||n}${function(e){switch(e){case 64:return"-64px";case 128:return"-128px";case 256:return"-256px";case 512:return"-512px";default:return""}}(t)}.png`,o=`https://rawcdn.githack.com/emmelleppi/matcaps/9b36ccaaf0a24881a39062d05566c9e92be4aa0d/${t}/${s}`;return[Oa(o),o,a]}function _u(e=0,t={}){const{repeat:r=[1,1],anisotropy:n=1,offset:a=[0,0]}=t,o=Ca((()=>fetch("https://cdn.jsdelivr.net/gh/pmndrs/drei-assets@master/normals/normals.json").then((e=>e.json()))),["normalsList"]),l=i.useMemo((()=>Object.keys(o).length),[]),c=o[0],u=`https://rawcdn.githack.com/pmndrs/drei-assets/7a3104997e1576f83472829815b00880d88b32fb/normals/${o[e]||c}`,h=Oa(u);return i.useLayoutEffect((()=>{h&&(h.wrapS=h.wrapT=s.RepeatWrapping,h.repeat=new s.Vector2(r[0],r[1]),h.offset=new s.Vector2(a[0],a[1]),h.anisotropy=n)}),[h,n,r,a]),[h,u,l]}const Su=new s.Matrix4,Mu=new s.Matrix4,Tu=[],Cu=new s.Mesh;class Iu extends 
s.Group{constructor(){super(),this.color=new s.Color("white"),this.instance={current:void 0},this.instanceKey={current:void 0}}get geometry(){var e;return null==(e=this.instance.current)?void 0:e.geometry}raycast(e,t){const r=this.instance.current;if(!r)return;if(!r.geometry||!r.material)return;Cu.geometry=r.geometry;const n=r.matrixWorld;let i=r.userData.instances.indexOf(this.instanceKey);if(!(-1===i||i>r.count)){r.getMatrixAt(i,Su),Mu.multiplyMatrices(n,Su),Cu.matrixWorld=Mu,Cu.raycast(e,Tu);for(let e=0,r=Tu.length;e{const c=i.useRef(null),[u,h]=i.useState([]),[[d,p,f]]=i.useState((()=>[new Float32Array(3*r),Float32Array.from({length:3*r},(()=>1)),Float32Array.from({length:r},(()=>1))]));i.useLayoutEffect((()=>{c.current.geometry.drawRange.count=Math.min(r,void 0!==t?t:r,u.length)}),[u,t]),i.useEffect((()=>{c.current.geometry.attributes.position.needsUpdate=!0})),(0,o.useFrame)((()=>{for(c.current.updateMatrix(),c.current.updateMatrixWorld(),Fu.copy(c.current.matrixWorld).invert(),Bu=0;Bu{const e={};for(Bu=0;Bu({...e,[t]:e=>{var r,n,i;const a=null==(r=u[e.index])?void 0:r.current;return null==a||null==(n=a.__r3f)||null==(i=n.handlers)?void 0:i[t]({...e,object:a})}})),{})}),[e,u]),g=i.useMemo((()=>({subscribe:e=>(h((t=>[...t,e])),()=>h((t=>t.filter((t=>t.current!==e.current)))))})),[]);return i.createElement("points",n({matrixAutoUpdate:!1,ref:M([l,c])},m,a),i.createElement("bufferGeometry",null,i.createElement("bufferAttribute",{attachObject:["attributes","position"],count:d.length/3,array:d,itemSize:3,usage:s.DynamicDrawUsage}),i.createElement("bufferAttribute",{attachObject:["attributes","color"],count:p.length/3,array:p,itemSize:3,usage:s.DynamicDrawUsage}),i.createElement("bufferAttribute",{attachObject:["attributes","size"],count:f.length,array:f,itemSize:1,usage:s.DynamicDrawUsage})),i.createElement(Pu.Provider,{value:g},e))})),Uu=i.forwardRef((({children:e,...t},r)=>{i.useMemo((()=>(0,o.extend)({Position:Iu})),[]);const 
a=i.useRef(),{subscribe:s}=i.useContext(Pu);return i.useLayoutEffect((()=>s(a)),[]),i.createElement("position",n({ref:M([r,a])},t),e)})),Nu=i.forwardRef((({children:e,positions:t,colors:r,sizes:a,stride:l=3,...c},u)=>{const h=i.useRef(null);return(0,o.useFrame)((()=>{const e=h.current.geometry.attributes;e.position.needsUpdate=!0,r&&(e.color.needsUpdate=!0),a&&(e.size.needsUpdate=!0)})),i.createElement("points",n({ref:M([u,h])},c),i.createElement("bufferGeometry",null,i.createElement("bufferAttribute",{attachObject:["attributes","position"],count:t.length/l,array:t,itemSize:l,usage:s.DynamicDrawUsage}),r&&i.createElement("bufferAttribute",{attachObject:["attributes","color"],count:r.length/l,array:r,itemSize:3,usage:s.DynamicDrawUsage}),a&&i.createElement("bufferAttribute",{attachObject:["attributes","size"],count:a.length/l,array:a,itemSize:1,usage:s.DynamicDrawUsage})),e)})),ku=i.forwardRef(((e,t)=>e.positions instanceof Float32Array?i.createElement(Nu,n({},e,{ref:t})):i.createElement(Du,n({},e,{ref:t}))));var Ou=r(45697),Gu=r.n(Ou);function zu(e){return Vu(e.children,e.components)}function Vu(e,t,r){if(r=r||[],!t[0])return e(r);function n(n){return Vu(e,t.slice(1),r.concat([n]))}return"function"==typeof t[0]?t[0]({results:r,render:n}):(0,i.cloneElement)(t[0],{children:n})}let Hu,Qu;zu.propTypes={children:Gu().func.isRequired,components:Gu().arrayOf(Gu().oneOfType([Gu().element,Gu().func])).isRequired};const ju=i.createContext(null),Wu=new s.Matrix4,Xu=new s.Matrix4,Yu=new s.Matrix4;new s.Color;const qu=new s.Vector3,Ku=new s.Quaternion,Ju=new s.Vector3,Zu=i.forwardRef((({context:e,children:t,...r},a)=>{i.useMemo((()=>(0,o.extend)({Position:Iu})),[]);const s=i.useRef(),{subscribe:l,getParent:c}=i.useContext(e||ju);return i.useLayoutEffect((()=>l(s)),[]),i.createElement("position",n({instance:c(),instanceKey:s,ref:M([a,s])},r),t)})),$u=i.forwardRef((({children:e,range:t,limit:r=1e3,frames:a=1/0,...l},c)=>{const[{context:u,instance:h}]=i.useState((()=>{const 
e=i.createContext(null);return{context:e,instance:i.forwardRef(((t,r)=>i.createElement(Zu,n({context:e},t,{ref:r}))))}})),d=i.useRef(null),[p,f]=i.useState([]),[[m,g]]=i.useState((()=>{const e=new Float32Array(16*r);for(Hu=0;Hu1)))]}));i.useLayoutEffect((()=>{d.current.count=d.current.instanceMatrix.updateRange.count=d.current.instanceColor.updateRange.count=Math.min(r,void 0!==t?t:r,p.length)}),[p,t]),i.useEffect((()=>{d.current.instanceMatrix.needsUpdate=!0}));let v=0;(0,o.useFrame)((()=>{if(a===1/0||v({getParent:()=>d,subscribe:e=>(f((t=>[...t,e])),()=>f((t=>t.filter((t=>t.current!==e.current)))))})),[]);return i.createElement("instancedMesh",n({userData:{instances:p},matrixAutoUpdate:!1,ref:M([c,d]),args:[null,null,0],raycast:()=>null},l),i.createElement("instancedBufferAttribute",{attach:"instanceMatrix",count:m.length/16,array:m,itemSize:16,usage:s.DynamicDrawUsage}),i.createElement("instancedBufferAttribute",{attach:"instanceColor",count:g.length/3,array:g,itemSize:3,usage:s.DynamicDrawUsage}),"function"==typeof e?i.createElement(u.Provider,{value:A},e(h)):i.createElement(ju.Provider,{value:A},e))}));function eh({meshes:e,children:t,...r}){const a=Array.isArray(e);if(!a)for(const n of Object.keys(e))e[n].isMesh||delete e[n];return i.createElement(zu,{components:(a?e:Object.values(e)).map((({geometry:e,material:t})=>i.createElement($u,n({key:e.uuid,geometry:e,material:t},r))))},(r=>a?t(...r):t(Object.keys(e).filter((t=>e[t].isMesh)).reduce(((e,t,n)=>({...e,[t]:r[n]})),{}))))}var th=r(3415);const rh=i.createContext(null),nh=e=>e instanceof s.Vector3?e.toArray():e,ih=i.forwardRef(((e,t)=>{const{limit:r=1e3,lineWidth:a=1,children:l,...c}=e,[u,h]=i.useState([]),[d]=i.useState((()=>new xi.w)),[p]=i.useState((()=>new bi.Y)),[f]=i.useState((()=>new th.z)),[m]=i.useState((()=>new 
s.Vector2(512,512))),[g]=i.useState((()=>Array(6*r).fill(0))),[v]=i.useState((()=>Array(6*r).fill(0))),A=i.useMemo((()=>({subscribe:e=>(h((t=>[...t,e])),()=>h((t=>t.filter((t=>t.current!==e.current)))))})),[]);return(0,o.useFrame)((()=>{for(let i=0;i{const r=i.useContext(rh);if(!r)throw"Segment must used inside Segments component.";const a=i.useRef();return i.useMemo((()=>(0,o.extend)({SegmentObject:ah})),[]),i.useLayoutEffect((()=>r.subscribe(a)),[]),i.createElement("segmentObject",n({ref:M([a,t])},e))})),oh=i.forwardRef((({children:e,distances:t,...r},a)=>{const s=i.useRef(null);return i.useLayoutEffect((()=>{const{current:e}=s;e.levels.length=0,e.children.forEach(((r,n)=>e.levels.push({object:r,distance:t[n]})))})),(0,o.useFrame)((e=>{var t;return null==(t=s.current)?void 0:t.update(e.camera)})),i.createElement("lOD",n({ref:M([s,a])},r),e)}));function lh({all:e,scene:t,camera:r}){const n=(0,o.useThree)((({gl:e})=>e)),a=(0,o.useThree)((({camera:e})=>e)),l=(0,o.useThree)((({scene:e})=>e));return i.useLayoutEffect((()=>{const i=[];e&&(t||l).traverse((e=>{!1===e.visible&&(i.push(e),e.visible=!0)})),n.compile(t||l,r||a);const o=new s.WebGLCubeRenderTarget(128);new s.CubeCamera(.01,1e5,o).update(n,t||l),o.dispose(),i.forEach((e=>e.visible=!1))}),[]),null}function ch(){const e=(0,o.useThree)((e=>e.gl));return(0,i.useEffect)((()=>(e.shadowMap.autoUpdate=!1,e.shadowMap.needsUpdate=!0,()=>{e.shadowMap.autoUpdate=e.shadowMap.needsUpdate=!0})),[e.shadowMap]),null}const uh=new s.Matrix4,hh=new s.Ray,dh=new s.Sphere,ph=new s.Vector3;function fh(e,t){const r=this.geometry,n=this.material,i=this.matrixWorld;void 0!==n&&(null===r.boundingSphere&&r.computeBoundingSphere(),dh.copy(r.boundingSphere),dh.applyMatrix4(i),!1!==e.ray.intersectsSphere(dh)&&(uh.copy(i).invert(),hh.copy(e.ray).applyMatrix4(uh),null!==r.boundingBox&&null===hh.intersectBox(r.boundingBox,ph)||t.push({distance:ph.distanceTo(e.ray.origin),point:ph.clone(),object:this})))}function mh({pixelated:e}){const 
t=(0,o.useThree)((e=>e.gl)),r=(0,o.useThree)((e=>e.internal.active)),n=(0,o.useThree)((e=>e.performance.current)),a=(0,o.useThree)((e=>e.viewport.initialDpr)),s=(0,o.useThree)((e=>e.setDpr));return i.useEffect((()=>{const n=t.domElement;return()=>{r&&s(a),e&&n&&(n.style.imageRendering="auto")}}),[]),i.useEffect((()=>{s(n*a),e&&t.domElement&&(t.domElement.style.imageRendering=1===n?"auto":"pixelated")}),[n]),null}function gh(){const e=(0,o.useThree)((e=>e.get)),t=(0,o.useThree)((e=>e.performance.current));return i.useEffect((()=>{const t=e().raycaster.enabled;return()=>{e().raycaster.enabled=t}}),[]),i.useEffect((()=>{e().raycaster.enabled=1===t}),[t]),null}},64232:(e,t,r)=>{"use strict";r.r(t),r.d(t,{Canvas:()=>K,ReactThreeFiber:()=>w,_roots:()=>ne,act:()=>pe,addAfterEffect:()=>V,addEffect:()=>z,addTail:()=>H,advance:()=>se,applyProps:()=>le,context:()=>D,createPortal:()=>me,dispose:()=>de,events:()=>W,extend:()=>P,invalidate:()=>ae,reconciler:()=>oe,render:()=>ce,unmountComponentAtNode:()=>he,useFrame:()=>Z,useGraph:()=>ee,useLoader:()=>re,useThree:()=>J});var n=r(99477),i=r(67294),a=r(14671),s=r(60374),o=r(76525),l=r.n(o),c=r(63840),u=r(64063),h=r.n(u);const d=[];function p(e,t,r,n=0,i=!1){for(const s of t)if(h()(r,s.args)){if(i)return;if(s.error)throw s.error;if(s.response)return s.response;throw s.promise}const a={args:r,promise:e(...r).then((e=>a.response=null==e||e)).catch((e=>a.error=null!=e?e:"unknown error")).then((()=>{n>0&&setTimeout((()=>{const e=t.indexOf(a);-1!==e&&t.splice(e,1)}),n)}))};if(t.push(a),!i)throw a.promise}function f(e,...t){if(void 0===t||0===t.length)e.splice(0,e.length);else{const r=e.find((e=>h()(t,e.args)));if(r){const t=e.indexOf(r);-1!==t&&e.splice(t,1)}}}function m(e,...t){return p(e,d,t,m.lifespan)}m.lifespan=0,m.clear=(...e)=>f(d,...e),m.preload=(e,...t)=>{p(e,d,t,m.lifespan,!0)},m.peek=(...e)=>{var t;return null==(t=d.find((t=>h()(e,t.args))))?void 0:t.response};var g=r(20296),v=r.n(g);function 
A(e){let{debounce:t,scroll:r,polyfill:n,offsetSize:a}=void 0===e?{debounce:0,scroll:!1,offsetSize:!1}:e;const s=n||("undefined"==typeof window?class{}:window.ResizeObserver);if(!s)throw new Error("This browser does not support ResizeObserver out of the box. See: https://github.com/react-spring/react-use-measure/#resize-observer-polyfills");const[o,l]=(0,i.useState)({left:0,top:0,width:0,height:0,bottom:0,right:0,x:0,y:0}),c=(0,i.useRef)({element:null,scrollContainers:null,resizeObserver:null,lastBounds:o}),u=t?"number"==typeof t?t:t.scroll:null,h=t?"number"==typeof t?t:t.resize:null,d=(0,i.useRef)(!1);(0,i.useEffect)((()=>(d.current=!0,()=>{d.current=!1})));const[p,f,m]=(0,i.useMemo)((()=>{const e=()=>{if(!c.current.element)return;const{left:e,top:t,width:r,height:n,bottom:i,right:s,x:o,y:u}=c.current.element.getBoundingClientRect(),h={left:e,top:t,width:r,height:n,bottom:i,right:s,x:o,y:u};c.current.element instanceof HTMLElement&&a&&(h.height=c.current.element.offsetHeight,h.width=c.current.element.offsetWidth),Object.freeze(h),d.current&&!b(c.current.lastBounds,h)&&l(c.current.lastBounds=h)};return[e,h?v()(e,h):e,u?v()(e,u):e]}),[l,a,u,h]);function g(){c.current.scrollContainers&&(c.current.scrollContainers.forEach((e=>e.removeEventListener("scroll",m,!0))),c.current.scrollContainers=null),c.current.resizeObserver&&(c.current.resizeObserver.disconnect(),c.current.resizeObserver=null)}function A(){c.current.element&&(c.current.resizeObserver=new s(m),c.current.resizeObserver.observe(c.current.element),r&&c.current.scrollContainers&&c.current.scrollContainers.forEach((e=>e.addEventListener("scroll",m,{capture:!0,passive:!0}))))}var x,w,E;return x=m,w=Boolean(r),(0,i.useEffect)((()=>{if(w){const e=x;return window.addEventListener("scroll",e,{capture:!0,passive:!0}),()=>{window.removeEventListener("scroll",e,!0)}}}),[x,w]),E=f,(0,i.useEffect)((()=>{const e=E;return 
window.addEventListener("resize",e),()=>{window.removeEventListener("resize",e)}}),[E]),(0,i.useEffect)((()=>{g(),A()}),[r,m,f]),(0,i.useEffect)((()=>g),[]),[e=>{e&&e!==c.current.element&&(g(),c.current.element=e,c.current.scrollContainers=y(e),A())},o,p]}function y(e){const t=[];if(!e||e===document.body)return t;const{overflow:r,overflowX:n,overflowY:i}=window.getComputedStyle(e);return[r,n,i].some((e=>"auto"===e||"scroll"===e))&&t.push(e),[...t,...y(e.parentElement)]}const x=["x","y","top","bottom","left","right","width","height"],b=(e,t)=>x.every((r=>e[r]===t[r]));var w=Object.freeze({__proto__:null});const E={obj:e=>e===Object(e)&&!E.arr(e)&&"function"!=typeof e,fun:e=>"function"==typeof e,str:e=>"string"==typeof e,num:e=>"number"==typeof e,und:e=>void 0===e,arr:e=>Array.isArray(e),equ(e,t){if(typeof e!=typeof t||!!e!=!!t)return!1;if(E.str(e)||E.num(e)||E.obj(e))return e===t;if(E.arr(e)&&e==t)return!0;let r;for(r in e)if(!(r in t))return!1;for(r in t)if(e[r]!==t[r])return!1;return!E.und(r)||e===t}};function _(e){return(e.eventObject||e.object).uuid+"/"+e.index}function S(e){const t=new n.Vector3;function r(t){const{internal:r}=e.getState(),n=t.offsetX-r.initialClick[0],i=t.offsetY-r.initialClick[1];return Math.round(Math.sqrt(n*n+i*i))}function i(e){return e.filter((e=>["Move","Over","Enter","Out","Leave"].some((t=>{var r;return null==(r=e.__r3f.handlers)?void 0:r["onPointer"+t]}))))}function a(t){const{internal:r}=e.getState();Array.from(r.hovered.values()).forEach((e=>{if(!t.length||!t.find((t=>t.object===e.object&&t.index===e.index))){const n=e.eventObject.__r3f.handlers;if(r.hovered.delete(_(e)),n){const r={...e,intersections:t||[]};null==n.onPointerOut||n.onPointerOut(r),null==n.onPointerLeave||n.onPointerLeave(r)}}}))}function s(e,t){t.forEach((t=>{var r;return null==(r=t.__r3f.handlers)||null==r.onPointerMissed?void 
0:r.onPointerMissed(e)}))}return{handlePointer:n=>{switch(n){case"onPointerLeave":case"onPointerCancel":return()=>a([]);case"onLostPointerCapture":return t=>{"pointerId"in t&&e.getState().internal.capturedMap.delete(t.pointerId),a([])}}return o=>{const{onPointerMissed:l,internal:c}=e.getState();!function(t){var r;const n=e.getState(),{raycaster:i,mouse:a,camera:s,size:o}=n,{offsetX:l,offsetY:c}=null!=(r=null==i.computeOffsets?void 0:i.computeOffsets(t,n))?r:t,{width:u,height:h}=o;a.set(l/u*2-1,-c/h*2+1),i.setFromCamera(a,s)}(o);const u="onPointerMove"===n,h=function(t,r){const{internal:n}=e.getState();return"pointerId"in r&&n.capturedMap.has(r.pointerId)&&t.push(...n.capturedMap.get(r.pointerId).values()),t}(function(t){const r=e.getState(),{raycaster:n,internal:i}=r;if(!n.enabled)return[];const a=new Set,s=[],o=t?t(i.interaction):i.interaction;let l=n.intersectObjects(o,!0).filter((e=>{const t=_(e);return!a.has(t)&&(a.add(t),!0)}));n.filter&&(l=n.filter(l,r));for(const e of l){let t=e.object;for(;t;){var c;(null==(c=t.__r3f)?void 0:c.handlers)&&s.push({...e,eventObject:t}),t=t.parent}}return s}(u?i:void 0),o);u&&a(h),function(n,i,s){const{raycaster:o,mouse:l,camera:c,internal:u}=e.getState();if(n.length){const e=t.set(l.x,l.y,0).unproject(c),h="click"===i.type?r(i):0,d=e=>i.target.releasePointerCapture(e),p={stopped:!1};for(const t of n){const r=e=>{var r,n;return null!=(r=null==(n=u.capturedMap.get(e))?void 0:n.has(t.eventObject))&&r},f=e=>{u.capturedMap.has(e)?u.capturedMap.get(e).set(t.eventObject,t):u.capturedMap.set(e,new Map([[t.eventObject,t]])),i.target.setPointerCapture(e)};let m={};for(let e in Object.getPrototypeOf(i)){let t=i[e];"function"!=typeof t&&(m[e]=t)}let g={...t,...m,spaceX:l.x,spaceY:l.y,intersections:n,stopped:p.stopped,delta:h,unprojectedPoint:e,ray:o.ray,camera:c,stopPropagation:()=>{const e="pointerId"in 
i&&u.capturedMap.get(i.pointerId);(!e||e.has(t.eventObject))&&(g.stopped=p.stopped=!0,u.hovered.size&&Array.from(u.hovered.values()).find((e=>e.eventObject===t.eventObject)))&&a([...n.slice(0,n.indexOf(t)),t])},target:{hasPointerCapture:r,setPointerCapture:f,releasePointerCapture:d},currentTarget:{hasPointerCapture:r,setPointerCapture:f,releasePointerCapture:d},sourceEvent:i,nativeEvent:i};if(s(g),!0===p.stopped)break}}}(h,o,(e=>{const t=e.eventObject,r=t.__r3f.handlers;if(r)if(u){if(r.onPointerOver||r.onPointerEnter||r.onPointerOut||r.onPointerLeave){const t=_(e),n=c.hovered.get(t);n?n.stopped&&e.stopPropagation():(c.hovered.set(t,e),null==r.onPointerOver||r.onPointerOver(e),null==r.onPointerEnter||r.onPointerEnter(e))}null==r.onPointerMove||r.onPointerMove(e)}else{const i=null==r?void 0:r[n];i&&("onClick"!==n&&"onContextMenu"!==n&&"onDoubleClick"!==n||c.initialHits.includes(t))&&(i(e),s(o,c.interaction.filter((e=>e!==t))))}})),"onPointerDown"===n&&(c.initialClick=[o.offsetX,o.offsetY],c.initialHits=h.map((e=>e.eventObject))),"onClick"!==n&&"onContextMenu"!==n&&"onDoubleClick"!==n||h.length||r(o)<=2&&(s(o,c.interaction),l&&l(o))}}}}const M=e=>e&&!!e.getState,T=(e,t)=>{var r,n;return{root:M(e)?e:null!=(r=null==(n=e.__r3f)?void 0:n.root)?r:t.__r3f.root,container:M(e)?e.getState().scene:e}},C="__default",I={},B=["children","key","ref"];let R={},P=e=>{R={...R,...e}};function F(e,t){const r=e;return(null!=t&&t.instance||!r.__r3f)&&(r.__r3f={root:null,memoizedProps:{},objects:[],...t}),e}const L=e=>e&&e.isOrthographicCamera,D=i.createContext(null);function U(e,t){const r=t.length;return t.push(e),()=>{t.splice(r,1)}}let N,k=[],O=[],G=[];const z=e=>U(e,k),V=e=>U(e,O),H=e=>U(e,G);function Q(e,t){for(N=0;N({...e,[r]:t(r)})),{}),connect:t=>{var n;const{set:i,events:a}=e.getState();null==a.disconnect||a.disconnect(),i((e=>({events:{...e.events,connected:t}}))),Object.entries(null!=(n=null==a?void 
0:a.handlers)?n:[]).forEach((([e,n])=>{const[i,a]=r[e];t.addEventListener(i,n,{passive:a})}))},disconnect:()=>{const{set:t,events:n}=e.getState();var i;n.connected&&(Object.entries(null!=(i=n.handlers)?i:[]).forEach((([e,t])=>{if(n&&n.connected instanceof HTMLElement){const[i]=r[e];n.connected.removeEventListener(i,t)}})),t((e=>({events:{...e.events,connected:!1}}))))}}}const X="undefined"!=typeof window?i.useLayoutEffect:i.useEffect;function Y({set:e}){return X((()=>(e(new Promise((()=>null))),()=>e(!1))),[]),null}class q extends i.Component{constructor(...e){super(...e),this.state={error:!1}}componentDidCatch(e){this.props.set(e)}render(){return this.state.error?null:this.props.children}}function K({children:e,fallback:t,tabIndex:r,resize:n,id:a,style:s,className:o,events:l,...c}){const[u,h]=A({scroll:!0,debounce:{scroll:50,resize:0},...n}),d=i.useRef(null),[p,f]=i.useState(!1),[m,g]=i.useState(!1);if(p)throw p;if(m)throw m;return X((()=>{h.width>0&&h.height>0&&ce(i.createElement(q,{set:g},i.createElement(i.Suspense,{fallback:i.createElement(Y,{set:f})},e)),d.current,{...c,size:h,events:l||W})}),[h,e]),X((()=>{const e=d.current;return()=>he(e)}),[]),i.createElement("div",{ref:u,id:a,className:o,tabIndex:r,style:{position:"relative",width:"100%",height:"100%",overflow:"hidden",...s}},i.createElement("canvas",{ref:d,style:{display:"block"}},t))}function J(e=(e=>e),t){const r=i.useContext(D);if(!r)throw"R3F hooks can only be used within the Canvas component!";return r(e,t)}function Z(e,t=0){const{subscribe:r}=i.useContext(D).getState().internal,n=i.useRef(e);return i.useLayoutEffect((()=>{n.current=e}),[e]),i.useLayoutEffect((()=>{const e=r(n,t);return()=>e()}),[t,r]),null}function $(e){const t={nodes:{},materials:{}};return e&&e.traverse((e=>{e.name&&(t.nodes[e.name]=e),e.material&&!t.materials[e.material.name]&&(t.materials[e.material.name]=e.material)})),t}function ee(e){return i.useMemo((()=>$(e)),[e])}function te(e,t){return function(r,...n){const i=new 
r;return e&&e(i),Promise.all(n.map((e=>new Promise(((r,n)=>i.load(e,(e=>{e.scene&&Object.assign(e,$(e.scene)),r(e)}),t,(t=>n(`Could not load ${e}: ${t.message}`))))))))}}function re(e,t,r,n){const i=Array.isArray(t)?t:[t],a=m(te(r,n),e,...i);return Array.isArray(t)?a:a[0]}q.getDerivedStateFromError=()=>({error:!0}),re.preload=function(e,t,r){const n=Array.isArray(t)?t:[t];return m.preload(te(r),e,...n)};const ne=new Map,ie=["legacy","blocking","concurrent"],{invalidate:ae,advance:se}=function(e){let t,r=!1;function n(i){if(r=!0,t=0,Q(k,i),e.forEach((e=>{const r=e.store.getState();r.internal.active&&("always"===r.frameloop||r.internal.frames>0)&&(t+=j(i,r))})),Q(O,i),t>0)return requestAnimationFrame(n);Q(G,i),r=!1}return{loop:n,invalidate:function t(i){if(!i)return e.forEach((e=>t(e.store.getState())));!i.vr&&i.internal.active&&"never"!==i.frameloop&&(i.internal.frames=Math.min(60,i.internal.frames+1),r||(r=!0,requestAnimationFrame(n)))},advance:function(t,r=!0,n){r&&Q(k,t),n?j(t,n):e.forEach((e=>j(t,e.store.getState()))),r&&Q(O,t)}}}(ne),{reconciler:oe,applyProps:le}=function(e){function t(e,t,a={},s=!1){var o,l,c;const u=null!=(o=null==e?void 0:e.__r3f)?o:{},h=u.root,d=null!=(l=null==h||null==h.getState?void 0:h.getState())?l:{},p=[],f=[],m={};let g=0;Object.entries(t).forEach((([e,t])=>{-1===B.indexOf(e)&&(m[e]=t)})),u.memoizedProps&&u.memoizedProps.args&&(m.args=u.memoizedProps.args),u.memoizedProps&&u.memoizedProps.attach&&(m.attach=u.memoizedProps.attach),e.__r3f&&(e.__r3f.memoizedProps=m);let v=Object.keys(t);for(g=0;g-1&&delete x[v[g]];const b=Object.entries(x);for(g=0;g0){if(b.forEach((([t,i])=>{if(!f.includes(t)){let a=e,s=a[t];if(t.includes("-")){const r=t.split("-");if(s=r.reduce(((e,t)=>e[t]),e),!s||!s.set){const[n,...i]=r.reverse();a=i.reverse().reduce(((e,t)=>e[t]),e),t=n}}if(i===C+"remove")if(s&&s.constructor)i=new s.constructor(m.args);else if(a.constructor){const e=new a.constructor(a.__r3f.memoizedProps.args);i=e[s],e.dispose&&e.dispose()}else 
i=0;if(s&&s.set&&(s.copy||s instanceof n.Layers)){if(Array.isArray(i))s.fromArray?s.fromArray(i):s.set(...i);else if(s.copy&&i&&i.constructor&&s.constructor.name===i.constructor.name)s.copy(i);else if(void 0!==i){const e=s instanceof n.Color;!e&&s.setScalar?s.setScalar(i):s instanceof n.Layers&&i instanceof n.Layers?s.mask=i.mask:s.set(i),!d.linear&&e&&s.convertSRGBToLinear()}}else a[t]=i,!d.linear&&a[t]instanceof n.Texture&&(a[t].encoding=n.sRGBEncoding);r(e)}})),s&&h&&e.raycast&&u.handlers){u.handlers=void 0;const t=d.internal.interaction.indexOf(e);t>-1&&d.internal.interaction.splice(t,1)}f.length&&(s&&h&&e.raycast&&d.internal.interaction.push(e),u.handlers=f.reduce(((e,r)=>({...e,[r]:t[r]})),{})),e.parent&&i(e)}}function r(e){var t,r;const n=null==(t=e.__r3f)||null==(r=t.root)||null==r.getState?void 0:r.getState();n&&0===n.internal.frames&&n.invalidate()}function i(e){null==e.onUpdate||e.onUpdate(e)}function a(e,{args:r=[],...i},a,s,o){let l,c=`${e[0].toUpperCase()}${e.slice(1)}`;if(!M(a)&&o){const e=t=>t.return?e(t.return):t.stateNode&&t.stateNode.containerInfo;a=e(o)}if(!a||!M(a))throw`No valid root for ${c}!`;if("primitive"===e){if(void 0===i.object)throw"Primitives without 'object' are invalid!";l=F(i.object,{root:a,instance:!0})}else{const e=R[c]||n[c];if(!e)throw`${c} is not part of the THREE namespace! Did you forget to extend? 
See: https://github.com/pmndrs/react-three-fiber/blob/master/markdown/api.md#using-3rd-party-objects-declaratively`;const t=E.arr(r);l=F(t?new e(...r):new e(r),{root:a,memoizedProps:{args:t&&0===r.length?null:r}})}return c.endsWith("Geometry")?i={attach:"geometry",...i}:c.endsWith("Material")&&(i={attach:"material",...i}),t(l,i,{}),l}function s(e,t){let n=!1;t&&(t.attachArray?(E.arr(e[t.attachArray])||(e[t.attachArray]=[]),e[t.attachArray].push(t)):t.attachObject?(E.obj(e[t.attachObject[0]])||(e[t.attachObject[0]]={}),e[t.attachObject[0]][t.attachObject[1]]=t):t.attach&&!E.fun(t.attach)?e[t.attach]=t:t.isObject3D&&(e.add(t),n=!0),n||(e.__r3f.objects.push(t),t.parent=e),i(t),r(t))}function o(e,t,n){let a=!1;if(t){if(t.attachArray){const r=e[t.attachArray];E.arr(r)||(e[t.attachArray]=[]),r.splice(r.indexOf(n),0,t)}else{if(t.attachObject||t.attach&&!E.fun(t.attach))return a=!0,s(e,t);if(t.isObject3D){t.parent=e,t.dispatchEvent({type:"added"});const r=e.children.filter((e=>e!==t)),i=r.indexOf(n);e.children=[...r.slice(0,i),t,...r.slice(i)],a=!0}}a||(e.__r3f.objects.push(t),t.parent=e),i(t),r(t)}}function u(e,t,r=!1){e&&[...e].forEach((e=>h(t,e,r)))}function h(e,t,n){if(t){var i;if(e.__r3f.objects){const r=e.__r3f.objects.length;e.__r3f.objects=e.__r3f.objects.filter((e=>e!==t));e.__r3f.objects.lengthe!==t));else if(t.attachObject)delete e[t.attachObject[0]][t.attachObject[1]];else if(t.attach&&!E.fun(t.attach))e[t.attach]=null;else if(t.isObject3D){var a;e.remove(t),null!=(a=t.__r3f)&&a.root&&function(e,t){const{internal:r}=e.getState();r.interaction=r.interaction.filter((e=>e!==t)),r.initialHits=r.initialHits.filter((e=>e!==t)),r.hovered.forEach(((e,n)=>{e.eventObject!==t&&e.object!==t||r.hovered.delete(n)}))}(t.__r3f.root,t)}const o=null==(i=t.__r3f)?void 0:i.instance,l=void 0===n?null!==t.dispose&&!o:n;var s;if(!o)u(null==(s=t.__r3f)?void 0:s.objects,t,l),u(t.children,t,l);t.__r3f&&(delete t.__r3f.root,delete t.__r3f.objects,delete t.__r3f.handlers,delete 
t.__r3f.memoizedProps,o||delete t.__r3f),l&&t.dispose&&"Scene"!==t.type&&(0,c.unstable_runWithPriority)(c.unstable_IdlePriority,(()=>t.dispose())),r(e)}}function d(e,t,r,n){const i=e.parent;if(!i)return;const o=a(t,r,e.__r3f.root);e.children&&(e.children.forEach((e=>s(o,e))),e.children=[]),e.__r3f.objects.forEach((e=>s(o,e))),e.__r3f.objects=[],h(i,e),s(i,o),[n,n.alternate].forEach((e=>{null!==e&&(e.stateNode=o,e.ref&&("function"==typeof e.ref?e.ref(o):e.ref.current=o))}))}return{reconciler:l()({now:c.unstable_now,createInstance:a,removeChild:h,appendChild:s,appendInitialChild:s,insertBefore:o,warnsIfNotActing:!0,supportsMutation:!0,isPrimaryRenderer:!1,scheduleTimeout:E.fun(setTimeout)?setTimeout:void 0,cancelTimeout:E.fun(clearTimeout)?clearTimeout:void 0,setTimeout:E.fun(setTimeout)?setTimeout:void 0,clearTimeout:E.fun(clearTimeout)?clearTimeout:void 0,noTimeout:-1,appendChildToContainer:(e,t)=>{const{container:r,root:n}=T(e,t);r.__r3f.root=n,s(r,t)},removeChildFromContainer:(e,t)=>{const{container:r}=T(e,t);h(r,t)},insertInContainerBefore:(e,t,r)=>{const{container:n}=T(e,t);o(n,t,r)},commitUpdate(e,r,n,i,a,s){if(e.__r3f.instance&&a.object&&a.object!==e)d(e,n,a,s);else{const{args:r=[],...o}=a,{args:l=[],...c}=i;r.some(((e,t)=>E.obj(e)?Object.entries(e).some((([e,r])=>r!==l[t][e])):e!==l[t]))?d(e,n,a,s):t(e,o,c,!0)}},hideInstance(e){e.isObject3D&&(e.visible=!1,r(e))},unhideInstance(e,t){(e.isObject3D&&null==t.visible||t.visible)&&(e.visible=!0,r(e))},hideTextInstance(){throw new Error("Text is not allowed in the R3F tree.")},getPublicInstance:e=>e,getRootHostContext:e=>I,getChildHostContext:e=>I,createTextInstance(){},finalizeInitialChildren:e=>!!e.__r3f.handlers,commitMount(e){e.raycast&&e.__r3f.handlers&&e.__r3f.root.getState().internal.interaction.push(e)},prepareUpdate:()=>I,shouldDeprioritizeSubtree:()=>!1,prepareForCommit:()=>null,preparePortalMount(...e){},resetAfterCommit(){},shouldSetTextContent:()=>!1,clearContainer:()=>!1}),applyProps:t}}();function 
ce(e,t,{gl:r,size:o,mode:l=ie[1],events:c,onCreated:u,...h}={}){var d,p,f,m,g;o||(o={width:null!=(p=null==(f=t.parentElement)?void 0:f.clientWidth)?p:0,height:null!=(m=null==(g=t.parentElement)?void 0:g.clientHeight)?m:0});let v=ne.get(t),A=null==v?void 0:v.fiber,y=null==v?void 0:v.store,x=null==(d=y)?void 0:d.getState();if(A&&x){const e=x.internal.lastProps;void 0===h.dpr||E.equ(e.dpr,h.dpr)||x.setDpr(h.dpr),void 0===o||E.equ(e.size,o)||x.setSize(o.width,o.height);h.linear!==e.linear&&(he(t),A=void 0)}if(!A){const e=((e,t)=>{return(r=e)&&r.render?e:new n.WebGLRenderer({powerPreference:"high-performance",canvas:t,antialias:!0,alpha:!0,...e});var r})(r,t);h.vr&&(e.xr.enabled=!0,e.setAnimationLoop((e=>se(e,!0)))),y=((e,t,r,i)=>{const{gl:o,size:l,shadows:c=!1,linear:u=!1,flat:h=!1,vr:d=!1,orthographic:p=!1,frameloop:f="always",dpr:m=1,performance:g,clock:v=new n.Clock,raycaster:A,camera:y,onPointerMissed:x}=i;c&&(o.shadowMap.enabled=!0,"object"==typeof c?Object.assign(o.shadowMap,c):o.shadowMap.type=n.PCFSoftShadowMap),u||(h||(o.toneMapping=n.ACESFilmicToneMapping),o.outputEncoding=n.sRGBEncoding),"never"===f&&(v.stop(),v.elapsedTime=0);const b=(0,a.Z)(((a,s)=>{const l=new n.Raycaster,{params:c,...b}=A||{};e(l,{enabled:!0,...b,params:{...l.params,...c}},{});const w=y instanceof n.Camera,E=w?y:p?new n.OrthographicCamera(0,0,0,0,.1,1e3):new n.PerspectiveCamera(75,0,.1,1e3);function _(e){return Array.isArray(e)?Math.min(Math.max(e[0],window.devicePixelRatio),e[1]):e}w||(E.position.z=5,y&&e(E,y,{}),E.lookAt(0,0,0));const S=_(m),M=new n.Vector3,T=new n.Vector3;function C(e=s().camera,t=T,r=s().size){const{width:n,height:i}=r,a=n/i,o=e.getWorldPosition(M).distanceTo(t);if(L(e))return{width:n/e.zoom,height:i/e.zoom,factor:1,distance:o,aspect:a};{const t=e.fov*Math.PI/180,r=2*Math.tan(t/2)*o,s=r*(n/i);return{width:s,height:r,factor:n/s,distance:o,aspect:a}}}let I;const 
B=e=>a((t=>({performance:{...t.performance,current:e}})));return{gl:o,set:a,get:s,invalidate:()=>t(s()),advance:(e,t)=>r(e,t,s()),linear:u,flat:h,scene:F(new n.Scene),camera:E,raycaster:l,clock:v,mouse:new n.Vector2,vr:d,frameloop:f,onPointerMissed:x,performance:{current:1,min:.5,max:1,debounce:200,...g,regress:()=>{const e=s();I&&clearTimeout(I),e.performance.current!==e.performance.min&&B(e.performance.min),I=setTimeout((()=>B(s().performance.max)),e.performance.debounce)}},size:{width:0,height:0},viewport:{initialDpr:S,dpr:S,width:0,height:0,aspect:0,distance:0,factor:0,getCurrentViewport:C},setSize:(e,t)=>{const r={width:e,height:t};a((e=>({size:r,viewport:{...e.viewport,...C(E,T,r)}})))},setDpr:e=>a((t=>({viewport:{...t.viewport,dpr:_(e)}}))),events:{connected:!1},internal:{active:!1,priority:0,frames:0,lastProps:i,interaction:[],hovered:new Map,subscribers:[],initialClick:[0,0],initialHits:[],capturedMap:new Map,subscribe:(e,t=0)=>(a((({internal:r})=>({internal:{...r,priority:r.priority+(t?1:0),subscribers:[...r.subscribers,{ref:e,priority:t}].sort(((e,t)=>e.priority-t.priority))}}))),()=>{a((({internal:r})=>({internal:{...r,priority:r.priority-(t?1:0),subscribers:r.subscribers.filter((t=>t.ref!==e))}})))})}}}));b.subscribe((()=>{const{camera:e,size:t,viewport:r,internal:i}=b.getState();i.lastProps.camera instanceof n.Camera||(L(e)?(e.left=t.width/-2,e.right=t.width/2,e.top=t.height/2,e.bottom=t.height/-2):e.aspect=t.width/t.height,e.updateProjectionMatrix(),e.updateMatrixWorld()),o.setPixelRatio(r.dpr),o.setSize(t.width,t.height)}),(e=>[e.viewport.dpr,e.size]),s.Z);const w=b.getState();return l&&w.setSize(l.width,l.height),b.subscribe((e=>t(e))),b})(le,ae,se,{gl:e,size:o,...h});const i=y.getState();i.get,A=oe.createContainer(y,ie.indexOf(l),!1,null),ne.set(t,{fiber:A,store:y}),c&&i.set({events:c(y)})}if(y&&A)return oe.updateContainer(i.createElement(ue,{store:y,element:e,onCreated:u,target:t}),A,null,(()=>{})),y;throw"Error creating root!"}function 
ue({store:e,element:t,onCreated:r,target:n}){return i.useEffect((()=>{const t=e.getState();t.set((e=>({internal:{...e.internal,active:!0}}))),null==t.events.connect||t.events.connect(n),r&&r(t)}),[]),i.createElement(D.Provider,{value:e},t)}function he(e,t){const r=ne.get(e),n=null==r?void 0:r.fiber;if(n){const i=null==r?void 0:r.store.getState();i&&(i.internal.active=!1),oe.updateContainer(null,n,null,(()=>{i&&setTimeout((()=>{var r,n,a;null==i.events.disconnect||i.events.disconnect(),null==(r=i.gl)||null==(n=r.renderLists)||null==n.dispose||n.dispose(),null==(a=i.gl)||null==a.forceContextLoss||a.forceContextLoss(),de(i),ne.delete(e),t&&t(e)}),500)}))}}function de(e){e.dispose&&"Scene"!==e.type&&e.dispose();for(const n in e){var t,r;null==(t=(r=n).dispose)||t.call(r),delete e[n]}}const pe=oe.act,fe=E.fun(Symbol)&&Symbol.for?Symbol.for("react.portal"):60106;function me(e,t,r,n=null){return{$$typeof:fe,key:null==n?null:""+n,children:e,containerInfo:F(t),implementation:r}}oe.injectIntoDevTools({bundleType:0,rendererPackageName:"@react-three/fiber",version:"17.0.2"})},40417:(e,t,r)=>{"use strict";t._b=t.H7=t.LK=t.Kv=t.Mf=t.SE=t.N$=t.oI=t.$9=t.pT=t.fK=t.ej=t.hI=t.Wx=t.Sj=t.r3=t.ue=t.ZW=t.z5=t.Oq=t.P7=t.bS=t.N7=t.Hs=t.dV=t.oC=t.vA=t.ol=t.l$=t.sd=t.dK=t.V3=t.Bw=t.hW=t._o=t.b4=t.nu=t.wd=t.q4=void 0;var n=r(97657);Object.defineProperty(t,"q4",{enumerable:!0,get:function(){return n.VERSION}});var i=r(81183);Object.defineProperty(t,"wd",{enumerable:!0,get:function(){return i.CstParser}}),Object.defineProperty(t,"nu",{enumerable:!0,get:function(){return i.EmbeddedActionsParser}}),Object.defineProperty(t,"b4",{enumerable:!0,get:function(){return i.ParserDefinitionErrorType}}),Object.defineProperty(t,"_o",{enumerable:!0,get:function(){return i.EMPTY_ALT}});var a=r(88200);Object.defineProperty(t,"hW",{enumerable:!0,get:function(){return a.Lexer}}),Object.defineProperty(t,"Bw",{enumerable:!0,get:function(){return a.LexerDefinitionErrorType}});var 
s=r(58980);Object.defineProperty(t,"V3",{enumerable:!0,get:function(){return s.createToken}}),Object.defineProperty(t,"dK",{enumerable:!0,get:function(){return s.createTokenInstance}}),Object.defineProperty(t,"sd",{enumerable:!0,get:function(){return s.EOF}}),Object.defineProperty(t,"l$",{enumerable:!0,get:function(){return s.tokenLabel}}),Object.defineProperty(t,"ol",{enumerable:!0,get:function(){return s.tokenMatcher}}),Object.defineProperty(t,"vA",{enumerable:!0,get:function(){return s.tokenName}});var o=r(63670);Object.defineProperty(t,"oC",{enumerable:!0,get:function(){return o.getLookaheadPaths}});var l=r(3449);Object.defineProperty(t,"dV",{enumerable:!0,get:function(){return l.LLkLookaheadStrategy}});var c=r(27621);Object.defineProperty(t,"Hs",{enumerable:!0,get:function(){return c.defaultParserErrorProvider}});var u=r(34400);Object.defineProperty(t,"N7",{enumerable:!0,get:function(){return u.EarlyExitException}}),Object.defineProperty(t,"bS",{enumerable:!0,get:function(){return u.isRecognitionException}}),Object.defineProperty(t,"P7",{enumerable:!0,get:function(){return u.MismatchedTokenException}}),Object.defineProperty(t,"Oq",{enumerable:!0,get:function(){return u.NotAllInputParsedException}}),Object.defineProperty(t,"z5",{enumerable:!0,get:function(){return u.NoViableAltException}});var h=r(58232);Object.defineProperty(t,"ZW",{enumerable:!0,get:function(){return h.defaultLexerErrorProvider}});var d=r(93062);Object.defineProperty(t,"ue",{enumerable:!0,get:function(){return d.Alternation}}),Object.defineProperty(t,"r3",{enumerable:!0,get:function(){return d.Alternative}}),Object.defineProperty(t,"Sj",{enumerable:!0,get:function(){return d.NonTerminal}}),Object.defineProperty(t,"Wx",{enumerable:!0,get:function(){return d.Option}}),Object.defineProperty(t,"hI",{enumerable:!0,get:function(){return d.Repetition}}),Object.defineProperty(t,"ej",{enumerable:!0,get:function(){return 
d.RepetitionMandatory}}),Object.defineProperty(t,"fK",{enumerable:!0,get:function(){return d.RepetitionMandatoryWithSeparator}}),Object.defineProperty(t,"pT",{enumerable:!0,get:function(){return d.RepetitionWithSeparator}}),Object.defineProperty(t,"$9",{enumerable:!0,get:function(){return d.Rule}}),Object.defineProperty(t,"oI",{enumerable:!0,get:function(){return d.Terminal}});var p=r(93062);Object.defineProperty(t,"N$",{enumerable:!0,get:function(){return p.serializeGrammar}}),Object.defineProperty(t,"SE",{enumerable:!0,get:function(){return p.serializeProduction}}),Object.defineProperty(t,"Mf",{enumerable:!0,get:function(){return p.GAstVisitor}});var f=r(82688);Object.defineProperty(t,"Kv",{enumerable:!0,get:function(){return f.generateCstDts}}),t.LK=function(){console.warn("The clearCache function was 'soft' removed from the Chevrotain API.\n\t It performs no action other than printing this message.\n\t Please avoid using it as it will be completely removed in the future")};var m=r(77141);Object.defineProperty(t,"H7",{enumerable:!0,get:function(){return m.createSyntaxDiagramsCode}});var g=function(){throw new Error("The Parser class has been deprecated, use CstParser or EmbeddedActionsParser instead.\t\nSee: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_7-0-0")};t._b=g},77141:(e,t,r)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.createSyntaxDiagramsCode=void 0;var n=r(97657);t.createSyntaxDiagramsCode=function(e,t){var r=void 0===t?{}:t,i=r.resourceBase,a=void 0===i?"https://unpkg.com/chevrotain@".concat(n.VERSION,"/diagrams/"):i,s=r.css,o=void 0===s?"https://unpkg.com/chevrotain@".concat(n.VERSION,"/diagrams/diagrams.css"):s;return'\n\x3c!-- This is a generated file --\x3e\n\n\n\n\n'+"\n\n")+"\n + \ No newline at end of file diff --git a/community/index.html b/community/index.html new file mode 100644 index 00000000..e64a87d3 --- /dev/null +++ b/community/index.html @@ -0,0 +1,26 @@ + + + + + +Join the community | Vac Research + 
+ + + + + + + + + +
+

Join the community

Join the Vac Community!

Keep up to date with our latest research by connecting with us on our communities channels.
+ + + + \ No newline at end of file diff --git a/contribute/index.html b/contribute/index.html new file mode 100644 index 00000000..e133a174 --- /dev/null +++ b/contribute/index.html @@ -0,0 +1,29 @@ + + + + + +Contribute | Vac Research + + + + + + + + + + +
+
+ + + + \ No newline at end of file diff --git a/deepresearch/index.html b/deepresearch/index.html new file mode 100644 index 00000000..c8c1a08e --- /dev/null +++ b/deepresearch/index.html @@ -0,0 +1,36 @@ + + + + + +Deep Research | Vac Research + + + + + + + + + + +
+

Vac Deep Research

Vac Deep Research is at the forefront of exploration and cutting-edge innovation within the IFT. +Their work extends beyond scientific publications, actively bridging the gap between theory and practice. +The team collaborates with various entities, such as Vac R&D units, incubator projects, and IFT projects, to bring their research findings to fruition. +Part of this effort includes identifying opportunities for and spawning new incubator projects, +allowing Vac Deep Research to translate their research findings into practical applications within the IFT. +Deep Research encompasses several key areas, including zero knowledge (ZK), decentralised privacy-preserving node provider networks, validator privacy, and libp2p gossipsub improvements.

Zero-Knowledge Proofs

In the realm of ZKP, Vac Deep Research has made contributions that have given rise to the incubator project Nescience. +The team delved into the intricacies of zero-knowledge proofs, exploring their applications and pushing the boundaries of privacy-preserving technologies. +By advancing the field of ZK, Vac Deep Research strengthens the foundation for secure and confidential interactions within decentralised networks.

Libp2p Gossipsub Improvements

Another area of focus for Vac Deep Research is "libp2p gossipsub improvements". +The team explores ways to enhance the performance, efficiency, and reliability of the libp2p gossipsub protocol. +By conducting in-depth research and proposing improvements, Vac Deep Research aims to optimise information sharing and communication within decentralised networks, +contributing to the overall robustness and scalability of the P2P layers of IFT projects.

Anonymisation Networks

Vac also researches anonymisation networks, with the main goal of a libp2p gossipsub anonymisation layer with pluggable project-specific components.

+ + + + \ No newline at end of file diff --git a/device-pairing-in-js-waku-and-go-waku/index.html b/device-pairing-in-js-waku-and-go-waku/index.html new file mode 100644 index 00000000..cbdbcf76 --- /dev/null +++ b/device-pairing-in-js-waku-and-go-waku/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/dns-based-discovery/index.html b/dns-based-discovery/index.html new file mode 100644 index 00000000..732582e7 --- /dev/null +++ b/dns-based-discovery/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/ethics-surveillance-tech/index.html b/ethics-surveillance-tech/index.html new file mode 100644 index 00000000..ec1d1b59 --- /dev/null +++ b/ethics-surveillance-tech/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/feasibility-discv5/index.html b/feasibility-discv5/index.html new file mode 100644 index 00000000..f848fb52 --- /dev/null +++ b/feasibility-discv5/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/feasibility-semaphore-rate-limiting-zksnarks/index.html b/feasibility-semaphore-rate-limiting-zksnarks/index.html new file mode 100644 index 00000000..f6cbb404 --- /dev/null +++ b/feasibility-semaphore-rate-limiting-zksnarks/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/fixing-whisper-with-waku/index.html b/fixing-whisper-with-waku/index.html new file mode 100644 index 00000000..51095b0b --- /dev/null +++ b/fixing-whisper-with-waku/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/fonts/Inter/Inter-Black.ttf b/fonts/Inter/Inter-Black.ttf new file mode 100644 index 00000000..5aecf7dc Binary files /dev/null and b/fonts/Inter/Inter-Black.ttf differ diff --git a/fonts/Inter/Inter-Bold.ttf b/fonts/Inter/Inter-Bold.ttf new file mode 100644 index 00000000..8e82c70d Binary files /dev/null and b/fonts/Inter/Inter-Bold.ttf differ diff --git 
a/fonts/Inter/Inter-ExtraBold.ttf b/fonts/Inter/Inter-ExtraBold.ttf new file mode 100644 index 00000000..cb4b8217 Binary files /dev/null and b/fonts/Inter/Inter-ExtraBold.ttf differ diff --git a/fonts/Inter/Inter-ExtraLight.ttf b/fonts/Inter/Inter-ExtraLight.ttf new file mode 100644 index 00000000..64aee30a Binary files /dev/null and b/fonts/Inter/Inter-ExtraLight.ttf differ diff --git a/fonts/Inter/Inter-Light.ttf b/fonts/Inter/Inter-Light.ttf new file mode 100644 index 00000000..9e265d89 Binary files /dev/null and b/fonts/Inter/Inter-Light.ttf differ diff --git a/fonts/Inter/Inter-Medium.ttf b/fonts/Inter/Inter-Medium.ttf new file mode 100644 index 00000000..b53fb1c4 Binary files /dev/null and b/fonts/Inter/Inter-Medium.ttf differ diff --git a/fonts/Inter/Inter-Regular.ttf b/fonts/Inter/Inter-Regular.ttf new file mode 100644 index 00000000..8d4eebf2 Binary files /dev/null and b/fonts/Inter/Inter-Regular.ttf differ diff --git a/fonts/Inter/Inter-SemiBold.ttf b/fonts/Inter/Inter-SemiBold.ttf new file mode 100644 index 00000000..c6aeeb16 Binary files /dev/null and b/fonts/Inter/Inter-SemiBold.ttf differ diff --git a/fonts/Inter/Inter-Thin.ttf b/fonts/Inter/Inter-Thin.ttf new file mode 100644 index 00000000..7aed55d5 Binary files /dev/null and b/fonts/Inter/Inter-Thin.ttf differ diff --git a/future-of-waku-network/index.html b/future-of-waku-network/index.html new file mode 100644 index 00000000..c3a89cf8 --- /dev/null +++ b/future-of-waku-network/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/generated/.placeholder b/generated/.placeholder new file mode 100644 index 00000000..e69de29b diff --git a/generated/jobs.json b/generated/jobs.json new file mode 100644 index 00000000..cc4665cd --- /dev/null +++ b/generated/jobs.json @@ -0,0 +1 @@ +{"departments":[{"id":87842,"name":"App","parent_id":43806,"child_ids":[87847,87852,87850,87848,45530,87849],"jobs":[]},{"id":54504,"name":"Brand Design 
Studio","parent_id":null,"child_ids":[],"jobs":[]},{"id":45532,"name":"Business Development","parent_id":null,"child_ids":[],"jobs":[]},{"id":87841,"name":"Codex","parent_id":43806,"child_ids":[],"jobs":[]},{"id":84549,"name":"Communications","parent_id":null,"child_ids":[],"jobs":[]},{"id":45531,"name":"Design","parent_id":null,"child_ids":[],"jobs":[]},{"id":87847,"name":"Desktop","parent_id":87842,"child_ids":[],"jobs":[]},{"id":87852,"name":"Documentation","parent_id":87842,"child_ids":[],"jobs":[]},{"id":45547,"name":"Engineering ","parent_id":null,"child_ids":[],"jobs":[]},{"id":49925,"name":"Finance","parent_id":87845,"child_ids":[],"jobs":[]},{"id":87854,"name":"Infrastructure","parent_id":43806,"child_ids":[],"jobs":[]},{"id":87853,"name":"Insights","parent_id":87845,"child_ids":[],"jobs":[]},{"id":87850,"name":"Keycard","parent_id":87842,"child_ids":[],"jobs":[]},{"id":145838,"name":"Leadership","parent_id":null,"child_ids":[],"jobs":[]},{"id":74156,"name":"Legal","parent_id":87845,"child_ids":[],"jobs":[]},{"id":91698,"name":"Logos","parent_id":null,"child_ids":[],"jobs":[]},{"id":43807,"name":"Marketing","parent_id":null,"child_ids":[],"jobs":[]},{"id":87848,"name":"Mobile","parent_id":87842,"child_ids":[],"jobs":[]},{"id":87843,"name":"Nimbus","parent_id":43806,"child_ids":[],"jobs":[]},{"id":144866,"name":"Nomos","parent_id":43806,"child_ids":[],"jobs":[]},{"id":45548,"name":"People Operations","parent_id":87845,"child_ids":[],"jobs":[]},{"id":45530,"name":"Product Design","parent_id":87842,"child_ids":[],"jobs":[]},{"id":90941,"name":"Program Management","parent_id":null,"child_ids":[],"jobs":[]},{"id":43806,"name":"Research & 
Development","parent_id":null,"child_ids":[87842,87841,87854,87843,144866,87846,87981,87847,87852,87850,87848,45530,87849],"jobs":[]},{"id":87851,"name":"Security","parent_id":87845,"child_ids":[],"jobs":[]},{"id":87845,"name":"Services","parent_id":null,"child_ids":[49925,87853,74156,45548,87851],"jobs":[]},{"id":91697,"name":"Status App ","parent_id":null,"child_ids":[],"jobs":[]},{"id":216509,"name":"Status Network","parent_id":null,"child_ids":[],"jobs":[]},{"id":54783,"name":"Technical Writing ","parent_id":null,"child_ids":[],"jobs":[]},{"id":87846,"name":"Vac","parent_id":43806,"child_ids":[],"jobs":[{"absolute_url":"https://boards.greenhouse.io/vac/jobs/5671819","data_compliance":[{"type":"gdpr","requires_consent":false,"requires_processing_consent":false,"requires_retention_consent":false,"retention_period":null}],"internal_job_id":2778364,"location":{"name":"Remote (Worldwide)"},"metadata":null,"id":5671819,"updated_at":"2024-04-29T11:27:14-04:00","requisition_id":"BACK-1205","title":"Libp2p Networking Engineer"},{"absolute_url":"https://boards.greenhouse.io/vac/jobs/5700917","data_compliance":[{"type":"gdpr","requires_consent":false,"requires_processing_consent":false,"requires_retention_consent":false,"retention_period":null}],"internal_job_id":2785448,"location":{"name":"Remote (Worldwide)"},"metadata":null,"id":5700917,"updated_at":"2024-04-29T11:27:14-04:00","requisition_id":"BACK-1330","title":"Software Developer in Test (Rust or Go)"},{"absolute_url":"https://boards.greenhouse.io/vac/jobs/5543925","data_compliance":[{"type":"gdpr","requires_consent":false,"requires_processing_consent":false,"requires_retention_consent":false,"retention_period":null}],"internal_job_id":2735796,"location":{"name":"Remote (Worldwide)"},"metadata":null,"id":5543925,"updated_at":"2024-04-29T11:27:14-04:00","requisition_id":"Back-1245","title":"Zero Knowledge Research Engineer 
(ACZ)"},{"absolute_url":"https://boards.greenhouse.io/vac/jobs/5453093","data_compliance":[{"type":"gdpr","requires_consent":false,"requires_processing_consent":false,"requires_retention_consent":false,"retention_period":null}],"internal_job_id":2331302,"location":{"name":"Remote (Worldwide)"},"metadata":null,"id":5453093,"updated_at":"2024-04-29T11:27:14-04:00","requisition_id":"PROV-ZKE-1","title":"Zero Knowledge Researcher (Nescience) "}]},{"id":87981,"name":"Waku","parent_id":43806,"child_ids":[],"jobs":[]},{"id":87849,"name":"Web","parent_id":87842,"child_ids":[],"jobs":[]},{"id":0,"name":"No Department","parent_id":null,"child_ids":[],"jobs":[]}]} \ No newline at end of file diff --git a/icons/add.svg b/icons/add.svg new file mode 100644 index 00000000..a75d3674 --- /dev/null +++ b/icons/add.svg @@ -0,0 +1,4 @@ + + + + diff --git a/icons/arrow-left-circle.svg b/icons/arrow-left-circle.svg new file mode 100644 index 00000000..7aa2614d --- /dev/null +++ b/icons/arrow-left-circle.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/arrow-left.svg b/icons/arrow-left.svg new file mode 100644 index 00000000..51142a1b --- /dev/null +++ b/icons/arrow-left.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/arrow-right-circle.svg b/icons/arrow-right-circle.svg new file mode 100644 index 00000000..3746eed0 --- /dev/null +++ b/icons/arrow-right-circle.svg @@ -0,0 +1,4 @@ + + + + diff --git a/icons/arrow-right.svg b/icons/arrow-right.svg new file mode 100644 index 00000000..5c92dbba --- /dev/null +++ b/icons/arrow-right.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/avatar.svg b/icons/avatar.svg new file mode 100644 index 00000000..94307fbb --- /dev/null +++ b/icons/avatar.svg @@ -0,0 +1,7 @@ + + + + + \ No newline at end of file diff --git a/icons/chevron-left.svg b/icons/chevron-left.svg new file mode 100644 index 00000000..f1cdcbdd --- /dev/null +++ b/icons/chevron-left.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/chevron-up.svg b/icons/chevron-up.svg new file mode 100644 index 
00000000..9c20b961 --- /dev/null +++ b/icons/chevron-up.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/close.svg b/icons/close.svg new file mode 100644 index 00000000..c2476089 --- /dev/null +++ b/icons/close.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/icons/cmd.svg b/icons/cmd.svg new file mode 100644 index 00000000..f9484def --- /dev/null +++ b/icons/cmd.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/copy.svg b/icons/copy.svg new file mode 100644 index 00000000..c8e2f22e --- /dev/null +++ b/icons/copy.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/dashicons_share.svg b/icons/dashicons_share.svg new file mode 100644 index 00000000..6a2c080a --- /dev/null +++ b/icons/dashicons_share.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/day.svg b/icons/day.svg new file mode 100644 index 00000000..62fa63f4 --- /dev/null +++ b/icons/day.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/discord-black.svg b/icons/discord-black.svg new file mode 100644 index 00000000..d8c03f02 --- /dev/null +++ b/icons/discord-black.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/discord-white.svg b/icons/discord-white.svg new file mode 100644 index 00000000..a9a9b3e2 --- /dev/null +++ b/icons/discord-white.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/discord.svg b/icons/discord.svg new file mode 100644 index 00000000..9da6ee03 --- /dev/null +++ b/icons/discord.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/icons/discourse.svg b/icons/discourse.svg new file mode 100644 index 00000000..af01707a --- /dev/null +++ b/icons/discourse.svg @@ -0,0 +1 @@ +Discourse \ No newline at end of file diff --git a/icons/document.svg b/icons/document.svg new file mode 100644 index 00000000..248ad061 --- /dev/null +++ b/icons/document.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/dot.svg b/icons/dot.svg new file mode 100644 index 00000000..2db710bf --- /dev/null +++ b/icons/dot.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/download.svg b/icons/download.svg new file mode 100644 index 00000000..35f433bf --- /dev/null +++ 
b/icons/download.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/drop-down-1.svg b/icons/drop-down-1.svg new file mode 100644 index 00000000..2cbf2150 --- /dev/null +++ b/icons/drop-down-1.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/drop-down.svg b/icons/drop-down.svg new file mode 100644 index 00000000..4675fcf7 --- /dev/null +++ b/icons/drop-down.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/dropdown.svg b/icons/dropdown.svg new file mode 100644 index 00000000..f583120f --- /dev/null +++ b/icons/dropdown.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/edit.svg b/icons/edit.svg new file mode 100644 index 00000000..b444489d --- /dev/null +++ b/icons/edit.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/esc.svg b/icons/esc.svg new file mode 100644 index 00000000..34ab5a09 --- /dev/null +++ b/icons/esc.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/icons/external-link.svg b/icons/external-link.svg new file mode 100644 index 00000000..5dbb5af2 --- /dev/null +++ b/icons/external-link.svg @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/icons/file.svg b/icons/file.svg new file mode 100644 index 00000000..d279573a --- /dev/null +++ b/icons/file.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/folder.svg b/icons/folder.svg new file mode 100644 index 00000000..a852a103 --- /dev/null +++ b/icons/folder.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/fullscreen-exit.svg b/icons/fullscreen-exit.svg new file mode 100644 index 00000000..3c283ba2 --- /dev/null +++ b/icons/fullscreen-exit.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/fullscreen.svg b/icons/fullscreen.svg new file mode 100644 index 00000000..3dc6da71 --- /dev/null +++ b/icons/fullscreen.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/github-black.svg b/icons/github-black.svg new file mode 100644 index 00000000..8b7ccb30 --- /dev/null +++ b/icons/github-black.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/github-white.svg b/icons/github-white.svg new file mode 100644 index 00000000..de1871e6 --- /dev/null +++ 
b/icons/github-white.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/gscholar.svg b/icons/gscholar.svg new file mode 100644 index 00000000..a9c4f6d5 --- /dev/null +++ b/icons/gscholar.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/icons/hamburger-menu.svg b/icons/hamburger-menu.svg new file mode 100644 index 00000000..fee91181 --- /dev/null +++ b/icons/hamburger-menu.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/hashtag.svg b/icons/hashtag.svg new file mode 100644 index 00000000..e16e3b31 --- /dev/null +++ b/icons/hashtag.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/icons/history.svg b/icons/history.svg new file mode 100644 index 00000000..b210843c --- /dev/null +++ b/icons/history.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/home.svg b/icons/home.svg new file mode 100644 index 00000000..04ace857 --- /dev/null +++ b/icons/home.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/icons/k.svg b/icons/k.svg new file mode 100644 index 00000000..d246c595 --- /dev/null +++ b/icons/k.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/light.svg b/icons/light.svg new file mode 100644 index 00000000..bde05491 --- /dev/null +++ b/icons/light.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/linkedin.svg b/icons/linkedin.svg new file mode 100644 index 00000000..1d64065e --- /dev/null +++ b/icons/linkedin.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/remove.svg b/icons/remove.svg new file mode 100644 index 00000000..61291389 --- /dev/null +++ b/icons/remove.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/search.svg b/icons/search.svg new file mode 100644 index 00000000..c92d829a --- /dev/null +++ b/icons/search.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/icons/share.svg b/icons/share.svg new file mode 100644 index 00000000..fe0cd065 --- /dev/null +++ b/icons/share.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/status.svg b/icons/status.svg new file mode 100644 index 00000000..831e5d1d --- /dev/null +++ b/icons/status.svg @@ -0,0 +1,3 @@ + + + diff --git 
a/icons/telegram-black.svg b/icons/telegram-black.svg new file mode 100644 index 00000000..a50641b2 --- /dev/null +++ b/icons/telegram-black.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/telegram-white.svg b/icons/telegram-white.svg new file mode 100644 index 00000000..941e6bba --- /dev/null +++ b/icons/telegram-white.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/telegram.svg b/icons/telegram.svg new file mode 100644 index 00000000..52274832 --- /dev/null +++ b/icons/telegram.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/twitter.svg b/icons/twitter.svg new file mode 100644 index 00000000..9f2b66e6 --- /dev/null +++ b/icons/twitter.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/x-black.svg b/icons/x-black.svg new file mode 100644 index 00000000..9f19b491 --- /dev/null +++ b/icons/x-black.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/x-white.svg b/icons/x-white.svg new file mode 100644 index 00000000..6b7e64d3 --- /dev/null +++ b/icons/x-white.svg @@ -0,0 +1,3 @@ + + + diff --git a/icons/youtube.svg b/icons/youtube.svg new file mode 100644 index 00000000..79dfc909 --- /dev/null +++ b/icons/youtube.svg @@ -0,0 +1,3 @@ + + + diff --git a/img/anonymity_trilemma.svg b/img/anonymity_trilemma.svg new file mode 100644 index 00000000..4215c978 --- /dev/null +++ b/img/anonymity_trilemma.svg @@ -0,0 +1,90 @@ + + + + + + + + + + + low latency + low bandwidth + strong anonymity + frequency / pattern + + diff --git a/img/black-waku-logo-with-name.png b/img/black-waku-logo-with-name.png new file mode 100644 index 00000000..51309032 Binary files /dev/null and b/img/black-waku-logo-with-name.png differ diff --git a/img/building_privacy_infra.png b/img/building_privacy_infra.png new file mode 100644 index 00000000..e90da382 Binary files /dev/null and b/img/building_privacy_infra.png differ diff --git a/img/building_private_infra_adaptive.png b/img/building_private_infra_adaptive.png new file mode 100644 index 00000000..257363f5 Binary files /dev/null and 
b/img/building_private_infra_adaptive.png differ diff --git a/img/building_private_infra_circuit.png b/img/building_private_infra_circuit.png new file mode 100644 index 00000000..c28bd5dd Binary files /dev/null and b/img/building_private_infra_circuit.png differ diff --git a/img/building_private_infra_interactions.png b/img/building_private_infra_interactions.png new file mode 100644 index 00000000..a26b159d Binary files /dev/null and b/img/building_private_infra_interactions.png differ diff --git a/img/building_private_infra_intro.png b/img/building_private_infra_intro.png new file mode 100644 index 00000000..a2c76d5f Binary files /dev/null and b/img/building_private_infra_intro.png differ diff --git a/img/building_private_infra_misc.png b/img/building_private_infra_misc.png new file mode 100644 index 00000000..a4af2cf3 Binary files /dev/null and b/img/building_private_infra_misc.png differ diff --git a/img/building_private_infra_network.png b/img/building_private_infra_network.png new file mode 100644 index 00000000..2683252c Binary files /dev/null and b/img/building_private_infra_network.png differ diff --git a/img/building_private_infra_principles.png b/img/building_private_infra_principles.png new file mode 100644 index 00000000..6e9ba745 Binary files /dev/null and b/img/building_private_infra_principles.png differ diff --git a/img/building_private_infra_rlnrelay.png b/img/building_private_infra_rlnrelay.png new file mode 100644 index 00000000..0a3d7415 Binary files /dev/null and b/img/building_private_infra_rlnrelay.png differ diff --git a/img/building_private_infra_servicecred.png b/img/building_private_infra_servicecred.png new file mode 100644 index 00000000..9c79d145 Binary files /dev/null and b/img/building_private_infra_servicecred.png differ diff --git a/img/building_private_infra_shamir.png b/img/building_private_infra_shamir.png new file mode 100644 index 00000000..a56ec0df Binary files /dev/null and b/img/building_private_infra_shamir.png differ 
diff --git a/img/building_private_infra_shamircode.png b/img/building_private_infra_shamircode.png new file mode 100644 index 00000000..61c8857d Binary files /dev/null and b/img/building_private_infra_shamircode.png differ diff --git a/img/building_private_infra_testnet.png b/img/building_private_infra_testnet.png new file mode 100644 index 00000000..a893407b Binary files /dev/null and b/img/building_private_infra_testnet.png differ diff --git a/img/building_private_infra_vote.png b/img/building_private_infra_vote.png new file mode 100644 index 00000000..30859ed8 Binary files /dev/null and b/img/building_private_infra_vote.png differ diff --git a/img/building_private_infra_zk.png b/img/building_private_infra_zk.png new file mode 100644 index 00000000..f5944ed6 Binary files /dev/null and b/img/building_private_infra_zk.png differ diff --git a/img/coscup-waku/huilong.jpg b/img/coscup-waku/huilong.jpg new file mode 100644 index 00000000..4d93bd6e Binary files /dev/null and b/img/coscup-waku/huilong.jpg differ diff --git a/img/coscup-waku/talk.png b/img/coscup-waku/talk.png new file mode 100644 index 00000000..7a7c5fc2 Binary files /dev/null and b/img/coscup-waku/talk.png differ diff --git a/img/coscup-waku/walletconnect.png b/img/coscup-waku/walletconnect.png new file mode 100644 index 00000000..54d84902 Binary files /dev/null and b/img/coscup-waku/walletconnect.png differ diff --git a/img/js-waku-gist.png b/img/js-waku-gist.png new file mode 100644 index 00000000..f17dd66b Binary files /dev/null and b/img/js-waku-gist.png differ diff --git a/img/libp2p_gossipsub_types_of_peering.png b/img/libp2p_gossipsub_types_of_peering.png new file mode 100644 index 00000000..f977d04f Binary files /dev/null and b/img/libp2p_gossipsub_types_of_peering.png differ diff --git a/img/light-rln-verifiers.png b/img/light-rln-verifiers.png new file mode 100644 index 00000000..51959a1f Binary files /dev/null and b/img/light-rln-verifiers.png differ diff --git a/img/mvds_batch.png 
b/img/mvds_batch.png new file mode 100644 index 00000000..aa3449be Binary files /dev/null and b/img/mvds_batch.png differ diff --git a/img/mvds_interactive.png b/img/mvds_interactive.png new file mode 100644 index 00000000..84e24501 Binary files /dev/null and b/img/mvds_interactive.png differ diff --git a/img/noise/N11M.png b/img/noise/N11M.png new file mode 100644 index 00000000..bc28d58e Binary files /dev/null and b/img/noise/N11M.png differ diff --git a/img/noise/NM.png b/img/noise/NM.png new file mode 100644 index 00000000..b445d3d7 Binary files /dev/null and b/img/noise/NM.png differ diff --git a/img/peacock-signaling.jpg b/img/peacock-signaling.jpg new file mode 100644 index 00000000..71a13816 Binary files /dev/null and b/img/peacock-signaling.jpg differ diff --git a/img/rain.jpg b/img/rain.jpg new file mode 100644 index 00000000..c087e42a Binary files /dev/null and b/img/rain.jpg differ diff --git a/img/rain.png b/img/rain.png new file mode 100644 index 00000000..8fd08c6b Binary files /dev/null and b/img/rain.png differ diff --git a/img/remote-log.png b/img/remote-log.png new file mode 100644 index 00000000..89266024 Binary files /dev/null and b/img/remote-log.png differ diff --git a/img/rln-relay-2023-update/proof_generation_time.png b/img/rln-relay-2023-update/proof_generation_time.png new file mode 100644 index 00000000..d74d5f60 Binary files /dev/null and b/img/rln-relay-2023-update/proof_generation_time.png differ diff --git a/img/rln-relay-2023-update/proof_verification_time.png b/img/rln-relay-2023-update/proof_verification_time.png new file mode 100644 index 00000000..0863024c Binary files /dev/null and b/img/rln-relay-2023-update/proof_verification_time.png differ diff --git a/img/rln-relay-2023-update/rln_dep_tree.jpg b/img/rln-relay-2023-update/rln_dep_tree.jpg new file mode 100644 index 00000000..bdd15bc4 Binary files /dev/null and b/img/rln-relay-2023-update/rln_dep_tree.jpg differ diff --git 
a/img/rln-relay-2023-update/spam_prevention_in_action.png b/img/rln-relay-2023-update/spam_prevention_in_action.png new file mode 100644 index 00000000..70977f16 Binary files /dev/null and b/img/rln-relay-2023-update/spam_prevention_in_action.png differ diff --git a/img/rln-relay/rain.jpg b/img/rln-relay/rain.jpg new file mode 100644 index 00000000..c087e42a Binary files /dev/null and b/img/rln-relay/rain.jpg differ diff --git a/img/rln-relay/rln-message-verification.msc b/img/rln-relay/rln-message-verification.msc new file mode 100644 index 00000000..ce162600 --- /dev/null +++ b/img/rln-relay/rln-message-verification.msc @@ -0,0 +1,43 @@ +# Sequence diagram for RLN Relay protocol (publishing,routing, and slashing) +msc { + hscale="1", + wordwraparcs=true; + + a [label="RLN-Relay Node: Publisher"], + b [label="RLN-Relay Node: Router"], + c [label="RLN-Relay Node"], + d [label="RLN-Relay Node"], + e [label="Membership Contract"]; + + # a rbox a [label="RLN-Relay Node: Publisher"], + # b rbox b [label="RLN-Relay Node: Router"], + # c rbox c [label="RLN-Relay Node"], + # d rbox d [label="RLN-Relay Node"], + # e note e [label="Membership Contract"]; + |||; + b box b [label=" \n nullifierMap= [(nullifier, shareX, shareY)...] \n \n Initialize an empty map of the received nullifiers \n "], + c box c [label=" \n nullifierMap= [(nullifier, shareX, shareY)...] \n \n Initialize an empty map of the received nullifiers \n "], + d box d [label=" \n nullifierMap= [(nullifier, shareX, shareY)...] 
\n \n Initialize an empty map of the received nullifiers \n "]; + |||; + ..., + a -> a [label="Keep track of epoch"], + b -> b [label="Keep track of epoch"], + c -> c [label="Keep track of epoch"], + d -> d [label="Keep track of epoch"]; + a box a [label=" \n Message: the intended message \n \n epoch: the current epoch \n "]; + a box a [label=" \n A(x) = sk + H(sk, epoch)x \n \n shareX = H(message), shareY = A(shareX) \n \n nullifier = H(H(sk,epoch)) \n "]; + a box a [label=" \n zkProof: generate the proof using zkSNARK \n "]; + |||; + a => b [label="Message, epoch, proofBundle:(shareX, shareY, nullifier, zkProof) \n "]; + b box b [label="1. If the received epoch is far from the current epoch"]; + b -x c [label="Do not relay"]; + b box b [label=" \n 2. If verification of zkProof failed \n "]; + b -x c [label="Do not relay"]; + b box b [label=" \n 3. If identical nullifier exists in the nullifierMap, \n \n extract the publisher sk \n "]; + b -x c [label="Do not relay"]; + b => e [label="Slash the publisher: Unlbock the deposit associated with sk"]; + e => b [label="x ETH"]; + b box b [label=" \n 4. 
If none of 1-3 happens, update the nullifierMap \n "]; + b => c [label="Relay"]; + b => d [label="Relay"]; +} \ No newline at end of file diff --git a/img/rln-relay/rln-message-verification.png b/img/rln-relay/rln-message-verification.png new file mode 100644 index 00000000..5a56274b Binary files /dev/null and b/img/rln-relay/rln-message-verification.png differ diff --git a/img/rln-relay/rln-relay-overview.png b/img/rln-relay/rln-relay-overview.png new file mode 100644 index 00000000..d8edfd76 Binary files /dev/null and b/img/rln-relay/rln-relay-overview.png differ diff --git a/img/rln-relay/rln-relay.msc b/img/rln-relay/rln-relay.msc new file mode 100644 index 00000000..8a96912d --- /dev/null +++ b/img/rln-relay/rln-relay.msc @@ -0,0 +1,32 @@ +# Sequence diagram for RLN Relay protocol (registration) +msc { + hscale = "2"; + + d [label = "RLN-Relay Node B"], a [label = "RLN-Relay Node A"],b [label = "Membership Contract"]; + # a rbox a [label="Relay Node A"], + # b note b [label="Membership Contract"], + # d rbox d [label = "Relay Node B"]; + |||; + + a box a [ label=" \n Generate sk,pk \n "] ; + a=>b [ label = " \n Register(pk, x ETH) \n " ] ; + |||; + + b box b [label=" \n Insert pk to the list \n "]; + //|||; + //b=>a [ label = "index"]; + |||; + ..., ---; + ... 
[ label = "Other relay nodes register and the membership contract gets updatetd" ]; + ..., ---; + # a=>b [ label = "getRoot()" ] ; + # b box b [label=" \n root: Get the current root\n "]; + # b=>a [ label = "root"]; + + # ..., --- [ label = " " ]; + + # a=>b [ label = "getAuthPath(index)" ] ; + # b box b [label=" \n authPath: Calculate the authentication path of the leaf with the given index and based on the current tree\n "]; + + # b=>a [ label = "authPath"]; +} \ No newline at end of file diff --git a/img/rln-relay/rln-relay.png b/img/rln-relay/rln-relay.png new file mode 100644 index 00000000..2ed43f01 Binary files /dev/null and b/img/rln-relay/rln-relay.png differ diff --git a/img/status_scaling_model_fig1.png b/img/status_scaling_model_fig1.png new file mode 100644 index 00000000..75eff106 Binary files /dev/null and b/img/status_scaling_model_fig1.png differ diff --git a/img/status_scaling_model_fig10.png b/img/status_scaling_model_fig10.png new file mode 100644 index 00000000..12801973 Binary files /dev/null and b/img/status_scaling_model_fig10.png differ diff --git a/img/status_scaling_model_fig11.png b/img/status_scaling_model_fig11.png new file mode 100644 index 00000000..8a98c8ba Binary files /dev/null and b/img/status_scaling_model_fig11.png differ diff --git a/img/status_scaling_model_fig12.png b/img/status_scaling_model_fig12.png new file mode 100644 index 00000000..30c4b53d Binary files /dev/null and b/img/status_scaling_model_fig12.png differ diff --git a/img/status_scaling_model_fig13.png b/img/status_scaling_model_fig13.png new file mode 100644 index 00000000..ddd6b966 Binary files /dev/null and b/img/status_scaling_model_fig13.png differ diff --git a/img/status_scaling_model_fig2.png b/img/status_scaling_model_fig2.png new file mode 100644 index 00000000..8b9b900e Binary files /dev/null and b/img/status_scaling_model_fig2.png differ diff --git a/img/status_scaling_model_fig3.png b/img/status_scaling_model_fig3.png new file mode 100644 index 
00000000..288e3dd0 Binary files /dev/null and b/img/status_scaling_model_fig3.png differ diff --git a/img/status_scaling_model_fig4.png b/img/status_scaling_model_fig4.png new file mode 100644 index 00000000..9912d0ae Binary files /dev/null and b/img/status_scaling_model_fig4.png differ diff --git a/img/status_scaling_model_fig5.png b/img/status_scaling_model_fig5.png new file mode 100644 index 00000000..6a6cf411 Binary files /dev/null and b/img/status_scaling_model_fig5.png differ diff --git a/img/status_scaling_model_fig8.png b/img/status_scaling_model_fig8.png new file mode 100644 index 00000000..5e659776 Binary files /dev/null and b/img/status_scaling_model_fig8.png differ diff --git a/img/status_scaling_model_fig9.png b/img/status_scaling_model_fig9.png new file mode 100644 index 00000000..a1ce2d91 Binary files /dev/null and b/img/status_scaling_model_fig9.png differ diff --git a/img/taipei_ethereum_meetup.png b/img/taipei_ethereum_meetup.png new file mode 100644 index 00000000..9269f16f Binary files /dev/null and b/img/taipei_ethereum_meetup.png differ diff --git a/img/taipei_ethereum_meetup_slide.png b/img/taipei_ethereum_meetup_slide.png new file mode 100644 index 00000000..3b92e1b0 Binary files /dev/null and b/img/taipei_ethereum_meetup_slide.png differ diff --git a/img/tianstatue.jpg b/img/tianstatue.jpg new file mode 100644 index 00000000..0b0fd665 Binary files /dev/null and b/img/tianstatue.jpg differ diff --git a/img/waku1-vs-waku2/waku1-vs-waku2-10-nodes.png b/img/waku1-vs-waku2/waku1-vs-waku2-10-nodes.png new file mode 100644 index 00000000..d37302f9 Binary files /dev/null and b/img/waku1-vs-waku2/waku1-vs-waku2-10-nodes.png differ diff --git a/img/waku1-vs-waku2/waku1-vs-waku2-150-nodes.png b/img/waku1-vs-waku2/waku1-vs-waku2-150-nodes.png new file mode 100644 index 00000000..b3d134db Binary files /dev/null and b/img/waku1-vs-waku2/waku1-vs-waku2-150-nodes.png differ diff --git a/img/waku1-vs-waku2/waku1-vs-waku2-30-nodes.png 
b/img/waku1-vs-waku2/waku1-vs-waku2-30-nodes.png new file mode 100644 index 00000000..3b6c7c9e Binary files /dev/null and b/img/waku1-vs-waku2/waku1-vs-waku2-30-nodes.png differ diff --git a/img/waku1-vs-waku2/waku1-vs-waku2-50-nodes.png b/img/waku1-vs-waku2/waku1-vs-waku2-50-nodes.png new file mode 100644 index 00000000..f00d9b3a Binary files /dev/null and b/img/waku1-vs-waku2/waku1-vs-waku2-50-nodes.png differ diff --git a/img/waku1-vs-waku2/waku1-vs-waku2-85-nodes.png b/img/waku1-vs-waku2/waku1-vs-waku2-85-nodes.png new file mode 100644 index 00000000..d05390a5 Binary files /dev/null and b/img/waku1-vs-waku2/waku1-vs-waku2-85-nodes.png differ diff --git a/img/waku1-vs-waku2/waku1-vs-waku2-overall-message-rate.png b/img/waku1-vs-waku2/waku1-vs-waku2-overall-message-rate.png new file mode 100644 index 00000000..2e2f150c Binary files /dev/null and b/img/waku1-vs-waku2/waku1-vs-waku2-overall-message-rate.png differ diff --git a/img/waku1-vs-waku2/waku1-vs-waku2-overall-network-size.png b/img/waku1-vs-waku2/waku1-vs-waku2-overall-network-size.png new file mode 100644 index 00000000..94fe0cc5 Binary files /dev/null and b/img/waku1-vs-waku2/waku1-vs-waku2-overall-network-size.png differ diff --git a/img/waku_infrastructure_sky.jpg b/img/waku_infrastructure_sky.jpg new file mode 100644 index 00000000..cc723013 Binary files /dev/null and b/img/waku_infrastructure_sky.jpg differ diff --git a/img/waku_simulation.jpeg b/img/waku_simulation.jpeg new file mode 100644 index 00000000..0300bafd Binary files /dev/null and b/img/waku_simulation.jpeg differ diff --git a/img/waku_v1_routing_small.png b/img/waku_v1_routing_small.png new file mode 100644 index 00000000..f1927bec Binary files /dev/null and b/img/waku_v1_routing_small.png differ diff --git a/img/waku_v2_discv5_random_walk_estimation.svg b/img/waku_v2_discv5_random_walk_estimation.svg new file mode 100644 index 00000000..8e69b10b --- /dev/null +++ b/img/waku_v2_discv5_random_walk_estimation.svg @@ -0,0 +1,230 @@ + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/img/waku_v2_routing_flood_small.png b/img/waku_v2_routing_flood_small.png new file mode 100644 index 00000000..e9bc6a9b Binary files /dev/null and b/img/waku_v2_routing_flood_small.png differ diff --git a/img/waku_v2_routing_gossip_small.png b/img/waku_v2_routing_gossip_small.png new file mode 100644 index 00000000..32fe3c1d Binary files /dev/null and b/img/waku_v2_routing_gossip_small.png differ diff --git a/img/waku_v2_routing_sharding_small.png b/img/waku_v2_routing_sharding_small.png new file mode 100644 index 00000000..4179b50f Binary files /dev/null and b/img/waku_v2_routing_sharding_small.png differ diff --git a/img/web3_holy_trinity.png b/img/web3_holy_trinity.png new file mode 100644 index 00000000..da958b4a Binary files /dev/null and b/img/web3_holy_trinity.png differ diff --git a/img/whisper_scalability.png b/img/whisper_scalability.png new file mode 100644 index 00000000..9db87bd8 Binary files /dev/null and b/img/whisper_scalability.png differ diff --git a/index.html b/index.html new file mode 100644 index 00000000..9af5e25f --- /dev/null +++ b/index.html @@ -0,0 +1,29 @@ + + + + + +About Vac | Vac Research + + + + + + + + + + +
+

About Vac

Vac is a principle-driven research and development group that provides technical support to each IFT startup. +Vac comprises R&D Service Units, Deep Research, and Incubator Projects. +We do applied research based on which we build protocols, libraries, specifications, and publications. +As custodians of these protocols, our aim is to adhere to a set of principles that ensure their alignment with our core values and objectives.

+ + + + \ No newline at end of file diff --git a/introducing-nwaku/index.html b/introducing-nwaku/index.html new file mode 100644 index 00000000..083935a8 --- /dev/null +++ b/introducing-nwaku/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/join-us/index.html b/join-us/index.html new file mode 100644 index 00000000..21372734 --- /dev/null +++ b/join-us/index.html @@ -0,0 +1,26 @@ + + + + + +Join Us | Vac Research + + + + + + + + + + +
+
+ + + + \ No newline at end of file diff --git a/kademlia-to-discv5/index.html b/kademlia-to-discv5/index.html new file mode 100644 index 00000000..cd6e6734 --- /dev/null +++ b/kademlia-to-discv5/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/media/index.html b/media/index.html new file mode 100644 index 00000000..15360ba5 --- /dev/null +++ b/media/index.html @@ -0,0 +1,26 @@ + + + + + +Media | Vac Research + + + + + + + + + + +
+
+ + + + \ No newline at end of file diff --git a/p2p-data-sync-for-mobile/index.html b/p2p-data-sync-for-mobile/index.html new file mode 100644 index 00000000..bc8414eb --- /dev/null +++ b/p2p-data-sync-for-mobile/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/page/2/index.html b/page/2/index.html new file mode 100644 index 00000000..a44655de --- /dev/null +++ b/page/2/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/page/3/index.html b/page/3/index.html new file mode 100644 index 00000000..ddfbd598 --- /dev/null +++ b/page/3/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/page/4/index.html b/page/4/index.html new file mode 100644 index 00000000..c85231ea --- /dev/null +++ b/page/4/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/presenting-js-waku/index.html b/presenting-js-waku/index.html new file mode 100644 index 00000000..cfb52394 --- /dev/null +++ b/presenting-js-waku/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/principles/index.html b/principles/index.html new file mode 100644 index 00000000..369a9090 --- /dev/null +++ b/principles/index.html @@ -0,0 +1,53 @@ + + + + + +Principles | Vac Research + + + + + + + + + + +
+

These principles have been inherited from https://our.status.im/our-principles/. Only minor stylistic changes have been made to them.

Principles


The goal of Vac is widespread adoption of the decentralised web. +Our challenge is achieving mass adoption while staying true to the principles outlined below.

I. Liberty

We believe in the sovereignty of individuals. +As a research organisation that stands for the cause of personal liberty, +we aim to maximise social, political, and economic freedoms. +This includes being coercion resistant.

II. Censorship resistance

We enable the free flow of information. +No content is under surveillance. +We abide by the cryptoeconomic design principle of censorship resistance. +Even stronger, we design agnostic infrastructures for information.

III. Security

We don't compromise on security when building features. +We use state-of-the-art technologies, +and research new security methods and technologies to make strong security guarantees.

IV. Privacy

Privacy is the power to selectively reveal oneself to the world. +For us, it's essential to protect privacy in both communications and transactions, +as well as pseudo-anonymity. +Additionally, we strive to provide the right of total anonymity.

V. Transparency

We strive for complete openness and symmetry of information within the organisation, +and have no border between our core contributors and our community. +We are frank about our shortcomings, +especially when making short-term tradeoffs in service of our long-term goals.

VI. Openness

The software we create is a public good. +It is made available via a free and open-source licence, +for anyone to share, modify, and benefit from. +We believe in permissionless participation.

VII. Decentralisation

We minimise centralisation across both the software and the organisation itself. +In other words, we maximise the number of physical computers composing the network, +and maximise the number of individuals who have control over the system(s) we are building.

VIII. Inclusivity

We believe in fair and widespread access to our software, with an emphasis on ease of use. +This also extends to social inclusivity, permissionless participation, interoperability, +and investing in educational efforts.

IX. Continuance

We create software incentivised to continue to exist and improve +without the stewardship of a single entity or any of the current team members.

X. Resourcefulness

We are relentlessly resourceful. +As we grow and have ready access to capital, +it is our obligation to token holders to fight bureaucracy and inefficiencies within the organisation. +This means solving problems in the most effective way possible at lower economic costs +(in terms of capital, time, and resources).

+ + + + \ No newline at end of file diff --git a/privacy-policy/index.html b/privacy-policy/index.html new file mode 100644 index 00000000..c28c5fcb --- /dev/null +++ b/privacy-policy/index.html @@ -0,0 +1,26 @@ + + + + + +Privacy Policy | Vac Research + + + + + + + + + + +
+

Privacy Policy

Last updated: 9 February 2024

This Privacy Policy is intended to inform users of our approach to privacy in respect of this website ("Website"). In this regard, if you are visiting our Website, this Privacy Policy applies to you.

1) Who we are

For the purposes of this Privacy Policy and the collection and processing of personal data as a controller, the relevant entity is the Logos Collective Association, which has its registered office in Zug and its legal domicile address at

Logos Collective Association
c/o PST Consulting GmbH
Baarerstrasse 10
6300 Zug
Switzerland

Whenever we refer to “Logos”, “we” or other similar references, we are referring to the Logos Collective Association.

2) We limit the collection and processing of personal data from your use of the Website

We aim to limit the collection and processing of personal data from users of the Website. We only collect and process certain personal data for specific purposes and where we have the legal basis to do so under applicable privacy legislation. We will not collect or process any personal data that we don’t need and where we do store any personal data, we will only store it for the least amount of time needed for the indicated purpose.

In this regard, we collect and process the following personal data from your use of the Website:

  • IP address: As part of such use of the Website we briefly process your IP address but we have no way of identifying you. We however have a legitimate interest in processing such IP addresses to ensure the technical functionality and enhance the security measures of the Website. This IP address is not stored by us over time.

3) Third party processing of personal data

In addition to our limited collection and processing of personal data, third parties may collect or process personal data as a result of the Website making use of certain features or to provide certain content. To the extent you interact with such third party content or features, their respective privacy policies will apply.

4) Security measures we take in respect of the Website

As a general approach, we take data security seriously and we have implemented a variety of security measures on the Website to maintain the safety of your personal data when you submit such information to us.

5) Exporting data outside the European Union and Switzerland

We are obliged to protect the privacy of personal data that you may have submitted in the unlikely event that we export your personal data to places outside the European Union or Switzerland. This means that personal data will only be processed in countries or by parties that provide an adequate level of protection as deemed by Switzerland or the European Commission. Otherwise, we will use other forms of protections, such as specific forms of contractual clauses to ensure such personal data is provided the same protection as required in Switzerland or Europe. In any event, the transmission of personal data outside the European Union and Switzerland will always occur in conformity with applicable privacy legislation.

6) Your choices and rights

As explained in this Privacy Policy, we limit our collection and processing of your personal data wherever possible. Nonetheless, you still have certain choices and rights in respect of the personal data which we do collect and process. As laid out in relevant privacy legislation, you have the right to:

  • Ask us to correct or update your personal data (where reasonably possible);

  • Ask us to remove your personal data from our systems;

  • Ask us for a copy of your personal data, which may also be transferred to another data controller at your request;

  • Withdraw your consent to process your personal data (only if consent was asked for a processing activity), which only affects processing activities that are based on your consent and doesn’t affect the validity of such processing activities before you have withdrawn your consent;

  • Object to the processing of your personal data; and

  • File a complaint with the Federal Data Protection and Information Commissioner (FDPIC), if you believe that your personal data has been processed unlawfully.

7) Links to third party websites

On this Website, you may come across links to third party websites. These third party sites have separate and independent privacy policies. We therefore have no responsibility or liability for the content and activities of these third party websites.

8) This Privacy Policy might change

We may modify or replace any part of this Privacy Policy at any time and without notice. Please check the Website periodically for any changes. The new Privacy Policy will be effective immediately upon its posting on our Website.

9) Contact information

To the extent that you have any questions about the Privacy Policy, please contact us at legal@free.technology.

This document is licensed under CC-BY-SA.

+ + + + \ No newline at end of file diff --git a/publications/index.html b/publications/index.html new file mode 100644 index 00000000..5942396b --- /dev/null +++ b/publications/index.html @@ -0,0 +1,26 @@ + + + + + +Publications | Vac Research + + + + + + + + + + +
+
+ + + + \ No newline at end of file diff --git a/remote-log/index.html b/remote-log/index.html new file mode 100644 index 00000000..8125a85f --- /dev/null +++ b/remote-log/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/research/index.html b/research/index.html new file mode 100644 index 00000000..02fad549 --- /dev/null +++ b/research/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/rfcprocess/index.html b/rfcprocess/index.html new file mode 100644 index 00000000..2636dbc8 --- /dev/null +++ b/rfcprocess/index.html @@ -0,0 +1,29 @@ + + + + + +RFC Process | Vac Research + + + + + + + + + + +
+

Vac RFC Process

The Vac RFC unit serves as a vital cornerstone in the Logos collective, +taking on the responsibility of shepherding and editing specifications for Logos projects and Vac incubator projects. +By meticulously crafting and overseeing these specifications, the Vac RFC unit acts as a linchpin for ensuring standardised and interoperable protocols within the Logos ecosystem. +Their expertise and attention to detail contribute to a cohesive and collaborative environment, facilitating seamless integration and advancement of decentralised technologies throughout the Logos collective and beyond.

+ + + + \ No newline at end of file diff --git a/rln-anonymous-dos-prevention/index.html b/rln-anonymous-dos-prevention/index.html new file mode 100644 index 00000000..34bf8c02 --- /dev/null +++ b/rln-anonymous-dos-prevention/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/rln-light-verifiers/index.html b/rln-light-verifiers/index.html new file mode 100644 index 00000000..3960c14d --- /dev/null +++ b/rln-light-verifiers/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/rln-relay/index.html b/rln-relay/index.html new file mode 100644 index 00000000..4701d74c --- /dev/null +++ b/rln-relay/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/rln-v3/index.html b/rln-v3/index.html new file mode 100644 index 00000000..81d1c403 --- /dev/null +++ b/rln-v3/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/rlog/GossipSub Improvements/index.html b/rlog/GossipSub Improvements/index.html new file mode 100644 index 00000000..fc3c9af0 --- /dev/null +++ b/rlog/GossipSub Improvements/index.html @@ -0,0 +1,109 @@ + + + + + +GossipSub Improvements: Evolution of Overlay Design and Message Dissemination in Unstructured P2P Networks | Vac Research + + + + + + + + + + +
+

GossipSub Improvements: Evolution of Overlay Design and Message Dissemination in Unstructured P2P Networks

by
14 min read

GossipSub Improvements: Evolution of Overlay Design and Message Dissemination in Unstructured P2P Networks

Motivation

We have been recently working on analyzing and improving the performance of the GossipSub protocol for large messages, +as in the case of Ethereum Improvement Proposal EIP-4844. +This work led to a comprehensive study of unstructured P2P networks. +The intention was to identify the best practices that can serve as guidelines for performance improvement and scalability of P2P networks.

Introduction

Nodes in an unstructured p2p network form self-organizing overlay(s) on top of the IP infrastructure to facilitate different services like information dissemination, +query propagation, file sharing, etc. The overlay(s) can be as optimal as a tree-like structure or as enforcing as a fully connected mesh.

Due to peer autonomy and a trustless computing environment, some peers may deviate from the expected operation or even leave the network. +At the same time, the underlying IP layer is unreliable.

Therefore, tree-like overlays are not best suited for reliable information propagation. +Moreover, tree-based solutions usually result in significantly higher message dissemination latency due to suboptimal branches.

Flooding-based solutions, on the other hand, result in maximum resilience against adversaries and achieve minimal message dissemination latency because the message propagates through all (including the optimal) paths. +Redundant transmissions help maintain the integrity and security of the network in the presence of adversaries and high node failure but significantly increase network-wide bandwidth utilization, cramming the bottleneck links.

An efficient alternative is to lower the number of redundant transmissions by D-regular broadcasting, where a peer will likely receive (or relay) a message from up to $D$ random peers. +Publishing through a D-regular overlay triggers approximately $N \times D$ transmissions. +Reducing $D$ reduces the redundant transmissions but compromises reachability and latency. +Sharing metadata through a K-regular overlay (where $K > D$) allows nodes to pull missing messages.

GossipSub [1] benefits from full-message (D-regular) and metadata-only (K-regular) overlays. +Alternatively, a metadata-only overlay can be used, requiring a pull-based operation that significantly minimizes bandwidth utilization at the cost of increased latency.

Striking the right balance between parameters like $D$, $K$, pull-based operation, etc., can yield application-specific performance tuning, but scalability remains a problem.

At the same time, many other aspects can significantly contribute to the network's performance and scalability. +One option is to realize peers' suitability and continuously changing capabilities while forming overlays.

For instance, a low-bandwidth link near a publisher can significantly demean the entire network's performance. +Reshuffling of peering links according to the changing network conditions can lead to superior performance.

Laying off additional responsibilities to more capable nodes (super nodes) can alleviate peer cramming, but it makes the network susceptible to adversaries/peer churn. +Grouping multiple super nodes to form virtual node(s) can solve this problem.

Similarly, flat (single-tier) overlays cannot address the routing needs in large (geographically dispersed) networks.

Hierarchical (Multi-tier) overlays with different intra/inter-overlay routing solutions can better address these needs. +Moreover, using message aggregation schemes for grouping multiple messages can save bandwidth and provide better resilience against adversaries/peer churn.

This article's primary objective is to investigate the possible choices that can empower an unstructured P2P network to achieve superior performance for the broadest set of applications. +We look into different constraints imposed by application-specific needs (performance goals) and investigate various choices that can augment the network's performance. +We explore overlay designs/freshness, peer selection approaches, message-relaying mechanisms, and resilience against adversaries/peer churn. +We consider GossipSub a baseline protocol to explore various possibilities and decisively commit to the ones demonstrating superior performance. +We also discuss the current state and, where applicable, propose a strategic plan for embedding new features to the GossipSub protocol.

GOAL1: Low Latency Operation

Different applications, like blockchain, streaming, etc., impose strict time bounds on network-wide message dissemination latency. +A message delivered after the imposed time bounds is considered as dropped. +An early message delivery in applications like live streaming can further enhance the viewing quality.

The properties and nature of the overlay network topology significantly impact the performance of services and applications executed on top of them. +Studying and devising mechanisms for better overlay design and message dissemination is paramount to achieving superior performance.

Interestingly, shortest-path message delivery trees have many limitations:

1) Changing network dynamics requires a quicker and continuous readjustment of the multicast tree. +2) The presence of resource-constrained (bandwidth/compute, etc.) nodes in the overlay can result in congestion. +3) Node failure can result in partitions, making many segments unreachable. +4) Assuring a shortest-path tree-like structure requires a detailed view of the underlying (and continuously changing) network topology.

Solutions involve creating multiple random trees to add redundancy [2]. +Alternatives involve building an overlay mesh and forwarding messages through the multicast delivery tree (eager push).

Metadata is shared through the overlay links so that the nodes can ask for missing messages (lazy push or pull-based operation) through the overlay links. +New nodes are added from the overlay on node failure, but it requires non-faulty node selection.

GossipSub uses eager push (through overlay mesh) and lazy push (through IWANT messages).

The mesh degree $D_{Low} \leq D \leq D_{High}$ is crucial in deciding message dissemination latency. +A smaller value for $D$ results in higher latency due to increased rounds, whereas a higher $D$ reduces latency at the cost of increased bandwidth. +At the same time, keeping $D$ independent of the growing network size ($N$) may increase network-wide message dissemination latency. +Adjusting $D$ with $N$ maintains similar latency at the cost of increased workload for peers. +Authors in [3] suggest only a logarithmic increase in $D$ to maintain a manageable workload for peers. +In [4], it is reported that the average mesh degree should not exceed $D_{avg} = \ln(N) + C$ for an optimal operation, +where $C$ is a small constant.

Moreover, quicker shuffling of peers results in better performance in the presence of resource-constrained nodes or node failure [4].

GOAL2: Considering Heterogeneity In Overlay Design

Random peering connections in P2P overlays represent a stochastic process. It is inherently difficult to precisely model the performance of such systems. +Most of the research on P2P networks provides simulation results assuming nodes with similar capabilities. +The aspect of dissimilar capabilities and resource-constrained nodes is less explored.

It is discussed in GOAL1 that overlay mesh results in better performance if $D_{avg}$ does not exceed $\ln(N) + C$. +Enforcing all the nodes to have approximately $\ln(N) + C$ peers makes resource-rich nodes under-utilized, while resource-constrained nodes are overloaded. +At the same time, connecting high-bandwidth nodes through a low-bandwidth node undermines the network's performance. +Ideally, the workload on any node should not exceed its available resources. +A better solution involves a two-phased operation:

  1. Every node computes its available bandwidth and selects a node degree $D$ proportional to its available bandwidth [4]. +Different bandwidth estimation approaches are suggested in literature [5,6]. +Simple bandwidth estimation approaches like variable packet size probing [6] yield similar results with less complexity. +It is also worth mentioning that many nodes may want to allocate only a capped share of their bandwidth to the network. +Lowering $D$ according to the available bandwidth can still prove helpful. +Additionally, bandwidth preservation at the transport layer through approaches like µTP can be useful. +To further conform to the suggested mesh-degree average $D_{avg}$, every node tries achieving this average within its neighborhood, resulting in an overall similar $D_{avg}$.

  2. From the available local view, every node tries connecting peers with the lowest latency until DD connections are made. +We suggest referring to the peering solution discussed in GOAL5 to avoid network partitioning.

The current GossipSub design considers homogeneous peers, and every node tries maintaining $D_{Low} \leq D \leq D_{High}$ connections.

GOAL3: Bandwidth Optimization

Redundant message transmissions are essential for handling adversaries/node failure. However, these transmissions result in traffic bursts, cramming many overlay links. +This not only adds to the network-wide message dissemination latency but a significant share of the network's bandwidth is wasted on (usually) unnecessary transmissions. +It is essential to explore solutions that can minimize the number of redundant transmissions while assuring resilience against node failures.

Many efforts have been made to minimize the impact of redundant transmissions. +These solutions include multicast delivery trees, metadata sharing to enable pull-based operation, in-network information caching, etc. [7,8]. +GossipSub employs a hybrid of eager push (message dissemination through the overlay) and lazy push (a pull-based operation by the nodes requiring information through IWANT messages).

A better alternative to simple redundant transmission is to use message aggregation [9,10,11] for the GossipSub protocol. As a result, redundant message transmissions can serve as a critical advantage of the GossipSub protocol. Suppose that we have three equal-length messages $x_1, x_2, x_3$. Assuming an XOR coding function, we know two trivial properties: $x_1 \oplus x_2 \oplus x_2 = x_1$ and $\vert x_1 \vert = \vert x_1 \oplus x_2 \oplus x_2 \vert$.

This implies that instead of sending messages individually, we can encode and transmit composite message(s) to the network. +The receiver can reconstruct the original message from encoded segments. +As a result, fewer transmissions are sufficient for sending more messages to the network.

However, sharing linear combinations of messages requires organizing messages in intervals, +and devising techniques to identify all messages belonging to each interval. +In addition, combining messages from different publishers requires more complex arrangements, +involving embedding publisher/message IDs, delayed forwarding (to accommodate more messages), and mechanisms to ensure the decoding of messages at all peers. +Careful application-specific need analysis can help decide the benefits against the added complexity.

GOAL4: Handling Large Messages

Many applications require transferring large messages for their successful operation. For instance, database/blockchain transactions [12]. +This introduces two challenges:

1) Redundant large message transmissions result in severe network congestion. +2) Message transmissions follow a store/forward process at all peers, which is inefficient in the case of large messages.

The above-mentioned challenges result in a noticeable increase in message dissemination latency and bandwidth wastage. +Most of the work done for handling large messages involves curtailing redundant transmissions using multicast delivery trees, +reducing the number of fanout nodes, employing in-network message caching, pull-based operation, etc.

Approaches like message aggregation also prove helpful in minimizing bandwidth wastage.

Our recent work on GossipSub improvements (still a work in progress) suggests the following solutions to deal with large message transmissions:

  1. Using IDontWant message proposal [13] and staggered sending.

    IDontWant message helps curtail redundant transmissions by letting other peers know we have already received the message. +Staggered sending enables relaying the message to a short subset of peers in each round. +We argue that simultaneously relaying a message to all peers hampers the effectiveness of the IDontWant message. +Therefore, using the IDontWant message with staggered sending can yield better results by allowing timely reception and processing of IDontWant messages.

  2. Message transmissions follow a store/forward process at all peers that is inefficient in the case of large messages. +We can parallelize message transmission by partitioning large messages into smaller fragments, letting intermediate peers relay these fragments as soon as they receive them.

GOAL5: Scalability

P2P networks are inherently scalable because every incoming node brings in bandwidth and compute resources. In other words, we can keep adding nodes to the network as long as every incoming node brings at least $R \times D$ bandwidth, where $R$ is the average data arrival rate. It is worth mentioning that network-wide message dissemination requires at least $\lceil \log_D (N) \rceil$ hops. Therefore, increasing network size increases message dissemination latency, assuming $D$ is independent of the network size.

Additionally, problems like peer churn, adversaries, heterogeneity, distributed operation, etc., significantly hamper the network's performance. +Most efforts for bringing scalability to the P2P systems have focused on curtailing redundant transmissions and flat overlay adjustments. +Hierarchical overlay designs, on the other hand, are less explored.

Placing a logical structure in unstructured P2P systems can help scale P2P networks.

One possible solution is to use a hierarchical overlay inspired by the approaches [14,15,16]. +An abstract operation of such overlay design is provided below:

  1. Clustering nodes based on locality, assuming that such peers will have relatively lower intra-cluster latency and higher bandwidth. For this purpose, every node tries connecting to peers with the lowest latency until $D$ connections are made or the cluster limit is reached.

  2. A small subset of nodes having the highest bandwidth and compute resources is selected from each cluster. +These super nodes form a fully connected mesh and jointly act as a virtual node, +mitigating the problem of peer churn among super nodes.

  3. Virtual nodes form a fully connected mesh to construct a hierarchical overlay. +Each virtual node is essentially a collection of super nodes; +a link to any of the constituent super nodes represents a link to the virtual node.

  4. One possible idea is to use GossipSub for intra-cluster message dissemination and FloodSub for inter-cluster message dissemination.

Summary

Overlay acts as a virtual backbone for a P2P network. A flat overlay is more straightforward and allows effortless readjustment to application needs. +On the other hand, a hierarchical overlay can bring scalability at the cost of increased complexity. +Regardless of the overlay design, a continuous readjustment to appropriate peering links is essential for superior performance. +At the same time, bandwidth preservation (through message aggregation, caching at strategic locations, metadata sharing, pull-based operation, etc.) can help minimize latency. +However, problems like peer churn and in-network adversaries can be best alleviated through balanced redundant coverage, and frequent reshuffling of the peering links.

References

  • [1] D. Vyzovitis, Y. Napora, D. McCormick, D. Dias, and Y. Psaras, “Gossipsub: Attack-resilient message propagation in the filecoin and eth2. 0 networks,” arXiv preprint arXiv:2007.02754, 2020. Retrieved from https://arxiv.org/pdf/2007.02754.pdf
  • [2] M. Matos, V. Schiavoni, P. Felber, R. Oliveira, and E. Riviere, “Brisa: Combining efficiency and reliability in epidemic data dissemination,” in 2012 IEEE 26th International Parallel and Distributed Processing Symposium. IEEE, 2012, pp. 983–994. Retrieved from https://ieeexplore.ieee.org/abstract/document/6267905
  • [3] P. T. Eugster, R. Guerraoui, A. M. Kermarrec, and L. Massouli, “Epidemic information dissemination in distributed systems,” IEEE Computer, vol. 37, no. 5, 2004. Retrieved from https://infoscience.epfl.ch/record/83478/files/EugGueKerMas04IEEEComp.pdf
  • [4] D. Frey, “Epidemic protocols: From large scale to big data,” Ph.D. dissertation, Universite De Rennes 1, 2019. Retrieved from https://inria.hal.science/tel-02375909/document
  • [5] M. Jain and C. Dovrolis, “End-to-end available bandwidth: measurement methodology, dynamics, and relation with tcp throughput,” IEEE/ACM Transactions on networking, vol. 11, no. 4, pp. 537–549, 2003. Retrieved from https://ieeexplore.ieee.org/abstract/document/1224454
  • [6] R. Prasad, C. Dovrolis, M. Murray, and K. Claffy, “Bandwidth estimation: metrics, measurement techniques, and tools,” IEEE network, vol. 17, no. 6, pp. 27–35, 2003. Retrieved from https://ieeexplore.ieee.org/abstract/document/1248658
  • [7] D. Kostic, A. Rodriguez, J. Albrecht, and A. Vahdat, “Bullet: High bandwidth data dissemination using an overlay mesh,” in Proceedings of the nineteenth ACM symposium on Operating systems principles, 2003, pp. 282–297. Retrieved from https://dl.acm.org/doi/abs/10.1145/945445.945473
  • [8] V. Pai, K. Kumar, K. Tamilmani, V. Sambamurthy, and A. E. Mohr, “Chainsaw: Eliminating trees from overlay multicast,” in Peer-to-Peer Systems IV: 4th International Workshop, IPTPS 2005, Ithaca, NY, USA, February 24-25, 2005. Revised Selected Papers 4. Springer, 2005, pp. 127–140. Retrieved from https://link.springer.com/chapter/10.1007/11558989_12
  • [9] Y.-D. Bromberg, Q. Dufour, and D. Frey, “Multisource rumor spreading with network coding,” in IEEE INFOCOM 2019-IEEE Conference on Computer Communications. IEEE, 2019, pp. 2359–2367. Retrieved from https://ieeexplore.ieee.org/abstract/document/8737576
  • [10] B. Haeupler, “Analyzing network coding gossip made easy,” in Proceedings of the forty-third annual ACM symposium on Theory of computing, 2011, pp. 293–302. Retrieved from https://dl.acm.org/doi/abs/10.1145/1993636.1993676
  • [11] S. Yu and Z. Li, “Massive data delivery in unstructured peer-to-peer networks with network coding,” in 6th IEEE/ACIS International Conference on Computer and Information Science (ICIS 2007). IEEE, 2007, pp. 592–597. Retrieved from https://ieeexplore.ieee.org/abstract/document/4276446
  • [12] V. Buterin, D. Feist, D. Loerakker, G. Kadianakis, M. Garnett, M. Taiwo, and A. Dietrichs, “Eip-4844: Shard blob transactions scale data-availability of ethereum in a simple, forwards-compatible manner,” 2022. Retrieved from https://eips.ethereum.org/EIPS/eip-4844
  • [13] A. Manning, “Gossipsub extension for epidemic meshes (v1.2.0),” 2022. Retrieved from https://github.com/libp2p/specs/pull/413
  • [14] Z. Duan, C. Tian, M. Zhou, X. Wang, N. Zhang, H. Du, and L. Wang, “Two-layer hybrid peer-to-peer networks,” Peer-to-Peer Networking and Applications, vol. 10, pp. 1304–1322, 2017. Retrieved from https://link.springer.com/article/10.1007/s12083-016-0460-5
  • [15] W. Hao, J. Zeng, X. Dai, J. Xiao, Q. Hua, H. Chen, K.-C. Li, and H. Jin, “Blockp2p: Enabling fast blockchain broadcast with scalable peer-to-peer network topology,” in Green, Pervasive, and Cloud Computing: 14th International Conference, GPC 2019, Uberlandia, Brazil, May 26–28, 2019, Proceedings 14. Springer, 2019, pp. 223–237. Retrieved from https://link.springer.com/chapter/10.1007/978-3-030-19223-5_16
  • [16] H. Qiu, T. Ji, S. Zhao, X. Chen, J. Qi, H. Cui, and S. Wang, “A geography-based p2p overlay network for fast and robust blockchain systems,” IEEE Transactions on Services Computing, 2022. Retrieved from https://ieeexplore.ieee.org/abstract/document/9826458
+ + + + \ No newline at end of file diff --git a/rlog/Nescience-A-zkVM-leveraging-hiding-properties/index.html b/rlog/Nescience-A-zkVM-leveraging-hiding-properties/index.html new file mode 100644 index 00000000..868a74a5 --- /dev/null +++ b/rlog/Nescience-A-zkVM-leveraging-hiding-properties/index.html @@ -0,0 +1,185 @@ + + + + + +Nescience - A zkVM leveraging hiding properties | Vac Research + + + + + + + + + + +
+

Nescience - A zkVM leveraging hiding properties

by
32 min read

Nescience, a privacy-first blockchain zkVM.

Introduction

Nescience is a privacy-first blockchain project that aims to enable private transactions and provide a general-purpose execution environment for classical applications. +The goals include creating a state separation architecture for public/private computation, +designing a versatile virtual machine based on mainstream instruction sets, +creating proofs for private state updates, implementing a kernel-based architecture for correct execution of private functions, +and implementing core DeFi protocols such as AMMs and staking from a privacy perspective.

It intends to create a user experience that is similar to public blockchains, but with additional privacy features that users can leverage at will. +To achieve this goal, Nescience will implement a versatile virtual machine that can be used to implement existing blockchain applications, +while also enabling the development of privacy-centric protocols such as private staking and private DEXs.

To ensure minimal trust assumptions and prevent information leakage, Nescience proposes a proof system that allows users to create proofs for private state updates, +while the verification of the proofs and the execution of the public functions inside the virtual machine can be delegated to an external incentivised prover.

It also aims to implement a seamless interaction between public and private state, enabling composability between contracts, and private and public functions. +Finally, Nescience intends to implement permissive licensing, which means that the source code will be open-source, +and developers will be able to use and modify the code without any restriction.

Our primary objective is the construction of the Zero-Knowledge Virtual Machine (zkVM). This document serves as a detailed exploration of the multifaceted challenges, +potential solutions, and alternatives that lay ahead. Each step is a testament to our commitment to thoroughness; +we systematically test various possibilities and decisively commit to the one that demonstrates paramount performance and utility. +For instance, as we progress towards achieving Goal 2, we are undertaking a rigorous benchmarking of the Nova proof system against its contemporaries. +Should Nova showcase superior performance metrics, we stand ready to integrate it as our proof system of choice. Through such meticulous approaches, +we not only reinforce the foundation of our project but also ensure its scalability and robustness in the ever-evolving landscape of blockchain technology.

Goal 1: Create a State Separation Architecture

The initial goal revolves around crafting a distinctive architecture that segregates public and private computations, +employing an account-based framework for the public state and a UTXO-based structure for the private state.

The UTXO model [1,2], notably utilized in Bitcoin, generates new UTXOs to serve future transactions, +while the account-based paradigm assigns balances to accounts that transactions can modify. +Although the UTXO model bolsters privacy by concealing comprehensive balances, +the pursuit of a dual architecture mandates a meticulous synchronization of these state models, +ensuring that private transactions remain inconspicuous in the wider public network state.

This task is further complicated by the divergent transaction processing methods intrinsic to each model, +necessitating a thoughtful and innovative approach to harmonize their functionality. +To seamlessly bring together the dual architecture, harmonizing the account-based model for public state with the UTXO-based model for private state, +a comprehensive strategy is essential.

The concept of blending an account-based structure with a UTXO-based model for differentiating between public and private states is intriguing. +It seeks to leverage the strengths of both models: the simplicity and directness of the account-based model with the privacy enhancements of the UTXO model.

Here's a breakdown and a potential strategy for harmonizing these models:

Rationale Behind the Dual Architecture:

  • Account-Based Model: This model is intuitive and easy to work with. Every participant has an account, +and transactions directly modify the balances of these accounts. It's conducive for smart contracts and a broad range of applications.

  • UTXO-Based Model: This model treats every transaction as a new output, which can then be used as an input for future transactions. +By not explicitly associating transaction outputs with user identities, it offers a degree of privacy.

Harmonizing the Two Systems:

  1. Translation Layer

    • Role: Interface between UTXO and account-based states.

    • UTXO-to-Account Adapter: When UTXOs are spent, the adapter can translate these into the corresponding account balance modifications. +This could involve creating a temporary 'pseudo-account' that mirrors the +UTXO's attributes.

    • Account-to-UTXO Adapter: When an account wishes to make a private transaction, +it would initiate a process converting a part of its balance to a UTXO, facilitating a privacy transaction.

  2. Unified Identity Management

    • Role: Maintain a unified identity (or address) system that works across both state models, +allowing users to easily manage their public and private states without requiring separate identities.

    • Deterministic Wallets: Use Hierarchical Deterministic (HD) wallets [3,4], enabling users to generate multiple addresses (both UTXO and account-based) from a single seed. +This ensures privacy while keeping management centralized for the user.

  3. State Commitments

    • Role: Use cryptographic commitments to commit to the state of both models. This can help in efficiently validating cross-model transactions.

    • Verkle Trees: Verkle Trees combine Vector Commitment and the KZG polynomial commitment scheme to produce a structure that's efficient in terms of both proofs and verification. +Verkle proofs are considerably small in size (less data to store and transmit), where Transaction and state verifications can be faster due to the smaller proof sizes and computational efficiencies.

    • Mimblewimble-style Aggregation [5]: For UTXOs, techniques similar to those used in Mimblewimble can be used to aggregate transactions, keeping the state compact and enhancing privacy.

  4. Batch Processing & Anonymity Sets

    • Role: Group several UTXO-based private transactions into a single public account-based transaction. +This can provide a level of obfuscation and can make synchronization between the two models more efficient.

    • CoinJoin Technique [6]: As seen in Bitcoin, multiple users can combine their UTXO transactions into one, enhancing privacy.

    • Tornado Cash Principle [7]: For account-based systems wanting to achieve privacy, methods like those used in Tornado Cash can be implemented, +providing zk-SNARKs-based private transactions.

  5. Event Hooks & Smart Contracts

    • Role: Implement event-driven mechanisms that trigger specific actions in one model based on events in the other. +For instance, a private transaction (UTXO-based) can trigger a corresponding public notification or event in the account-based model.

    • Conditional Execution: Smart contracts could be set to execute based on events in the UTXO system. For instance, +a smart contract might release funds (account-based) once a specific UTXO is spent.

    • Privacy Smart Contracts: Using zk-SNARKs or zk-STARKs to bring privacy to the smart contract layer, +allowing for private logic execution.

Challenges and Solutions

  1. Synchronization Overhead

    • Challenge: Combining two distinct transaction models creates an inherent synchronization challenge.

    • State Channels: By allowing transactions to be conducted off-chain between participants, state channels can alleviate synchronization stresses. +Only the final state needs to be settled on-chain, drastically reducing the amount of data and frequency of updates required.

    • Sidechains: These act as auxiliary chains to the main blockchain. Transactions can be processed on the sidechain and then periodically synced with the main chain. +This structure helps reduce the immediate load on the primary system.

    • Checkpointing: Introduce periodic checkpoints where the two systems' states are verified and harmonized. +This can ensure consistency without constant synchronization.

  2. Double Spending

    • Challenge: With two models operating in tandem, there's an increased risk of double-spending attacks.

    • Multi-Signature Transactions: Implementing transactions that require signatures from both systems can prevent unauthorized movements.

    • Cross-Verification Mechanisms: Before finalizing a transaction, it undergoes verification in both UTXO and account-based systems. +If discrepancies arise, the transaction can be halted.

    • Timestamping: By attaching a timestamp to each transaction, it's possible to order them sequentially, making it easier to spot and prevent double spending.

  3. Complexity in User Experience

    • Challenge: The dual model, while powerful, is inherently complex.

    • Abstracted User Interfaces: Design UIs that handle the complexity behind the scenes, +allowing users to make transactions without needing to understand the nuances of the dual model.

    • Guided Tutorials: Offer onboarding tutorials to acquaint users with the system's features, +especially emphasizing when and why they might choose one transaction type over the other.

    • Feedback Systems: Implement systems where users can provide feedback on any complexities or challenges they encounter. +This real-time feedback can be invaluable for iterative design improvements.

  4. Security

    • Challenge: Merging two systems can introduce unforeseen vulnerabilities.

    • Threat Modeling: Regularly conduct threat modeling exercises to anticipate potential attack vectors, +especially those that might exploit the interaction between the two systems.

    • Layered Security Protocols: Beyond regular audits, introduce multiple layers of security checks. +Each layer can act as a fail-safe if a potential threat bypasses another.

    • Decentralized Watchtowers: These are third-party services that monitor the network for malicious activities. +If any suspicious activity is detected, they can take corrective measures or raise alerts.

  5. Gas & Fee Management

    • Challenge: A dual model can lead to convoluted fee structures.

    • Dynamic Fee Adjustment: Implement algorithms that adjust fees based on network congestion and transaction type. +This can ensure fairness and prevent network abuse.

    • Fee Estimation Tools: Provide tools that can estimate fees before a transaction is initiated. +This helps users understand potential costs upfront.

    • Unified Gas Stations: Design platforms where users can purchase or allocate gas for both transaction types simultaneously, +simplifying the gas acquisition process.

By addressing these challenges head-on with a detailed and systematic approach, it's possible to unlock the full potential of a dual-architecture system, +combining the strengths of both UTXO and account-based models without their standalone limitations.

| Aspect | Details |
| --- | --- |
| Harmony | - Advanced VM Development: Design tailored for private smart contracts. - Leverage Established Architectures: Use WASM or RISC-V to harness their versatile and encompassing nature suitable for zero-knowledge applications. - Support for UTXO & Account-Based Models: Enhance adaptability across various blockchain structures. |
| Challenges | - Adaptation Concerns: WASM and RISC-V weren't designed with zero-knowledge proofs as a primary focus, posing integration challenges. - Complexities with Newer Systems: Systems like (Super)Nova, STARKs, and Sangria are relatively nascent, adding another layer of intricacy to the integration. - Optimization Concerns: Ensuring that these systems are optimized for zero-knowledge proofs. |
| Proposed Solutions | - Integration of Nova: Consider Nova's proof system for its potential alignment with project goals. - Comprehensive Testing: Rigorously test and benchmark against alternatives like Halo2, Plonky, and Starky to validate choices. - Poseidon Recursion Technique: To conduct exhaustive performance tests, providing insights into each system's efficiency and scalability. |

Goal 2: Virtual Machine Creation

The second goal entails the creation of an advanced virtual machine by leveraging established mainstream instruction sets like WASM or RISC-V. +Alternatively, the objective involves pioneering a new, specialized instruction set meticulously optimized for Zero-Knowledge applications.

This initiative seeks to foster a versatile and efficient environment for executing computations within the privacy-focused context of the project. +Both WASM and RISC-V exhibit adaptability to both UTXO and account-based models due to their encompassing nature as general-purpose instruction set architectures.

WASM, operating as a low-level virtual machine, possesses the capacity to execute code derived from a myriad of high-level programming languages, +and boasts seamless integration across diverse blockchain platforms.

Meanwhile, RISC-V emerges as a versatile option, accommodating both models, and can be seamlessly integrated with secure enclaves like SGX or TEE, +elevating the levels of security and privacy. However, it is crucial to acknowledge that employing WASM or RISC-V might present challenges, +given their original design without specific emphasis on optimizing for Zero-Knowledge Proofs (ZKPs).

Further complexity arises with the consideration of more potent proof systems like (Super)Nova, STARKs, and Sangria, which, +while potentially addressing optimization concerns, necessitate extensive research and testing due to their relatively nascent status within the field. +This accentuates the need for a judicious balance between established options and innovative solutions in pursuit of an architecture harmoniously amalgamating privacy, security, and performance.

The ambition to build a powerful virtual machine tailored to zero-knowledge (ZK) applications is both commendable and intricate. +The combination of two renowned instruction sets, WASM and RISC-V, in tandem with ZK, is an innovation that could redefine privacy standards in blockchain. +Let's dissect the challenges and possibilities inherent in this goal:

  1. Established Mainstream Instruction Sets - WASM and RISC-V

    • Strengths:

      • WASM: Rooted in its ability to execute diverse high-level language codes, its potential for cross-chain compatibility makes it a formidable contender. +Serving as a low-level virtual machine, its role in the blockchain realm is analogous to that of the Java Virtual Machine in the traditional computing landscape.

      • RISC-V: This open-standard instruction set architecture has made waves due to its customizable nature. +Its adaptability to both UTXO and account-based structures coupled with its compatibility with trusted execution environments like SGX and TEE augments its appeal, +especially in domains that prioritize security and privacy.

    • Challenges: Neither WASM nor RISC-V was primarily designed with ZKPs in mind. While they offer flexibility, +they might lack the necessary optimizations for ZK-centric tasks. Adjustments to these architectures might demand intensive R&D efforts.

  2. Pioneering a New, Specialized Instruction Set

    • Strengths: A bespoke instruction set can be meticulously designed from the ground up with ZK in focus, +potentially offering unmatched performance and optimizations tailored to the project's requirements.

    • Challenges: Crafting a new instruction set is a monumental task requiring vast resources, including expertise, time, and capital. +It would also need to garner community trust and support over time.

  3. Contemporary Proof Systems - (Super)Nova, STARKs, Sangria

    • Strengths: These cutting-edge systems, being relatively new, might offer breakthrough cryptographic efficiencies that older systems lack: designed with modern challenges in mind, +they could potentially bridge the gap where WASM and RISC-V might falter in terms of ZKP optimization.

    • Challenges: Their nascent nature implies a dearth of exhaustive testing, peer reviews, and potentially limited community support. +The unknowns associated with these systems could introduce unforeseen vulnerabilities or complexities. +While they could offer optimizations that address challenges presented by WASM and RISC-V, their young status demands rigorous vetting and testing.

| | Mainstream (WASM, RISC-V) | ZK-optimized (New Instruction Set) |
| --- | --- | --- |
| Existing Tooling | YES | NO |
| Blockchain-focused | NO | YES |
| Performant | DEPENDS | YES |

Optimization Concerns for WASM and RISC-V:

  • Cryptography Libraries: ZKP applications rely heavily on specific cryptographic primitives. Neither WASM nor RISC-V natively supports all of these primitives. +Thus, a comprehensive library of cryptographic functions, optimized for these platforms, needs to be developed.

  • Parallel Execution: Given the heavy computational demands of ZKPs, leveraging parallel processing capabilities can optimize the time taken. +Both WASM and RISC-V would need modifications to handle parallel execution of ZKP processes efficiently.

  • Memory Management: ZKP computations can sometimes require significant amounts of memory, especially during the proof generation phase. +Fine-tuned memory management mechanisms are essential to prevent bottlenecks.

Emerging ZKP Optimized Systems Considerations:

  • Proof Size: Different systems generate proofs of varying sizes. A smaller proof size is preferable for blockchain applications to save on storage and bandwidth. +The trade-offs between proof size, computational efficiency, and security need to be balanced.

  • Universality: Some systems can support any computational statement (universal), while others might be tailored to specific tasks. +A universal system can be more versatile for diverse applications on the blockchain.

  • Setup Requirements: Certain ZKP systems, like zk-SNARKs, require a trusted setup, which can be a security concern. +Alternatives like zk-STARKs don't have this requirement but come with other trade-offs.

Strategies for Integration:

  • Iterative Development: Given the complexities, an iterative development approach can be beneficial. +Start with a basic integration of WASM or RISC-V for general tasks and gradually introduce specialized ZKP functionalities.

  • Benchmarking: Establish benchmark tests specifically for ZKP operations. This will provide continuous feedback on the performance of the system as modifications are made, ensuring optimization.

  • External Audits & Research: Regular checks from cryptographic experts and collaboration with academic researchers can help in staying updated and ensuring secure implementations.

Goal 3: Proofs Creation and Verification

The process of generating proofs for private state updates is vested in the hands of the user, aligning with our commitment to minimizing trust assumptions and enhancing privacy. +Concurrently, the responsibility of verifying these proofs and executing public functions within the virtual machine can be effectively delegated to an external prover, +a role that is incentivized to operate with utmost honesty and integrity. This intricate balance seeks to safeguard against information leakage, +preserving the confidentiality of private transactions. Integral to this mechanism is the establishment of a robust incentivization framework.

To ensure the prover’s steadfast commitment to performing tasks with honesty, we should introduce a mechanism that facilitates both rewards for sincere behavior and penalties for any deviation from the expected standards. +This two-pronged approach serves as a compelling deterrent against dishonest behavior and fosters an environment of accountability. +In addition to incentivization, a crucial consideration is the economic aspect of verification and execution. +The verification process has been intentionally designed to be more cost-effective than execution.

This strategic approach prevents potential malicious actors from exploiting the system by flooding it with spurious proofs, a scenario that could arise when the costs align favorably. +By maintaining a cost balance that favors verification, we bolster the system’s resilience against fraudulent activities while ensuring its efficiency. +In sum, our multifaceted approach endeavors to strike an intricate equilibrium between user-initiated proof creation, external verification, and incentivization. +This delicate interplay of mechanisms ensures a level of trustworthiness that hinges on transparency, accountability, and economic viability.

As a result, we are poised to cultivate an ecosystem where users’ privacy is preserved, incentives are aligned, and the overall integrity of the system is fortified against potential adversarial actions. To achieve the goals of user-initiated proof creation, external verification, incentivization, and cost-effective verification over execution, several options and mechanisms can be employed:

  1. User-Initiated Proof Creation: Users are entrusted with the generation of proofs for private state updates, thus ensuring greater privacy and reducing trust dependencies.

    • Challenges:

      • Maintaining the quality and integrity of the proofs generated by users.

      • Ensuring that users have the tools and knowledge to produce valid proofs.

    • Solutions:

      • Offer extensive documentation, tutorials, and user-friendly tools to streamline the proof-generation process.

      • Implement checks at the verifier's end to ensure the quality of proofs.

  2. External Verification by Provers: An external prover verifies the proofs and executes public functions within the virtual machine.

    • Challenges:

      • Ensuring that the external prover acts honestly.

      • Avoiding centralized points of failure.

    • Solutions:

      • Adopt a decentralized verification approach, with multiple provers cross-verifying each other’s work.

      • Use reputation systems to rank provers based on their past performances, creating a trust hierarchy.

  3. Incentivization Framework: A system that rewards honesty and penalizes dishonest actions, ensuring provers' commitment to the task.

    • Challenges:

      • Determining the right balance of rewards and penalties.

      • Ensuring that the system cannot be gamed for undue advantage.

    • Solutions:

      • Implement a dynamic reward system that adjusts based on network metrics and provers' performance.

      • Use a staking mechanism where provers need to lock up a certain amount of assets. Honest behavior earns rewards, while dishonest behavior could lead to loss of staked assets.

  4. Economic Viability through Cost Dynamics: Making verification more cost-effective than execution to deter spamming and malicious attacks.

    • Challenges:

      • Setting the right cost metrics for both verification and execution.

      • Ensuring that genuine users aren’t priced out of the system.

    • Solutions:

      • Use a dynamic pricing model, adjusting costs in real-time based on network demand.

      • Implement gas-like mechanisms to differentiate operation costs and ensure fairness.

  5. Maintaining Trustworthiness: Create a system that's transparent, holds all actors accountable, and is economically sound.

    • Challenges:

      • Keeping the balance where users feel their privacy is intact, while provers feel incentivized.

      • Ensuring the system remains resilient against adversarial attacks.

    • Solutions:

      • Implement layered checks and balances.

      • Foster community involvement, allowing them to participate in decision-making, potentially through a decentralized autonomous organization (DAO).

Each of these options can be combined or customized to suit the specific requirements of your project, striking a balance between user incentives, cost dynamics, and verification integrity. A thoughtful combination of these mechanisms ensures that the system remains robust, resilient, and conducive to the objectives of user-initiated proof creation, incentivized verification, and cost-effective validation.

| Aspect | Details |
| --- | --- |
| Design Principle | - User Responsibility: Generating proofs for private state updates. - External Prover: Delegated the task of verifying proofs and executing public VM functions. |
| Trust & Privacy | - Minimized Trust Assumptions: Place proof generation in users' hands. - Enhanced Privacy: Ensure confidentiality of private transactions and prevent information leakage. |
| Incentivization Framework | - Rewards: Compensate honest behavior. - Penalties: Deter and penalize dishonest behavior. |
| Economic Considerations | - Verification vs. Execution: Make verification more cost-effective than execution to prevent spurious proofs flooding. - Cost Balance: Strengthen resilience against fraudulent activities and maintain efficiency. |
| Outcome | An ecosystem where: - Users' privacy is paramount. - Incentives are appropriately aligned. - The system is robust against adversarial actions. |

Goal 4: Kernel-based Architecture Implementation

This goal centers on the establishment of a kernel-based architecture, akin to the model observed in ZEXE, to facilitate the attestation of accurate private function executions. +This innovative approach employs recursion to construct a call stack, which is then validated through iterative recursive computations. +At its core, this technique harnesses a recursive Succinct Non-Interactive Argument of Knowledge (SNARK) mechanism, where each function call’s proof accumulates within the call stack.

The subsequent verification of this stack’s authenticity leverages recursive SNARK validation. +While this method offers robust verification of private function executions, it’s essential to acknowledge its associated intricacies.

The generation of SNARK proofs necessitates a substantial computational effort, which, in turn, may lead to elevated gas fees for users. +Moreover, the iterative recursive computations could potentially exhibit computational expansion as the depth of recursion increases. +This calls for a meticulous balance between the benefits of recursive verification and the resource implications it may entail.

In essence, Goal 4 embodies a pursuit of enhanced verification accuracy through a kernel-based architecture. +By weaving recursion and iterative recursive computations into the fabric of our system, we aim to establish a mechanism that accentuates the trustworthiness of private function executions, +while conscientiously navigating the computational demands that ensue.

To accomplish the goal of implementing a kernel-based architecture for recursive verification of private function executions, +several strategic steps and considerations can be undertaken: recursion handling and depth management.

Recursion Handling
  • Call Stack Management:

    • Implement a data structure to manage the call stack, recording each recursive function call’s details, parameters, and state.
  • Proof Accumulation:

    • Design a mechanism to accumulate proof data for each function call within the call stack. +This includes cryptographic commitments, intermediate results, and cryptographic challenges.

    • Ensure that the accumulated proof data remains secure and tamper-resistant throughout the recursion process.

  • Intermediary SNARK Proofs:

    • Develop an intermediary SNARK proof for each function call’s correctness within the call stack. +This proof should demonstrate that the function executed correctly and produced expected outputs.

    • Ensure that the intermediary SNARK proof for each recursive call can be aggregated and verified together, maintaining the integrity of the entire call stack.

Depth management
  • Depth Limitation:

    • Define a threshold for the maximum allowable recursion depth based on the system’s computational capacity, gas limitations, and performance considerations.

    • Implement a mechanism to prevent further recursion beyond the defined depth limit, safeguarding against excessive computational growth.

  • Graceful Degradation:

    • Design a strategy for graceful degradation when the recursion depth approaches or reaches the defined limit. +This may involve transitioning to alternative execution modes or optimization techniques.

    • Communicate the degradation strategy to users and ensure that the system gracefully handles scenarios where recursion must be curtailed.

  • Resource Monitoring:

    • Develop tools to monitor resource consumption (such as gas usage and computational time) as recursion progresses. +Provide real-time feedback to users about the cost and impact of recursive execution.
  • Dynamic Depth Adjustment:

    • Consider implementing adaptive depth management that dynamically adjusts the recursion depth based on network conditions, transaction fees, and available resources.

    • Utilize algorithms to assess the optimal recursion depth for efficient execution while adhering to gas cost constraints.

  • Fallback Mechanisms:

    • Create fallback mechanisms that activate if the recursion depth limit is reached or if the system encounters resource constraints. +These mechanisms could involve alternative verification methods or delayed execution.
  • User Notifications:

    • Notify users when the recursion depth limit is approaching, enabling them to make informed decisions about the complexity of their transactions and potential resource usage.

Goal 4 underscores the project's ambition to integrate the merits of a kernel-based architecture with recursive verifications to bolster the reliability of private function executions. +While the approach promises robust outcomes, it's pivotal to maneuver through its intricacies with astute strategies, ensuring computational efficiency and economic viability. +By striking this balance, the architecture can realize its full potential in ensuring trustworthy and efficient private function executions.

Goal 5: Seamless Interaction Design

Goal 5 revolves around the meticulous design of a seamless interaction between public and private states within the blockchain ecosystem. +This objective envisions achieving not only composability between contracts but also the harmonious integration of private and public functions.

A notable challenge in this endeavor lies in the intricate interplay between public and private states, +wherein the potential linkage of a private transaction to a public one raises concerns about unintended information leakage.

The essence of this goal entails crafting an architecture that facilitates the dynamic interaction of different states while ensuring that the privacy and confidentiality of private transactions remain unbreached. +This involves the formulation of mechanisms that enable secure composability between contracts, guaranteeing the integrity of interactions across different layers of functionality.

A key focus of this goal is to surmount the challenge of information leakage by implementing robust safeguards. The solution involves devising strategies to mitigate the risk of revealing private transaction details when connected to corresponding public actions. By creating a nuanced framework that compartmentalizes private and public interactions, the architecture aims to uphold privacy while facilitating seamless interoperability.

Goal 5 encapsulates a multifaceted undertaking, calling for the creation of an intricate yet transparent framework that empowers users to confidently engage in both public and private functions, +without compromising the confidentiality of private transactions. The successful realization of this vision hinges on a delicate blend of architectural ingenuity, cryptographic sophistication, and user-centric design.

To achieve seamless interaction between public and private states, composability, and privacy preservation, a combination of solutions and approaches can be employed. +In the table below, a comprehensive list of solutions that address these objectives:

| Solution Category | Description |
| --- | --- |
| Layer 2 Solutions | Employ zk-Rollups, Optimistic Rollups, and state channels to handle private interactions off-chain and settle them on-chain periodically. Boost scalability and cut transaction costs. |
| Intermediary Smart Contracts | Craft smart contracts as intermediaries for secure public-private interactions. Use these to manage data exchange confidentially. |
| Decentralized Identity & Pseudonymity | Implement decentralized identity systems for pseudonymous interactions. Validate identity using cryptographic proofs. |
| Confidential Sidechains & Cross-Chain | Set up confidential sidechains and employ cross-chain protocols to ensure privacy and composability across blockchains. |
| Temporal Data Structures | Create chronological data structures for secure interactions. Utilize cryptographic methods for data integrity and privacy. |
| Homomorphic Encryption & MPC | Apply homomorphic encryption and MPC for computations on encrypted data and interactions between state layers. |
| Commit-Reveal Schemes | Introduce commit-reveal mechanisms for private transactions, revealing data only post necessary public actions. |
| Auditability & Verifiability | Use on-chain tools for auditing and verifying interactions. Utilize cryptographic commitments for third-party validation. |
| Data Fragmentation & Sharding | Fragment data across shards for private interactions and curtailed data exposure. Bridge shards securely with cryptography. |
| Ring Signatures & CoinJoin | Incorporate ring signatures and CoinJoin protocols to mask transaction details and mix transactions collaboratively. |

Goal 6: Integration of DeFi Protocols with a Privacy-Preserving Framework

The primary aim of Goal 6 is to weave key DeFi protocols, such as AMMs and staking, into a user-centric environment that accentuates privacy. +This endeavor comes with inherent challenges, especially considering the heterogeneity of existing DeFi protocols, predominantly built on Ethereum. +These variations in programming languages and VMs exacerbate the quest for interoperability. Furthermore, the success and functionality of DeFi protocols is closely tied to liquidity, +which in turn is influenced by user engagement and the amount of funds locked into the system.

Strategic Roadmap for Goal 6

  1. Pioneering Privacy-Centric DeFi Models: Initiate the development of AMMs and staking solutions that are inherently protective of users' transactional privacy and identity.

  2. Specialized Smart Contracts with Privacy: Architect distinct smart contracts infused with privacy elements, setting the stage for secure user interactions within this new, confidential DeFi landscape.

  3. Optimized User Interfaces: Craft interfaces that resonate with user needs, simplifying the journey through the private DeFi space without compromising on security.

  4. Tackling Interoperability:

    • Deploy advanced bridge technologies and middleware tools to foster efficient data exchanges and guarantee operational harmony across a spectrum of programming paradigms and virtual environments.

    • Design and enforce universal communication guidelines that bridge the privacy-centric DeFi entities with the larger DeFi world seamlessly.

  5. Enhancing and Sustaining Liquidity:

    • Unveil innovative liquidity stimuli and yield farming incentives, compelling users to infuse liquidity into the private DeFi space.

    • Incorporate adaptive liquidity frameworks that continually adjust based on the evolving market demands, ensuring consistent liquidity.

    • Forge robust alliances with other DeFi stalwarts, jointly maximizing liquidity stores and honing sustainable token distribution strategies.

  6. Amplifying Community Engagement: Design and roll out enticing incentive schemes to rally users behind privacy-focused AMMs and staking systems, thereby nurturing a vibrant, privacy-advocating DeFi community.

Through the integration of these approaches, we aim to achieve Goal 6, providing users with a privacy-focused platform for engaging effortlessly in core DeFi functions such as AMMs and staking, +all while effectively overcoming the obstacles related to interoperability and liquidity concerns.

Summary of the Architecture

In our quest to optimize privacy, we're proposing a Zero-Knowledge Virtual Machine (zkVM) that harnesses the power of Zero-Knowledge Proofs (ZKPs). These proofs ensure that while private state data remains undisclosed, public state transitions can still be carried out and subsequently verified by third parties. This blend of public and private state is envisaged to be achieved through a state tree representing the public state, while the encrypted state leaves stand for the private state. Each user's private state indicates validity through the absence of a corresponding nullifier. A nullifier is a unique cryptographic value generated in privacy-preserving blockchain transactions to prevent double-spending, ensuring that each private transaction is spent only once without revealing its details.

Private functions' execution mandates users to offer a proof underscoring the accurate execution of all encapsulated private calls. +For validating a singular private function call, we're leaning into the kernel-based model inspired by the ZEXE protocol. +Defined as kernel circuits, these functions validate the correct execution of each private function call. +Due to their recursive circuit structure, a succession of private function calls can be executed by calculating proofs in an iterative manner. +Execution-relevant data, like private and public call stacks and additions to the state tree, are incorporated as public inputs.

Our method integrates the verification keys for these functions within a merkle tree. Here's the innovation: a user's ZKP showcases the existence of the verification key in this tree, yet keeps the executed function concealed. +The unique function identifier can be presented as the verification key, with all contracts merkleized for hiding functionalities.

We suggest a nuanced shift from the ZEXE protocol's identity function, which crafts an identity for smart contracts delineating its behavior, access timeframes, and other functionalities. +Instead of the ZEXE protocol's structure, our approach pivots to a method anchored in the +security of a secret combined with the uniqueness from hashing with the contract address. +The underlying rationale is straightforward: the sender, equipped with a unique nonce and salt for the transaction, hashes the secret, payload, nonce, and salt. +This result is then hashed with the contract address for the final value. The hash function's unidirectional nature ensures that the input cannot be deduced easily from its output. +A specific concern, however, is the potential repetition of secret and payload values across transactions, which could jeopardize privacy. +Yet, by embedding the function's hash within the hash of the contract address, users can validate a specific function's execution without divulging the function, navigating this limitation.

Alternative routes do exist: We could employ signature schemes like ECDSA, focusing on uniqueness and authenticity, albeit at the cost of complex key management. +Fully Homomorphic Encryption (FHE) offers another pathway, enabling function execution on encrypted data, or Multi-Party Computation (MPC) which guarantees non-disclosure of function or inputs. +Yet, integrating ZKPs with either FHE or MPC presents a challenge. Combining cryptographic functions like SHA-3 and BLAKE2 can also bolster security and uniqueness. +It's imperative to entertain these alternatives, especially when hashing might not serve large input/output functions effectively or might fall short in guaranteeing uniqueness.

Current State

Our aim is to revolutionize the privacy and security paradigms through Nescience. +As we strive to set milestones and achieve groundbreaking advancements, +our current focus narrows onto the realization of Goal 2 and Goal 3.

Our endeavors to build a powerful virtual machine tailored for Zero-Knowledge applications have led us down the path of rigorous exploration and testing. +We believe that integrating the right proof system is pivotal to our project's success, which brings us to Nova [8]. +In our project journey, we have opted to integrate the Nova proof system, recognizing its potential alignment with our overarching goals. +However, as part of our meticulous approach to innovation and optimization, we acknowledge the need to thoroughly examine Nova’s performance capabilities, +particularly due to its status as a pioneering and relatively unexplored proof system.

This critical evaluation entails a comprehensive process of benchmarking and comparative analysis [9], +pitting Nova against other prominent proof systems in the field, including Halo2 [10], +Plonky2 [11], and Starky [12]. +This ongoing and methodical initiative is designed to ensure a fair and impartial assessment, enabling us to draw meaningful conclusions about Nova’s strengths and limitations in relation to its counterparts. +By leveraging the Poseidon recursion technique, we are poised to conduct an exhaustive performance test that delves into intricate details. +Through this testing framework, we aim to discern whether Nova possesses the potential to outshine its contemporaries in terms of efficiency, scalability, and overall performance. +The outcome of this rigorous evaluation will be pivotal in shaping our strategic decisions moving forward. +Armed with a comprehensive understanding of Nova’s performance metrics vis-à-vis other proof systems, +we can confidently chart a course that maximizes the benefits of our project’s optimization efforts.

Moreover, as we ambitiously pursue the establishment of a robust mechanism for proof creation and verification, our focus remains resolute on preserving user privacy, +incentivizing honest behaviour, and ensuring the cost-effective verification of transactions. +At the heart of this endeavor is our drive to empower users by allowing them the autonomy of generating proofs for private state updates, +thereby reducing dependencies and enhancing privacy. +We would like to actively work on providing comprehensive documentation, user-friendly tools, +and tutorials to aid users in this intricate process.

Parallelly, we're looking into decentralized verification processes, harnessing the strength of multiple external provers that cross-verify each other's work. +Our commitment is further cemented by our efforts to introduce a dynamic reward system that adjusts based on network metrics and prover performance. +This intricate balance, while challenging, aims to fortify our system against potential adversarial actions, aligning incentives, and preserving the overall integrity of the project.

References

[1] Nakamoto, S. (2008). Bitcoin: A Peer-to-Peer Electronic Cash System. Retrieved from https://bitcoin.org/bitcoin.pdf

[2] Sanchez, F. (2021). Cardano’s Extended UTXO accounting model. Retrieved from https://iohk.io/en/blog/posts/2021/03/11/cardanos-extended-utxo-accounting-model/

[3] Morgan, D. (2020). HD Wallets Explained: From High Level to Nuts and Bolts. Retrieved from https://medium.com/mycrypto/the-journey-from-mnemonic-phrase-to-address-6c5e86e11e14

[4] Wuille, P. (2012). Bitcoin Improvement Proposal (BIP) 32. Retrieved from https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki

[5] Jedusor, T. (2020). Introduction to Mimblewimble and Grin. Retrieved from https://github.com/mimblewimble/grin/blob/master/doc/intro.md

[6] Bitcoin's official wiki overview of the CoinJoin method. Retrieved from https://en.bitcoin.it/wiki/CoinJoin

[7] TornadoCash official Github page. Retrieved from https://github.com/tornadocash/tornado-classic-ui

[8] Kothapalli, A., Setty, S., Tzialla, I. (2021). Nova: Recursive Zero-Knowledge Arguments from Folding Schemes. Retrieved from https://eprint.iacr.org/2021/370

[9] ZKvm Github page. Retrieved from https://github.com/vacp2p/zk-explorations

[10] Electric Coin Company (2020). Explaining Halo 2. Retrieved from https://electriccoin.co/blog/explaining-halo-2/

[11] Polygon Labs (2022). Introducing Plonky2. Retrieved from https://polygon.technology/blog/introducing-plonky2

[12] StarkWare (2021). ethSTARK Documentation. Retrieved from https://eprint.iacr.org/2021/582


  1. Incentive Mechanisms:

    • Token Rewards: Design a token-based reward system where honest provers are compensated with tokens for their verification services. This incentivizes participation and encourages integrity.

    • Staking and Slashing: Introduce a staking mechanism where provers deposit tokens as collateral. Dishonest behavior results in slashing (partial or complete loss) of the staked tokens, while honest actions are rewarded.

    • Proof of Work/Proof of Stake: Implement a proof-of-work or proof-of-stake consensus mechanism for verification, aligning incentives with the blockchain’s broader consensus mechanism.

---

RLN-v3: Towards a Flexible and Cost-Efficient Implementation

Improving on the previous version of RLN by allowing dynamic epoch sizes.

Introduction

Recommended previous reading: Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku.

The premise of RLN-v3 is to have a variable message rate per variable epoch, +which can be explained in the following way:

  • RLN-v1: “Alice can send 1 message per global epoch”

    Practically, this is 1 msg/second

  • RLN-v2: “Alice can send x messages per global epoch”

    Practically, this is x msg/second

  • RLN-v3: “Alice can send x messages within a time interval y chosen by herself. The funds she has to pay are affected by both the number of messages and the chosen time interval. Other participants can choose different time intervals fitting their specific needs.”

    Practically, this is x msg/y seconds

RLN-v3 allows higher flexibility and ease of payment/stake for users who have more predictable usage patterns and therefore, +more predictable bandwidth usage on a p2p network (Waku, etc.).

For example:

  • An AMM that broadcasts bids, asks, and fills over Waku may require a lot of throughput in the smallest epoch possible and hence may register an RLN-v3 membership of 10000 msg/1 second. +They could do this with RLN-v2, too.
  • Alice, a casual user of a messaging app built on Waku, who messages maybe 3-4 people infrequently during the day, may register an RLN-v3 membership of 100 msg/hour, +which would not be possible in RLN-v2 considering the global epoch was set to 1 second. +With RLN-v2, Alice would have to register with a membership of 1 msg/sec, +which would translate to 3600 msg/hour. This is much higher than her usage and would +result in her overpaying to stake into the membership set.
  • A sync service built over Waku, +whose spec defines that it MUST broadcast a set of public keys every hour, +may register an RLN-v3 membership of 1 msg/hour, +cutting down the costs to enter the membership set earlier.

Theory

Modification to leaves set in the membership Merkle tree

To ensure that a user’s epoch size (user_epoch_limit) is included within their membership we must modify the user’s commitment/leaf in the tree to contain it. +A user’s commitment/leaf in the tree is referred to as a rate_commitment, +which was previously derived from their public key (identity_commitment) +and their variable message rate (user_message_limit).

In RLN-v2:

$rate\_commitment = poseidon([identity\_commitment, user\_message\_limit])$

In RLN-v3:

$rate\_commitment = poseidon([identity\_commitment, user\_message\_limit, user\_epoch\_limit])$

Modification to circuit inputs

To detect double signaling, +we make use of a circuit output nullifier, +which remains the same if a user generates a proof with the same message_id and external_nullifier, +where the external_nullifier and nullifier are defined as:

$external\_nullifier = poseidon([epoch, rln\_identifier])$

$nullifier = poseidon([identity\_secret, external\_nullifier, message\_id])$

Where:

  • epoch is defined as the Unix epoch timestamp with seconds precision.
  • rln_identifier uniquely identifies an application for which a user submits a proof.
  • identity_secret is the private key of the user.
  • message_id is the sequence number of the user’s message within user_message_limit in an epoch.

In RLN-v2, the global epoch was 1 second, +hence we did not need to perform any assertions to the epoch’s value inside the circuit, +and the validation of the epoch was handled off-circuit (i.e., too old, too large, bad values, etc.).

In RLN-v3, we propose that the epoch that is passed into the circuit +must be a valid multiple of user_epoch_limit +since the user may pass in values of the epoch which do not directly correlate with the user_epoch_limit.

For example:

  • A user with a user_epoch_limit of 120 passes in an epoch of 237, generates user_message_limit proofs with it, then increments the epoch by 1 and generates another user_message_limit proofs with it, thereby allowing them to bypass the message-per-epoch restriction.

One could say that we could perform this validation outside of the circuit, +but we maintain the user_epoch_limit as a private input to the circuit so that the user is not deanonymized by the anonymity set connected to that user_epoch_limit. +Since user_epoch_limit is kept private, +the verifier does not have access to that value and cannot perform validation on it.

If we ensure that the epoch is a multiple of user_epoch_limit, +we have the following scenarios:

  • A user with user_epoch_limit of 120 +passes in an epoch of 237. +Proof generation fails since the epoch is not a multiple of user_epoch_limit.
  • A user with user_epoch_limit of 120 +passes in an epoch of 240 and +can generate user_message_limit proofs without being slashed.

Since we perform operations on the epoch, we must include it as a circuit input (previously, it was removed from the circuit inputs to RLN-v2).

Therefore, the new circuit inputs are as follows:

// unchanged
private identity_secret
private user_message_limit
private message_id
private pathElements[]
private pathIndices[]
public x // messageHash

// new/changed
private user_epoch_limit
private user_epoch_quotient // epoch/user_epoch_limit to assert within circuit
public epoch
public rln_identifier

The circuit outputs remain the same.

Additional circuit constraints

  1. Since we accept the epoch, user_epoch_quotient, and user_epoch_limit, +we must ensure that the relation between these 3 values is preserved. I.e.:

    epoch == user\_epoch\_limit * user\_epoch\_quotient
  2. To ensure no overflows/underflows occur in the above multiplication, +we must constrain the inputs of epoch, user_epoch_quotient, and user_epoch_limit. +We have assumed 3600 to be the maximum valid size of the user_epoch_limit.

size(epoch) \leq 64\ bits \\ size(user\_epoch\_limit) \leq 12\ bits \\ user\_epoch\_limit \leq 3600 \\ user\_epoch\_limit \leq epoch \\ user\_epoch\_quotient < user\_epoch\_limit

Modifications to external epoch validation (Waku, etc.)

For receivers of an RLN-v3 proof +to detect if a message is too old, we must use the higher bound of the user_epoch_limit, which has been set to 3600. +The trade-off here is that we allow hour-old messages to propagate within the network.

Modifications to double signaling detection scheme (Waku, etc.)

For verifiers of RLN-v1/v2 proofs, +a log of nullifiers seen in the last epoch is maintained, +and if there is a match with a pre-existing nullifier, +double signaling has been detected and the verifier MAY proceed to slash the spamming user.

With the RLN-v3 scheme, +we need to increase the size of the nullifier log used, +which previously cleared itself every second to the higher bound of the user_epoch_limit, which is 3600. +Now, the RLN proof verifier must clear the nullifier log every 3600 seconds to satisfactorily detect double signaling.

The implementation

An implementation of the RLN-v3 scheme in gnark can be found here.

Comments on performance

  • Hardware: Macbook Air M2, 16GB RAM
  • Circuit: RLN-v3
  • Proving system: Groth16
  • Framework: gnark
  • Elliptic curve: bn254 (aka bn128) (not to be confused with the 254-bit Weierstrass curve)
  • Finite field: Prime-order subgroup of the group of points on the bn254 curve
  • Default Merkle tree height: 20
  • Hashing algorithm: Poseidon
  • Merkle tree: Sparse Indexed Merkle Tree

Proving

The proving time for the RLN-v3 circuit is 90ms for a single proof.

Verification

The verification time for the RLN-v3 circuit is 1.7ms for a single proof.

Conclusion

The RLN-v3 scheme introduces a new epoch-based message rate-limiting scheme to the RLN protocol. +It enhances the user's flexibility in setting their message limits and cost-optimizes their stake.

Future work

  • Implementing the RLN-v3 scheme in Zerokit
  • Implementing the RLN-v3 scheme in Waku
  • Formal security analysis of the RLN-v3 scheme

References

]]>
+ + Aaryamann + +
+ + <![CDATA[Verifying RLN Proofs in Light Clients with Subtrees]]> + https://vac.dev/rlog/rln-light-verifiers + + 2024-05-03T12:00:00.000Z + + How resource-restricted devices can verify RLN proofs fast and efficiently.

Introduction

Recommended previous reading: Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku.

This post expands upon ideas described in the previous post, +focusing on how resource-restricted devices can verify RLN proofs fast and efficiently.

Previously, it was required to fetch all the memberships from the smart contract, +construct the merkle tree locally, +and derive the merkle root, +which is subsequently used to verify RLN proofs.

This process is not feasible for resource-restricted devices since it involves a lot of RPC calls, computation and fault tolerance. +One cannot expect a mobile phone to fetch all the memberships from the smart contract and construct the merkle tree locally.

Constraints and requirements

An alternative solution to the one proposed in this post is to construct the merkle tree on-chain, +and have the root accessible with a single RPC call. +However, this approach increases gas costs for inserting new memberships and may not be feasible until it is optimized further with batching mechanisms, etc.

The other methods have been explored in more depth here.

Following are the requirements and constraints for the solution proposed in this post:

  1. Cheap membership insertions.
  2. As few RPC calls as possible to reduce startup time.
  3. Merkle root of the tree is available on-chain.
  4. No centralized services to sequence membership insertions.
  5. Map inserted commitments to the block in which they were inserted.

Metrics on sync time for a tree with 2,653 leaves

The following metrics are based on the current implementation of RLN in the Waku gen0 network.

Test bench

  • Hardware: Macbook Air M2, 16GB RAM
  • Network: 120 Megabits/sec
  • Nwaku commit: e61e4ff
  • RLN membership set contract: 0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4
  • Deployed block number: 4,230,716
  • RLN Membership set depth: 20
  • Hash function: PoseidonT3 (which is a gas guzzler)
  • Max size of the membership set: 2^20 = 1,048,576 leaves

Metrics

  • Time to sync the whole tree: 4 minutes
  • RPC calls: 702
  • Number of leaves: 2,653

One can argue that the time to sync the tree at the current state is not that bad. +However, the number of RPC calls is a concern, +which scales linearly with the number of blocks since the contract was deployed. +This is because the implementation fetches all events from the contract, +chunking 2,000 blocks at a time. +This is done to avoid hitting the block limit of 10,000 events per call, +which is a limitation of popular RPC providers.

Proposed solution

From a theoretical perspective, +one could construct the merkle tree on-chain, +in a view call, in-memory. +However, this is not feasible due to the gas costs associated with it.

To compute the root of a Merkle tree with 2^{20} leaves, it costs approximately 2 billion gas. +With Infura and Alchemy capping the gas limit to 350M and 550M gas respectively, +it is not possible to compute the root of the tree in a single call.

Acknowledging that Polygon Miden and Penumbra both make use of a tiered commitment tree, +we propose a similar approach for RLN.

A tiered commitment tree is a tree which is sharded into multiple smaller subtrees, +each of which is a tree in itself. +This allows scaling in terms of the number of leaves, +as well as reducing state bloat by just storing the root of a subtree when it is full instead of all its leaves.

Here, the question arises: +What is the maximum number of leaves in a subtree with which the root can be computed in a single call?

It costs approximately 217M gas to compute the root of a Merkle tree with 2^{10} leaves.

This is a feasible number for a single call, +and hence we propose a tiered commitment tree with a maximum of 2^{10} leaves in a subtree and the number of subtrees is 2^{10}. +Therefore, the maximum number of leaves in the tree is 2^{20} (the same as the current implementation).

img

Insertion

When a commitment is inserted into the tree it is first inserted into the first subtree. +When the first subtree is full the next insertions go into the second subtree and so on.

Syncing

When syncing the tree, +one only needs to fetch the roots of the subtrees. +The root of the full tree can be computed in-memory or on-chain.

This allows us to derive the following relation:

number_of_rpc_calls=number_of_filled_subtrees+1number\_of\_rpc\_calls = number\_of\_filled\_subtrees + 1

This is a significant improvement over the current implementation, +which requires fetching all the memberships from the smart contract.

Gas costs

The gas costs for inserting a commitment into the tree are the same as the current implementation except it consists of an extra SSTORE operation to store the shardIndex of the commitment.

Events

The events emitted by the contract are the same as the current implementation, +appending the shardIndex of the commitment.

Proof of concept

A proof of concept implementation of the tiered commitment tree is available here, +and is deployed on Sepolia at 0xE7987c70B54Ff32f0D5CBbAA8c8Fc1cAf632b9A5.

It is compatible with the current implementation of the RLN verifier.

Future work

  1. Optimize the gas costs of the tiered commitment tree.
  2. Explore using different number of leaves under a given node in the tree (currently set to 2).

Conclusion

The tiered commitment tree is a promising approach to reduce the number of RPC calls required to sync the tree and reduce the gas costs associated with computing the root of the tree. +Consequently, it allows for a more scalable and efficient RLN verifier.

References

]]>
+ + Aaryamann + +
+ + <![CDATA[Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku]]> + https://vac.dev/rlog/rln-anonymous-dos-prevention + + 2023-11-07T12:00:00.000Z + + Rate Limiting Nullifiers in practice, applied to an anonymous p2p network, like Waku.

Introduction

Rate Limiting Nullifier (RLN) is a zero-knowledge gadget that allows users to prove 2 pieces of information,

  1. They belong to a permissioned membership set
  2. Their rate of signaling abides by a fixed number that has been previously declared

The "membership set" introduced above, is in the form of a sparse, indexed merkle tree. +This membership set can be maintained on-chain, off-chain or as a hybrid depending on the network's storage costs. +Waku makes use of a hybrid membership set, +where insertions are tracked in a smart contract. +In addition, each Waku node maintains a local copy of the tree, +which is updated upon each insertion.

Users register themselves with a hash of a locally generated secret, +which is then inserted into the tree at the next available index. +After having registered, users can prove their membership by proving their knowledge of the pre-image of the respective leaf in the tree. +The leaf hashes are also referred to as commitments of the respective users. +The actual proof is done by a Merkle Inclusion Proof, which is a type of ZK proof.

The circuit ensures that the user's secret does indeed hash to a leaf in the tree, +and that the provided Merkle proof is valid.

After a User generates this Merkle proof, +they can transmit it to other users, +who can verify the proof. +Including a message's hash within the proof generation, +additionally guarantees integrity of that message.

A malicious user could generate multiple proofs per epoch. +However, when multiple proofs are generated per epoch, +the malicious user's secret is exposed, which strongly disincentivizes this attack. +This mechanism is further described in malicious User secret interpolation mechanism.

Note: This blog post describes rln-v1, which excludes the range check in favor of a global rate limit for all users, +which is once per time window. This version is currently in use in waku-rln-relay.

RLN Protocol parameters

Given below is the set of cryptographic primitives, +and constants that are used in the RLN protocol.

  1. Proving System: groth16
  2. Elliptic Curve: bn254 (aka bn128) (not to be confused with the 254 bit Weierstrass curve)
  3. Finite Field: Prime-order subgroup of the group of points on the bn254 curve
  4. Default Merkle Tree Height: 20
  5. Hashing algorithm: Poseidon
  6. Merkle Tree: Sparse Indexed Merkle Tree
  7. Messages per epoch: 1
  8. Epoch duration: 10 seconds

Malicious User secret interpolation mechanism

note: all the parameters mentioned below are elements in the finite field mentioned above.

The private inputs to the circuit are as follows: -

identitySecret: the randomly generated secret of the user
identityPathIndex: the index of the commitment derived from the secret
pathElements: elements included in the path to the index of the commitment

Following are the public inputs to the circuit -

x: hash of the signal to the finite field
rlnIdentifier: application-specific identifier which this proof is being generated for
epoch: the timestamp which this proof is being generated for

The outputs of the circuit are as follows: -

y: result of Shamir's secret sharing calculation
root: root of the Merkle tree obtained after applying the inclusion proof
nullifier: uniquely identifies a message, derived from rlnIdentifier, epoch, and the user's secret

With the above data in mind, following is the circuit pseudocode -

identityCommitment = Poseidon([identitySecret])
root = MerkleInclusionProof(identityCommitment, identityPathIndex, pathElements)
externalNullifier = Poseidon([epoch, rlnIdentifier])
a1 = Poseidon([identitySecret, externalNullifier])
y = identitySecret + a1 * x
nullifier = Poseidon([a1])

To interpolate the secret of a user who has sent multiple signals during the same epoch to the same rln-based application, we may make use of the following formula -

a_1 = {(y_1 - y_2) \over (x_1 - x_2)}

where x_1, y_1 and x_2, y_2 are shares from different messages.

Subsequently, we may use one pair of the shares, x_1 and y_1, to obtain the identitySecret:

identitySecret = y_1 - a_1 * x_1

This enables RLN to be used for rate limiting with a global limit. For arbitrary limits, +please refer to an article written by @curryrasul, rln-v2.

Waku's problem with DoS

In a decentralized, privacy focused messaging system like Waku, +Denial of Service (DoS) vulnerabilities are very common, and must be addressed to promote network scale and optimal bandwidth utilization.

DoS prevention with user metadata

There are a couple of ways a user can be rate-limited, either -

  1. IP Logging
  2. KYC Logging

Both IP and KYC logging prevent systems from being truly anonymous, and hence, cannot be used as a valid DoS prevention mechanism for Waku.

RLN can be used as an alternative, which provides the best of both worlds, i.e a permissioned membership set, as well as anonymous signaling. +However, we are bound by k-anonymity rules of the membership set.

Waku-RLN-Relay is a libp2p pubsub validator that verifies if a proof attached to a given message is valid. +In case the proof is valid, the message is relayed.

Performance analysis

Test bench specs: AMD EPYC 7502P 32-Core, 4x32GB DDR4 Reg.ECC Memory

This simulation was conducted by @alrevuelta, and is described in more detail here.

The simulation included 100 waku nodes running in parallel.

Proof generation times - +

img

Proof verification times - +

img

A spammer node publishes 3000 msg/epoch, which is detected by all connected nodes, and subsequently disconnect to prevent further spam - +

img

Security analysis

Barbulescu and Duquesne +conclude that the bn254 curve has only 100 bits of security. +Since the bn254 curve has a small embedding degree, +it is vulnerable to the MOV attack. +However, the MOV attack is only applicable to pairings, +and not to the elliptic curve itself. +It is acceptable to use the bn254 curve for RLN, +since the circuit does not make use of pairings.

An analysis on the number of rounds in the Poseidon hash function was done, +which concluded that the hashing rounds should not be reduced.

The smart contracts have not been audited, and are not recommended for real world deployments yet.

Storage analysis

commitment\_size = 32\ bytes \\ tree\_height = 20 \\ total\_leaves = 2^{20} \\ max\_tree\_size = total\_leaves * commitment\_size \\ max\_tree\_size = 2^{20} * 32 = 33,554,432 \\ ∴ max\_tree\_size = 33.55\ megabytes

The storage overhead introduced by RLN is minimal. +RLN only requires 34 megabytes of storage, which poses no problem on most end-user hardware, with the exception of IoT/microcontrollers. +Still, we are working on further optimizations allowing proof generation without having to store the full tree.

The bare minimum requirements to run RLN

With proof generation time in sub-second latency, along with low storage overhead for the tree, +it is possible for end users to generate and verify RLN proofs on a modern smartphone.

Following is a demo provided by @rramos that demonstrates +waku-rln-relay used in react native.

Warning: The react native sdk will be deprecated soon, and the above demo should serve as a PoC for RLN on mobiles

RLN usage guide

Zerokit implements api's that allow users to handle operations to the tree, +as well as generate/verify RLN proofs.

Our main implementation of RLN can be accessed via this Rust crate, +which is documented here. +It can be used in other languages via the FFI API, which is documented here. +The usage of RLN in Waku is detailed in our RLN Implementers guide, +which provides step-by-step instructions on how to run Waku-RLN-Relay.

Following is a diagram that will help understand the dependency tree -

rln-dep-tree

Future work

  • Optimizations to zerokit for proof generation time.
  • Incrementing tree depth from 20 to 32, to allow more memberships.
  • Optimizations to the smart contract.
  • An ability to signal validity of a message in different time windows.
  • Usage of proving systems other than Groth16.

References

]]>
+ + Aaryamann + +
+ + <![CDATA[GossipSub Improvements: Evolution of Overlay Design and Message Dissemination in Unstructured P2P Networks]]> + https://vac.dev/rlog/GossipSub Improvements + + 2023-11-06T12:00:00.000Z + + GossipSub Improvements: Evolution of Overlay Design and Message Dissemination in Unstructured P2P Networks

Motivation

We have been recently working on analyzing and improving the performance of the GossipSub protocol for large messages, +as in the case of Ethereum Improvement Proposal EIP-4844. +This work led to a comprehensive study of unstructured P2P networks. +The intention was to identify the best practices that can serve as guidelines for performance improvement and scalability of P2P networks.

Introduction

Nodes in an unstructured p2p network form self-organizing overlay(s) on top of the IP infrastructure to facilitate different services like information dissemination, +query propagation, file sharing, etc. The overlay(s) can be as optimal as a tree-like structure or as enforcing as a fully connected mesh.

Due to peer autonomy and a trustless computing environment, some peers may deviate from the expected operation or even leave the network. +At the same time, the underlying IP layer is unreliable.

Therefore, tree-like overlays are not best suited for reliable information propagation. +Moreover, tree-based solutions usually result in significantly higher message dissemination latency due to suboptimal branches.

Flooding-based solutions, on the other hand, result in maximum resilience against adversaries and achieve minimal message dissemination latency because the message propagates through all (including the optimal) paths. +Redundant transmissions help maintain the integrity and security of the network in the presence of adversaries and high node failure but significantly increase network-wide bandwidth utilization, cramming the bottleneck links.

An efficient alternative is to lower the number of redundant transmissions by D-regular broadcasting, where a peer will likely receive (or relay) a message from up to D random peers. +Publishing through a D-regular overlay triggers approximately N \times D transmissions. +Reducing D reduces the redundant transmissions but compromises reachability and latency. +Sharing metadata through a K-regular overlay (where K > D) allows nodes to pull missing messages.

GossipSub [1] benefits from full-message (D-regular) and metadata-only (k-regular) overlays. +Alternatively, a metadata-only overlay can be used, requiring a pull-based operation that significantly minimizes bandwidth utilization at the cost of increased latency.

Striking the right balance between parameters like D, K, pull-based operation, etc., can yield application-specific performance tuning, but scalability remains a problem.

At the same time, many other aspects can significantly contribute to the network's performance and scalability. +One option is to realize peers' suitability and continuously changing capabilities while forming overlays.

For instance, a low-bandwidth link near a publisher can significantly demean the entire network's performance. +Reshuffling of peering links according to the changing network conditions can lead to superior performance.

Laying off additional responsibilities to more capable nodes (super nodes) can alleviate peer cramming, but it makes the network susceptible to adversaries/peer churn. +Grouping multiple super nodes to form virtual node(s) can solve this problem.

Similarly, flat (single-tier) overlays cannot address the routing needs in large (geographically dispersed) networks.

Hierarchical (Multi-tier) overlays with different intra/inter-overlay routing solutions can better address these needs. +Moreover, using message aggregation schemes for grouping multiple messages can save bandwidth and provide better resilience against adversaries/peer churn.

This article's primary objective is to investigate the possible choices that can empower an unstructured P2P network to achieve superior performance for the broadest set of applications. +We look into different constraints imposed by application-specific needs (performance goals) and investigate various choices that can augment the network's performance. +We explore overlay designs/freshness, peer selection approaches, message-relaying mechanisms, and resilience against adversaries/peer churn. +We consider GossipSub a baseline protocol to explore various possibilities and decisively commit to the ones demonstrating superior performance. +We also discuss the current state and, where applicable, propose a strategic plan for embedding new features to the GossipSub protocol.

GOAL1: Low Latency Operation

Different applications, like blockchain, streaming, etc., impose strict time bounds on network-wide message dissemination latency. +A message delivered after the imposed time bounds is considered as dropped. +An early message delivery in applications like live streaming can further enhance the viewing quality.

The properties and nature of the overlay network topology significantly impact the performance of services and applications executed on top of them. +Studying and devising mechanisms for better overlay design and message dissemination is paramount to achieving superior performance.

Interestingly, shortest-path message delivery trees have many limitations:

1) Changing network dynamics requires a quicker and continuous readjustment of the multicast tree. +2) The presence of resource-constrained (bandwidth/compute, etc.) nodes in the overlay can result in congestion. +3) Node failure can result in partitions, making many segments unreachable. +4) Assuring a shortest-path tree-like structure requires a detailed view of the underlying (and continuously changing) network topology.

Solutions involve creating multiple random trees to add redundancy [2]. +Alternatives involve building an overlay mesh and forwarding messages through the multicast delivery tree (eager push).

Metadata is shared through the overlay links so that the nodes can ask for missing messages (lazy push or pull-based operation) through the overlay links. +New nodes are added from the overlay on node failure, but it requires non-faulty node selection.

GossipSub uses eager push (through overlay mesh) and lazy push (through IWANT messages).

The mesh degree D_{Low} \leq D \leq D_{High} is crucial in deciding message dissemination latency. +A smaller value for D results in higher latency due to increased rounds, whereas a higher D reduces latency at the cost of increased bandwidth. +At the same time, keeping D independent of the growing network size (N) may increase network-wide message dissemination latency. +Adjusting D with N maintains similar latency at the cost of increased workload for peers. +Authors in [3] suggest only a logarithmic increase in D to maintain a manageable workload for peers. +In [4], it is reported that the average mesh degree should not exceed D_{avg} = \ln(N) + C for an optimal operation, +where C is a small constant.

Moreover, quicker shuffling of peers results in better performance in the presence of resource-constrained nodes or node failure [4].

GOAL2: Considering Heterogeneity In Overlay Design

Random peering connections in P2P overlays represent a stochastic process. It is inherently difficult to precisely model the performance of such systems. +Most of the research on P2P networks provides simulation results assuming nodes with similar capabilities. +The aspect of dissimilar capabilities and resource-constrained nodes is less explored.

It is discussed in GOAL1 that overlay mesh results in better performance if D_{avg} does not exceed \ln(N) + C. +Enforcing all the nodes to have approximately \ln(N) + C peers makes resource-rich nodes under-utilized, while resource-constrained nodes are overloaded. +At the same time, connecting high-bandwidth nodes through a low-bandwidth node undermines the network's performance. +Ideally, the workload on any node should not exceed its available resources. +A better solution involves a two-phased operation:

  1. Every node computes its available bandwidth and selects a node degree D proportional to its available bandwidth [4]. +Different bandwidth estimation approaches are suggested in literature [5,6]. +Simple bandwidth estimation approaches like variable packet size probing [6] yield similar results with less complexity. +It is also worth mentioning that many nodes may want to allocate only a capped share of their bandwidth to the network. +Lowering D according to the available bandwidth can still prove helpful. +Additionally, bandwidth preservation at the transport layer through approaches like µTP can be useful. +To further conform to the suggested mesh-degree average D_{avg}, every node tries achieving this average within its neighborhood, resulting in an overall similar D_{avg}.

  2. From the available local view, every node tries connecting peers with the lowest latency until D connections are made. +We suggest referring to the peering solution discussed in GOAL5 to avoid network partitioning.

The current GossipSub design considers homogeneous peers, and every node tries maintaining D_{Low} \leq D \leq D_{High} connections.

GOAL3: Bandwidth Optimization

Redundant message transmissions are essential for handling adversaries/node failure. However, these transmissions result in traffic bursts, cramming many overlay links. +This not only adds to the network-wide message dissemination latency but a significant share of the network's bandwidth is wasted on (usually) unnecessary transmissions. +It is essential to explore solutions that can minimize the number of redundant transmissions while assuring resilience against node failures.

Many efforts have been made to minimize the impact of redundant transmissions. +These solutions include multicast delivery trees, metadata sharing to enable pull-based operation, in-network information caching, etc. [7,8]. +GossipSub employs a hybrid of eager push (message dissemination through the overlay) and lazy push (a pull-based operation by the nodes requiring information through IWANT messages).

A better alternative to simple redundant transmission is to use message aggregation [9,10,11] for the GossipSub protocol. +As a result, redundant message transmissions can serve as a critical advantage of the GossipSub protocol. +Suppose that we have three equal-length messages x_1, x_2, x_3. Assuming an XOR coding function, +we know two trivial properties: x_1 \oplus x_2 \oplus x_2 = x_1 and \vert x_1 \vert = \vert x_1 \oplus x_2 \oplus x_2 \vert.

This implies that instead of sending messages individually, we can encode and transmit composite message(s) to the network. +The receiver can reconstruct the original message from encoded segments. +As a result, fewer transmissions are sufficient for sending more messages to the network.

However, sharing linear combinations of messages requires organizing messages in intervals, +and devising techniques to identify all messages belonging to each interval. +In addition, combining messages from different publishers requires more complex arrangements, +involving embedding publisher/message IDs, delayed forwarding (to accommodate more messages), and mechanisms to ensure the decoding of messages at all peers. +Careful application-specific need analysis can help decide the benefits against the added complexity.

GOAL4: Handling Large Messages

Many applications require transferring large messages for their successful operation. For instance, database/blockchain transactions [12]. +This introduces two challenges:

1) Redundant large message transmissions result in severe network congestion. +2) Message transmissions follow a store/forward process at all peers, which is inefficient in the case of large messages.

The above-mentioned challenges result in a noticeable increase in message dissemination latency and bandwidth wastage. +Most of the work done for handling large messages involves curtailing redundant transmissions using multicast delivery trees, +reducing the number of fanout nodes, employing in-network message caching, pull-based operation, etc.

Approaches like message aggregation also prove helpful in minimizing bandwidth wastage.

Our recent work on GossipSub improvements (still a work in progress) suggests the following solutions to deal with large message transmissions:

  1. Using IDontWant message proposal [13] and staggered sending.

    IDontWant message helps curtail redundant transmissions by letting other peers know we have already received the message. +Staggered sending enables relaying the message to a short subset of peers in each round. +We argue that simultaneously relaying a message to all peers hampers the effectiveness of the IDontWant message. +Therefore, using the IDontWant message with staggered sending can yield better results by allowing timely reception and processing of IDontWant messages.

  2. Message transmissions follow a store/forward process at all peers that is inefficient in the case of large messages. +We can parallelize message transmission by partitioning large messages into smaller fragments, letting intermediate peers relay these fragments as soon as they receive them.

GOAL5: Scalability

P2P networks are inherently scalable because every incoming node brings in bandwidth and compute resources. +In other words, we can keep adding nodes to the network as long as every incoming node brings at least R \times D bandwidth, +where R is the average data arrival rate. +It is worth mentioning that network-wide message dissemination requires at least \lceil \log_D (N) \rceil hops. +Therefore, increasing network size increases message dissemination latency, assuming D is independent of the network size.

Additionally, problems like peer churn, adversaries, heterogeneity, distributed operation, etc., significantly hamper the network's performance. +Most efforts for bringing scalability to the P2P systems have focused on curtailing redundant transmissions and flat overlay adjustments. +Hierarchical overlay designs, on the other hand, are less explored.

Placing a logical structure in unstructured P2P systems can help scale P2P networks.

One possible solution is to use a hierarchical overlay inspired by the approaches [14,15,16]. +An abstract operation of such overlay design is provided below:

  1. Clustering nodes based on locality, assuming that such peers will have relatively lower intra-cluster latency and higher bandwidth. +For this purpose, every node tries connecting peers with the lowest latency until D connections are made or the cluster limit is reached.

  2. A small subset of nodes having the highest bandwidth and compute resources is selected from each cluster. +These super nodes form a fully connected mesh and jointly act as a virtual node, +mitigating the problem of peer churn among super nodes.

  3. Virtual nodes form a fully connected mesh to construct a hierarchical overlay. +Each virtual node is essentially a collection of super nodes; +a link to any of the constituent super nodes represents a link to the virtual node.

  4. One possible idea is to use GossipSub for intra-cluster message dissemination and FloodSub for inter-cluster message dissemination.

Summary

Overlay acts as a virtual backbone for a P2P network. A flat overlay is more straightforward and allows effortless readjustment to application needs. +On the other hand, a hierarchical overlay can bring scalability at the cost of increased complexity. +Regardless of the overlay design, a continuous readjustment to appropriate peering links is essential for superior performance. +At the same time, bandwidth preservation (through message aggregation, caching at strategic locations, metadata sharing, pull-based operation, etc.) can help minimize latency. +However, problems like peer churn and in-network adversaries can be best alleviated through balanced redundant coverage, and frequent reshuffling of the peering links.

References

  • [1] D. Vyzovitis, Y. Napora, D. McCormick, D. Dias, and Y. Psaras, “Gossipsub: Attack-resilient message propagation in the filecoin and eth2.0 networks,” arXiv preprint arXiv:2007.02754, 2020. Retrieved from https://arxiv.org/pdf/2007.02754.pdf
  • [2] M. Matos, V. Schiavoni, P. Felber, R. Oliveira, and E. Riviere, “Brisa: Combining efficiency and reliability in epidemic data dissemination,” in 2012 IEEE 26th International Parallel and Distributed Processing Symposium. IEEE, 2012, pp. 983–994. Retrieved from https://ieeexplore.ieee.org/abstract/document/6267905
  • [3] P. T. Eugster, R. Guerraoui, A. M. Kermarrec, and L. Massoulié, “Epidemic information dissemination in distributed systems,” IEEE Computer, vol. 37, no. 5, 2004. Retrieved from https://infoscience.epfl.ch/record/83478/files/EugGueKerMas04IEEEComp.pdf
  • [4] D. Frey, “Epidemic protocols: From large scale to big data,” Ph.D. dissertation, Universite De Rennes 1, 2019. Retrieved from https://inria.hal.science/tel-02375909/document
  • [5] M. Jain and C. Dovrolis, “End-to-end available bandwidth: measurement methodology, dynamics, and relation with tcp throughput,” IEEE/ACM Transactions on networking, vol. 11, no. 4, pp. 537–549, 2003. Retrieved from https://ieeexplore.ieee.org/abstract/document/1224454
  • [6] R. Prasad, C. Dovrolis, M. Murray, and K. Claffy, “Bandwidth estimation: metrics, measurement techniques, and tools,” IEEE network, vol. 17, no. 6, pp. 27–35, 2003. Retrieved from https://ieeexplore.ieee.org/abstract/document/1248658
  • [7] D. Kostic, A. Rodriguez, J. Albrecht, and A. Vahdat, “Bullet: High bandwidth data dissemination using an overlay mesh,” in Proceedings of the nineteenth ACM symposium on Operating systems principles, 2003, pp. 282–297. Retrieved from https://dl.acm.org/doi/abs/10.1145/945445.945473
  • [8] V. Pai, K. Kumar, K. Tamilmani, V. Sambamurthy, and A. E. Mohr, “Chainsaw: Eliminating trees from overlay multicast,” in Peer-to-Peer Systems IV: 4th International Workshop, IPTPS 2005, Ithaca, NY, USA, February 24-25, 2005. Revised Selected Papers 4. Springer, 2005, pp. 127–140. Retrieved from https://link.springer.com/chapter/10.1007/11558989_12
  • [9] Y.-D. Bromberg, Q. Dufour, and D. Frey, “Multisource rumor spreading with network coding,” in IEEE INFOCOM 2019-IEEE Conference on Computer Communications. IEEE, 2019, pp. 2359–2367. Retrieved from https://ieeexplore.ieee.org/abstract/document/8737576
  • [10] B. Haeupler, “Analyzing network coding gossip made easy,” in Proceedings of the forty-third annual ACM symposium on Theory of computing, 2011, pp. 293–302. Retrieved from https://dl.acm.org/doi/abs/10.1145/1993636.1993676
  • [11] S. Yu and Z. Li, “Massive data delivery in unstructured peer-to-peer networks with network coding,” in 6th IEEE/ACIS International Conference on Computer and Information Science (ICIS 2007). IEEE, 2007, pp. 592–597. Retrieved from https://ieeexplore.ieee.org/abstract/document/4276446
  • [12] V. Buterin, D. Feist, D. Loerakker, G. Kadianakis, M. Garnett, M. Taiwo, and A. Dietrichs, “Eip-4844: Shard blob transactions scale data-availability of ethereum in a simple, forwards-compatible manner,” 2022. Retrieved from https://eips.ethereum.org/EIPS/eip-4844
  • [13] A. Manning, “Gossipsub extension for epidemic meshes (v1.2.0),” 2022. Retrieved from https://github.com/libp2p/specs/pull/413
  • [14] Z. Duan, C. Tian, M. Zhou, X. Wang, N. Zhang, H. Du, and L. Wang, “Two-layer hybrid peer-to-peer networks,” Peer-to-Peer Networking and Applications, vol. 10, pp. 1304–1322, 2017. Retrieved from https://link.springer.com/article/10.1007/s12083-016-0460-5
  • [15] W. Hao, J. Zeng, X. Dai, J. Xiao, Q. Hua, H. Chen, K.-C. Li, and H. Jin, “Blockp2p: Enabling fast blockchain broadcast with scalable peer-to-peer network topology,” in Green, Pervasive, and Cloud Computing: 14th International Conference, GPC 2019, Uberlandia, Brazil, May 26–28, 2019, Proceedings 14. Springer, 2019, pp. 223–237. Retrieved from https://link.springer.com/chapter/10.1007/978-3-030-19223-5_16
  • [16] H. Qiu, T. Ji, S. Zhao, X. Chen, J. Qi, H. Cui, and S. Wang, “A geography-based p2p overlay network for fast and robust blockchain systems,” IEEE Transactions on Services Computing, 2022. Retrieved from https://ieeexplore.ieee.org/abstract/document/9826458
]]>
+ + Umar Farooq + +
+ + <![CDATA[Nescience - A zkVM leveraging hiding properties]]> + https://vac.dev/rlog/Nescience-A-zkVM-leveraging-hiding-properties + + 2023-08-28T12:00:00.000Z + + Nescience, a privacy-first blockchain zkVM.

Introduction

Nescience is a privacy-first blockchain project that aims to enable private transactions and provide a general-purpose execution environment for classical applications. +The goals include creating a state separation architecture for public/private computation, +designing a versatile virtual machine based on mainstream instruction sets, +creating proofs for private state updates, implementing a kernel-based architecture for correct execution of private functions, +and implementing core DeFi protocols such as AMMs and staking from a privacy perspective.

It intends to create a user experience that is similar to public blockchains, but with additional privacy features that users can leverage at will. +To achieve this goal, Nescience will implement a versatile virtual machine that can be used to implement existing blockchain applications, +while also enabling the development of privacy-centric protocols such as private staking and private DEXs.

To ensure minimal trust assumptions and prevent information leakage, Nescience proposes a proof system that allows users to create proofs for private state updates, +while the verification of the proofs and the execution of the public functions inside the virtual machine can be delegated to an external incentivised prover.

It also aims to implement a seamless interaction between public and private state, enabling composability between contracts, and private and public functions. +Finally, Nescience intends to implement permissive licensing, which means that the source code will be open-source, +and developers will be able to use and modify the code without any restriction.

Our primary objective is the construction of the Zero-Knowledge Virtual Machine (zkVM). This document serves as a detailed exploration of the multifaceted challenges, +potential solutions, and alternatives that lay ahead. Each step is a testament to our commitment to thoroughness; +we systematically test various possibilities and decisively commit to the one that demonstrates paramount performance and utility. +For instance, as we progress towards achieving Goal 2, we are undertaking a rigorous benchmarking of the Nova proof system against its contemporaries. +Should Nova showcase superior performance metrics, we stand ready to integrate it as our proof system of choice. Through such meticulous approaches, +we not only reinforce the foundation of our project but also ensure its scalability and robustness in the ever-evolving landscape of blockchain technology.

Goal 1: Create a State Separation Architecture

The initial goal revolves around crafting a distinctive architecture that segregates public and private computations, +employing an account-based framework for the public state and a UTXO-based structure for the private state.

The UTXO model [1,2], notably utilized in Bitcoin, generates new UTXOs to serve future transactions, +while the account-based paradigm assigns balances to accounts that transactions can modify. +Although the UTXO model bolsters privacy by concealing comprehensive balances, +the pursuit of a dual architecture mandates a meticulous synchronization of these state models, +ensuring that private transactions remain inconspicuous in the wider public network state.

This task is further complicated by the divergent transaction processing methods intrinsic to each model, +necessitating a thoughtful and innovative approach to harmonize their functionality. +To seamlessly bring together the dual architecture, harmonizing the account-based model for public state with the UTXO-based model for private state, +a comprehensive strategy is essential.

The concept of blending an account-based structure with a UTXO-based model for differentiating between public and private states is intriguing. +It seeks to leverage the strengths of both models: the simplicity and directness of the account-based model with the privacy enhancements of the UTXO model.

Here's a breakdown and a potential strategy for harmonizing these models:

Rationale Behind the Dual Architecture:

  • Account-Based Model: This model is intuitive and easy to work with. Every participant has an account, +and transactions directly modify the balances of these accounts. It's conducive for smart contracts and a broad range of applications.

  • UTXO-Based Model: This model treats every transaction as a new output, which can then be used as an input for future transactions. +By not explicitly associating transaction outputs with user identities, it offers a degree of privacy.

Harmonizing the Two Systems:

  1. Translation Layer

    • Role: Interface between UTXO and account-based states.

    • UTXO-to-Account Adapter: When UTXOs are spent, the adapter can translate these into the corresponding account balance modifications. +This could involve creating a temporary 'pseudo-account' that mirrors the +UTXO's attributes.

    • Account-to-UTXO Adapter: When an account wishes to make a private transaction, +it would initiate a process converting a part of its balance to a UTXO, facilitating a privacy transaction.

  2. Unified Identity Management

    • Role: Maintain a unified identity (or address) system that works across both state models, +allowing users to easily manage their public and private states without requiring separate identities.

    • Deterministic Wallets: Use Hierarchical Deterministic (HD) wallets [3,4], enabling users to generate multiple addresses (both UTXO and account-based) from a single seed. +This ensures privacy while keeping management centralized for the user.

  3. State Commitments

    • Role: Use cryptographic commitments to commit to the state of both models. This can help in efficiently validating cross-model transactions.

    • Verkle Trees: Verkle Trees combine Vector Commitment and the KZG polynomial commitment scheme to produce a structure that's efficient in terms of both proofs and verification. +Verkle proofs are considerably small in size (less data to store and transmit), and transaction and state verifications can be faster due to the smaller proof sizes and computational efficiencies.

    • Mimblewimble-style Aggregation [5]: For UTXOs, techniques similar to those used in Mimblewimble can be used to aggregate transactions, keeping the state compact and enhancing privacy.

  4. Batch Processing & Anonymity Sets

    • Role: Group several UTXO-based private transactions into a single public account-based transaction. +This can provide a level of obfuscation and can make synchronization between the two models more efficient.

    • CoinJoin Technique [6]: As seen in Bitcoin, multiple users can combine their UTXO transactions into one, enhancing privacy.

    • Tornado Cash Principle [7]: For account-based systems wanting to achieve privacy, methods like those used in Tornado Cash can be implemented, +providing zk-SNARKs-based private transactions.

  5. Event Hooks & Smart Contracts

    • Role: Implement event-driven mechanisms that trigger specific actions in one model based on events in the other. +For instance, a private transaction (UTXO-based) can trigger a corresponding public notification or event in the account-based model.

    • Conditional Execution: Smart contracts could be set to execute based on events in the UTXO system. For instance, +a smart contract might release funds (account-based) once a specific UTXO is spent.

    • Privacy Smart Contracts: Using zk-SNARKs or zk-STARKs to bring privacy to the smart contract layer, +allowing for private logic execution.

Challenges and Solutions

  1. Synchronization Overhead

    • Challenge: Combining two distinct transaction models creates an inherent synchronization challenge.

    • State Channels: By allowing transactions to be conducted off-chain between participants, state channels can alleviate synchronization stresses. +Only the final state needs to be settled on-chain, drastically reducing the amount of data and frequency of updates required.

    • Sidechains: These act as auxiliary chains to the main blockchain. Transactions can be processed on the sidechain and then periodically synced with the main chain. +This structure helps reduce the immediate load on the primary system.

    • Checkpointing: Introduce periodic checkpoints where the two systems' states are verified and harmonized. +This can ensure consistency without constant synchronization.

  2. Double Spending

    • Challenge: With two models operating in tandem, there's an increased risk of double-spending attacks.

    • Multi-Signature Transactions: Implementing transactions that require signatures from both systems can prevent unauthorized movements.

    • Cross-Verification Mechanisms: Before finalizing a transaction, it undergoes verification in both UTXO and account-based systems. +If discrepancies arise, the transaction can be halted.

    • Timestamping: By attaching a timestamp to each transaction, it's possible to order them sequentially, making it easier to spot and prevent double spending.

  3. Complexity in User Experience

    • Challenge: The dual model, while powerful, is inherently complex.

    • Abstracted User Interfaces: Design UIs that handle the complexity behind the scenes, +allowing users to make transactions without needing to understand the nuances of the dual model.

    • Guided Tutorials: Offer onboarding tutorials to acquaint users with the system's features, +especially emphasizing when and why they might choose one transaction type over the other.

    • Feedback Systems: Implement systems where users can provide feedback on any complexities or challenges they encounter. +This real-time feedback can be invaluable for iterative design improvements.

  4. Security

    • Challenge: Merging two systems can introduce unforeseen vulnerabilities.

    • Threat Modeling: Regularly conduct threat modeling exercises to anticipate potential attack vectors, +especially those that might exploit the interaction between the two systems.

    • Layered Security Protocols: Beyond regular audits, introduce multiple layers of security checks. +Each layer can act as a fail-safe if a potential threat bypasses another.

    • Decentralized Watchtowers: These are third-party services that monitor the network for malicious activities. +If any suspicious activity is detected, they can take corrective measures or raise alerts.

  5. Gas & Fee Management

    • Challenge: A dual model can lead to convoluted fee structures.

    • Dynamic Fee Adjustment: Implement algorithms that adjust fees based on network congestion and transaction type. +This can ensure fairness and prevent network abuse.

    • Fee Estimation Tools: Provide tools that can estimate fees before a transaction is initiated. +This helps users understand potential costs upfront.

    • Unified Gas Stations: Design platforms where users can purchase or allocate gas for both transaction types simultaneously, +simplifying the gas acquisition process.

By addressing these challenges head-on with a detailed and systematic approach, it's possible to unlock the full potential of a dual-architecture system, +combining the strengths of both UTXO and account-based models without their standalone limitations.

| Aspect | Details |
| --- | --- |
| Harmony | - Advanced VM Development: Design tailored for private smart contracts. - Leverage Established Architectures: Use WASM or RISC-V to harness their versatile and encompassing nature suitable for zero-knowledge applications. - Support for UTXO & Account-Based Models: Enhance adaptability across various blockchain structures. |
| Challenges | - Adaptation Concerns: WASM and RISC-V weren't designed with zero-knowledge proofs as a primary focus, posing integration challenges. - Complexities with Newer Systems: Systems like (Super)Nova, STARKs, and Sangria are relatively nascent, adding another layer of intricacy to the integration. - Optimization Concerns: Ensuring that these systems are optimized for zero-knowledge proofs. |
| Proposed Solutions | - Integration of Nova: Consider Nova's proof system for its potential alignment with project goals. - Comprehensive Testing: Rigorously test and benchmark against alternatives like Halo2, Plonky, and Starky to validate choices. - Poseidon Recursion Technique: To conduct exhaustive performance tests, providing insights into each system's efficiency and scalability. |

Goal 2: Virtual Machine Creation

The second goal entails the creation of an advanced virtual machine by leveraging established mainstream instruction sets like WASM or RISC-V. +Alternatively, the objective involves pioneering a new, specialized instruction set meticulously optimized for Zero-Knowledge applications.

This initiative seeks to foster a versatile and efficient environment for executing computations within the privacy-focused context of the project. +Both WASM and RISC-V exhibit adaptability to both UTXO and account-based models due to their encompassing nature as general-purpose instruction set architectures.

WASM, operating as a low-level virtual machine, possesses the capacity to execute code derived from a myriad of high-level programming languages, +and boasts seamless integration across diverse blockchain platforms.

Meanwhile, RISC-V emerges as a versatile option, accommodating both models, and can be seamlessly integrated with secure enclaves like SGX or TEE, +elevating the levels of security and privacy. However, it is crucial to acknowledge that employing WASM or RISC-V might present challenges, +given their original design without specific emphasis on optimizing for Zero-Knowledge Proofs (ZKPs).

Further complexity arises with the consideration of more potent proof systems like (Super)Nova, STARKs, and Sangria, which, +while potentially addressing optimization concerns, necessitate extensive research and testing due to their relatively nascent status within the field. +This accentuates the need for a judicious balance between established options and innovative solutions in pursuit of an architecture harmoniously amalgamating privacy, security, and performance.

The ambition to build a powerful virtual machine tailored to zero-knowledge (ZK) applications is both commendable and intricate. +The combination of two renowned instruction sets, WASM and RISC-V, in tandem with ZK, is an innovation that could redefine privacy standards in blockchain. +Let's dissect the challenges and possibilities inherent in this goal:

  1. Established Mainstream Instruction Sets - WASM and RISC-V

    • Strengths:

      • WASM: Rooted in its ability to execute diverse high-level language codes, its potential for cross-chain compatibility makes it a formidable contender. +Serving as a low-level virtual machine, its role in the blockchain realm is analogous to that of the Java Virtual Machine in the traditional computing landscape.

      • RISC-V: This open-standard instruction set architecture has made waves due to its customizable nature. +Its adaptability to both UTXO and account-based structures coupled with its compatibility with trusted execution environments like SGX and TEE augments its appeal, +especially in domains that prioritize security and privacy.

    • Challenges: Neither WASM nor RISC-V was primarily designed with ZKPs in mind. While they offer flexibility, +they might lack the necessary optimizations for ZK-centric tasks. Adjustments to these architectures might demand intensive R&D efforts.

  2. Pioneering a New, Specialized Instruction Set

    • Strengths: A bespoke instruction set can be meticulously designed from the ground up with ZK in focus, +potentially offering unmatched performance and optimizations tailored to the project's requirements.

    • Challenges: Crafting a new instruction set is a monumental task requiring vast resources, including expertise, time, and capital. +It would also need to garner community trust and support over time.

  3. Contemporary Proof Systems - (Super)Nova, STARKs, Sangria

    • Strengths: These cutting-edge systems, being relatively new, might offer breakthrough cryptographic efficiencies that older systems lack: designed with modern challenges in mind, +they could potentially bridge the gap where WASM and RISC-V might falter in terms of ZKP optimization.

    • Challenges: Their nascent nature implies a dearth of exhaustive testing, peer reviews, and potentially limited community support. +The unknowns associated with these systems could introduce unforeseen vulnerabilities or complexities. +While they could offer optimizations that address challenges presented by WASM and RISC-V, their young status demands rigorous vetting and testing.

|  | Mainstream (WASM, RISC-V) | ZK-optimized (New Instruction Set) |
| --- | --- | --- |
| Existing Tooling | YES | NO |
| Blockchain-focused | NO | YES |
| Performant | DEPENDS | YES |

Optimization Concerns for WASM and RISC-V:

  • Cryptography Libraries: ZKP applications rely heavily on specific cryptographic primitives. Neither WASM nor RISC-V natively supports all of these primitives. +Thus, a comprehensive library of cryptographic functions, optimized for these platforms, needs to be developed.

  • Parallel Execution: Given the heavy computational demands of ZKPs, leveraging parallel processing capabilities can optimize the time taken. +Both WASM and RISC-V would need modifications to handle parallel execution of ZKP processes efficiently.

  • Memory Management: ZKP computations can sometimes require significant amounts of memory, especially during the proof generation phase. +Fine-tuned memory management mechanisms are essential to prevent bottlenecks.

Emerging ZKP Optimized Systems Considerations:

  • Proof Size: Different systems generate proofs of varying sizes. A smaller proof size is preferable for blockchain applications to save on storage and bandwidth. +The trade-offs between proof size, computational efficiency, and security need to be balanced.

  • Universality: Some systems can support any computational statement (universal), while others might be tailored to specific tasks. +A universal system can be more versatile for diverse applications on the blockchain.

  • Setup Requirements: Certain ZKP systems, like zk-SNARKs, require a trusted setup, which can be a security concern. +Alternatives like zk-STARKs don't have this requirement but come with other trade-offs.

Strategies for Integration:

  • Iterative Development: Given the complexities, an iterative development approach can be beneficial. +Start with a basic integration of WASM or RISC-V for general tasks and gradually introduce specialized ZKP functionalities.

  • Benchmarking: Establish benchmark tests specifically for ZKP operations. This will provide continuous feedback on the performance of the system as modifications are made, ensuring optimization.

  • External Audits & Research: Regular checks from cryptographic experts and collaboration with academic researchers can help in staying updated and ensuring secure implementations.

Goal 3: Proofs Creation and Verification

The process of generating proofs for private state updates is vested in the hands of the user, aligning with our commitment to minimizing trust assumptions and enhancing privacy. +Concurrently, the responsibility of verifying these proofs and executing public functions within the virtual machine can be effectively delegated to an external prover, +a role that is incentivized to operate with utmost honesty and integrity. This intricate balance seeks to safeguard against information leakage, +preserving the confidentiality of private transactions. Integral to this mechanism is the establishment of a robust incentivization framework.

To ensure the prover’s steadfast commitment to performing tasks with honesty, we should introduce a mechanism that facilitates both rewards for sincere behavior and penalties for any deviation from the expected standards. +This two-pronged approach serves as a compelling deterrent against dishonest behavior and fosters an environment of accountability. +In addition to incentivization, a crucial consideration is the economic aspect of verification and execution. +The verification process has been intentionally designed to be more cost-effective than execution.

This strategic approach prevents potential malicious actors from exploiting the system by flooding it with spurious proofs, a scenario that could arise when the costs align favorably. +By maintaining a cost balance that favors verification, we bolster the system’s resilience against fraudulent activities while ensuring its efficiency. +In sum, our multifaceted approach endeavors to strike an intricate equilibrium between user-initiated proof creation, external verification, and incentivization. +This delicate interplay of mechanisms ensures a level of trustworthiness that hinges on transparency, accountability, and economic viability.

As a result, we are poised to cultivate an ecosystem where users’ privacy is preserved, incentives are aligned, +and the overall integrity of the system is fortified against potential adversarial actions. To achieve the goals of user-initiated proof creation, +external verification, incentivization, and cost-effective verification over execution, several options and mechanisms can be employed:

  1. User-Initiated Proof Creation: Users are entrusted with the generation of proofs for private state updates, thus ensuring greater privacy and reducing trust dependencies.

    • Challenges:

      • Maintaining the quality and integrity of the proofs generated by users.

      • Ensuring that users have the tools and knowledge to produce valid proofs.

    • Solutions:

      • Offer extensive documentation, tutorials, and user-friendly tools to streamline the proof-generation process.

      • Implement checks at the verifier's end to ensure the quality of proofs.

  2. External Verification by Provers: An external prover verifies the proofs and executes public functions within the virtual machine.

    • Challenges:

      • Ensuring that the external prover acts honestly.

      • Avoiding centralized points of failure.

    • Solutions:

      • Adopt a decentralized verification approach, with multiple provers cross-verifying each other’s work.

      • Use reputation systems to rank provers based on their past performances, creating a trust hierarchy.

  3. Incentivization Framework: A system that rewards honesty and penalizes dishonest actions, ensuring provers' commitment to the task.

    • Challenges:

      • Determining the right balance of rewards and penalties.

      • Ensuring that the system cannot be gamed for undue advantage.

    • Solutions:

      • Implement a dynamic reward system that adjusts based on network metrics and provers' performance.

      • Use a staking mechanism where provers need to lock up a certain amount of assets. +Honest behavior earns rewards, while dishonest behavior could lead to loss of staked assets.

  4. Economic Viability through Cost Dynamics: Making verification more cost-effective than execution to deter spamming and malicious attacks.

    • Challenges:

      • Setting the right cost metrics for both verification and execution.

      • Ensuring that genuine users aren’t priced out of the system.

    • Solutions:

      • Use a dynamic pricing model, adjusting costs in real-time based on network demand.

      • Implement gas-like mechanisms to differentiate operation costs and ensure fairness.

  5. Maintaining Trustworthiness: Create a system that's transparent, holds all actors accountable, and is economically sound.

    • Challenges:

      • Keeping the balance where users feel their privacy is intact, while provers feel incentivized.

      • Ensuring the system remains resilient against adversarial attacks.

    • Solutions:

      • Implement layered checks and balances.

      • Foster community involvement, allowing them to participate in decision-making, potentially through a decentralized autonomous organization (DAO).

Each of these options can be combined or customized to suit the specific requirements of your project, striking a balance between user incentives, +cost dynamics, and verification integrity. A thoughtful combination of these mechanisms ensures that the system remains robust, resilient, +and conducive to the objectives of user-initiated proof creation, incentivized verification, and cost-effective validation.

| Aspect | Details |
| --- | --- |
| Design Principle | - User Responsibility: Generating proofs for private state updates. - External Prover: Delegated the task of verifying proofs and executing public VM functions. |
| Trust & Privacy | - Minimized Trust Assumptions: Place proof generation in users' hands. - Enhanced Privacy: Ensure confidentiality of private transactions and prevent information leakage. |
| Incentivization Framework | - Rewards: Compensate honest behavior. - Penalties: Deter and penalize dishonest behavior. |
| Economic Considerations | - Verification vs. Execution: Make verification more cost-effective than execution to prevent spurious proofs flooding. - Cost Balance: Strengthen resilience against fraudulent activities and maintain efficiency. |
| Outcome | An ecosystem where: - Users' privacy is paramount. - Incentives are appropriately aligned. - The system is robust against adversarial actions. |

Goal 4: Kernel-based Architecture Implementation

This goal centers on the establishment of a kernel-based architecture, akin to the model observed in ZEXE, to facilitate the attestation of accurate private function executions. +This innovative approach employs recursion to construct a call stack, which is then validated through iterative recursive computations. +At its core, this technique harnesses a recursive Succinct Non-Interactive Argument of Knowledge (SNARK) mechanism, where each function call’s proof accumulates within the call stack.

The subsequent verification of this stack’s authenticity leverages recursive SNARK validation. +While this method offers robust verification of private function executions, it’s essential to acknowledge its associated intricacies.

The generation of SNARK proofs necessitates a substantial computational effort, which, in turn, may lead to elevated gas fees for users. +Moreover, the iterative recursive computations could potentially exhibit computational expansion as the depth of recursion increases. +This calls for a meticulous balance between the benefits of recursive verification and the resource implications it may entail.

In essence, Goal 4 embodies a pursuit of enhanced verification accuracy through a kernel-based architecture. +By weaving recursion and iterative recursive computations into the fabric of our system, we aim to establish a mechanism that accentuates the trustworthiness of private function executions, +while conscientiously navigating the computational demands that ensue.

To accomplish the goal of implementing a kernel-based architecture for recursive verification of private function executions, +several strategic steps and considerations can be undertaken: recursion handling and depth management.

Recursion Handling
  • Call Stack Management:

    • Implement a data structure to manage the call stack, recording each recursive function call’s details, parameters, and state.
  • Proof Accumulation:

    • Design a mechanism to accumulate proof data for each function call within the call stack. +This includes cryptographic commitments, intermediate results, and cryptographic challenges.

    • Ensure that the accumulated proof data remains secure and tamper-resistant throughout the recursion process.

  • Intermediary SNARK Proofs:

    • Develop an intermediary SNARK proof for each function call’s correctness within the call stack. +This proof should demonstrate that the function executed correctly and produced expected outputs.

    • Ensure that the intermediary SNARK proof for each recursive call can be aggregated and verified together, maintaining the integrity of the entire call stack.

Depth management
  • Depth Limitation:

    • Define a threshold for the maximum allowable recursion depth based on the system’s computational capacity, gas limitations, and performance considerations.

    • Implement a mechanism to prevent further recursion beyond the defined depth limit, safeguarding against excessive computational growth.

  • Graceful Degradation:

    • Design a strategy for graceful degradation when the recursion depth approaches or reaches the defined limit. +This may involve transitioning to alternative execution modes or optimization techniques.

    • Communicate the degradation strategy to users and ensure that the system gracefully handles scenarios where recursion must be curtailed.

  • Resource Monitoring:

    • Develop tools to monitor resource consumption (such as gas usage and computational time) as recursion progresses. +Provide real-time feedback to users about the cost and impact of recursive execution.
  • Dynamic Depth Adjustment:

    • Consider implementing adaptive depth management that dynamically adjusts the recursion depth based on network conditions, transaction fees, and available resources.

    • Utilize algorithms to assess the optimal recursion depth for efficient execution while adhering to gas cost constraints.

  • Fallback Mechanisms:

    • Create fallback mechanisms that activate if the recursion depth limit is reached or if the system encounters resource constraints. +These mechanisms could involve alternative verification methods or delayed execution.
  • User Notifications:

    • Notify users when the recursion depth limit is approaching, enabling them to make informed decisions about the complexity of their transactions and potential resource usage.

Goal 4 underscores the project's ambition to integrate the merits of a kernel-based architecture with recursive verifications to bolster the reliability of private function executions. +While the approach promises robust outcomes, it's pivotal to maneuver through its intricacies with astute strategies, ensuring computational efficiency and economic viability. +By striking this balance, the architecture can realize its full potential in ensuring trustworthy and efficient private function executions.

Goal 5: Seamless Interaction Design

Goal 5 revolves around the meticulous design of a seamless interaction between public and private states within the blockchain ecosystem. +This objective envisions achieving not only composability between contracts but also the harmonious integration of private and public functions.

A notable challenge in this endeavor lies in the intricate interplay between public and private states, +wherein the potential linkage of a private transaction to a public one raises concerns about unintended information leakage.

The essence of this goal entails crafting an architecture that facilitates the dynamic interaction of different states while ensuring that the privacy and confidentiality of private transactions remain unbreached. +This involves the formulation of mechanisms that enable secure composability between contracts, guaranteeing the integrity of interactions across different layers of functionality.

A key focus of this goal is to surmount the challenge of information leakage by implementing robust safeguards. The solution involves devising strategies to mitigate the risk of revealing private transaction details when connected to corresponding public actions. By creating a nuanced framework that compartmentalizes private and public interactions, the architecture aims to uphold privacy while facilitating seamless interoperability.

Goal 5 encapsulates a multifaceted undertaking, calling for the creation of an intricate yet transparent framework that empowers users to confidently engage in both public and private functions, +without compromising the confidentiality of private transactions. The successful realization of this vision hinges on a delicate blend of architectural ingenuity, cryptographic sophistication, and user-centric design.

To achieve seamless interaction between public and private states, composability, and privacy preservation, a combination of solutions and approaches can be employed. +In the table below, a comprehensive list of solutions that address these objectives:

Solution CategoryDescription
Layer 2 SolutionsEmploy zk-Rollups, Optimistic Rollups, and state channels to handle private interactions off-chain and settle them on-chain periodically. Boost scalability and cut transaction costs.
Intermediary Smart ContractsCraft smart contracts as intermediaries for secure public-private interactions. Use these to manage data exchange confidentially.
Decentralized Identity & PseudonymityImplement decentralized identity systems for pseudonymous interactions. Validate identity using cryptographic proofs.
Confidential Sidechains & Cross-ChainSet up confidential sidechains and employ cross-chain protocols to ensure privacy and composability across blockchains.
Temporal Data StructuresCreate chronological data structures for secure interactions. Utilize cryptographic methods for data integrity and privacy.
Homomorphic Encryption & MPCApply homomorphic encryption and MPC for computations on encrypted data and interactions between state layers.
Commit-Reveal SchemesIntroduce commit-reveal mechanisms for private transactions, revealing data only post necessary public actions.
Auditability & VerifiabilityUse on-chain tools for auditing and verifying interactions. Utilize cryptographic commitments for third-party validation.
Data Fragmentation & ShardingFragment data across shards for private interactions and curtailed data exposure. Bridge shards securely with cryptography.
Ring Signatures & CoinJoinIncorporate ring signatures and CoinJoin protocols to mask transaction details and mix transactions collaboratively.

Goal 6: Integration of DeFi Protocols with a Privacy-Preserving Framework

The primary aim of Goal 6 is to weave key DeFi protocols, such as AMMs and staking, into a user-centric environment that accentuates privacy. +This endeavor comes with inherent challenges, especially considering the heterogeneity of existing DeFi protocols, predominantly built on Ethereum. +These variations in programming languages and VMs exacerbate the quest for interoperability. Furthermore, the success and functionality of DeFi protocols is closely tied to liquidity, +which in turn is influenced by user engagement and the amount of funds locked into the system.

Strategic Roadmap for Goal 6

  1. Pioneering Privacy-Centric DeFi Models: Initiate the development of AMMs and staking solutions that are inherently protective of users' transactional privacy and identity.

  2. Specialized Smart Contracts with Privacy: Architect distinct smart contracts infused with privacy elements, setting the stage for secure user interactions within this new, confidential DeFi landscape.

  3. Optimized User Interfaces: Craft interfaces that resonate with user needs, simplifying the journey through the private DeFi space without compromising on security.

  4. Tackling Interoperability:

    • Deploy advanced bridge technologies and middleware tools to foster efficient data exchanges and guarantee operational harmony across a spectrum of programming paradigms and virtual environments.

    • Design and enforce universal communication guidelines that bridge the privacy-centric DeFi entities with the larger DeFi world seamlessly.

  1. Enhancing and Sustaining Liquidity:

    • Unveil innovative liquidity stimuli and yield farming incentives, compelling users to infuse liquidity into the private DeFi space.

    • Incorporate adaptive liquidity frameworks that continually adjust based on the evolving market demands, ensuring consistent liquidity.

    • Forge robust alliances with other DeFi stalwarts, jointly maximizing liquidity stores and honing sustainable token distribution strategies.

  1. Amplifying Community Engagement: Design and roll out enticing incentive schemes to rally users behind privacy-focused AMMs and staking systems, +thereby nurturing a vibrant, privacy-advocating DeFi community.

Through the integration of these approaches, we aim to achieve Goal 6, providing users with a privacy-focused platform for engaging effortlessly in core DeFi functions such as AMMs and staking, +all while effectively overcoming the obstacles related to interoperability and liquidity concerns.

Summary of the Architecture

In our quest to optimize privacy, we're proposing a Zero-Knowledge Virtual Machine (zkVM) that harnesses the power of Zero-Knowledge Proofs (ZKPs). These proofs ensure that while private state data remains undisclosed, public state transitions can still be carried out and subsequently verified by third parties. This blend of public and private state is envisaged to be achieved through a state tree representing the public state, while the encrypted state leaves stand for the private state. Each user's private state indicates validity through the absence of a corresponding nullifier. A nullifier is a unique cryptographic value generated in privacy-preserving blockchain transactions to prevent double-spending, ensuring that each private transaction is spent only once without revealing its details.

Private functions' execution mandates users to offer a proof underscoring the accurate execution of all encapsulated private calls. +For validating a singular private function call, we're leaning into the kernel-based model inspired by the ZEXE protocol. +Defined as kernel circuits, these functions validate the correct execution of each private function call. +Due to their recursive circuit structure, a succession of private function calls can be executed by calculating proofs in an iterative manner. +Execution-relevant data, like private and public call stacks and additions to the state tree, are incorporated as public inputs.

Our method integrates the verification keys for these functions within a merkle tree. Here's the innovation: a user's ZKP showcases the existence of the verification key in this tree, yet keeps the executed function concealed. +The unique function identifier can be presented as the verification key, with all contracts merkleized for hiding functionalities.

We suggest a nuanced shift from the ZEXE protocol's identity function, which crafts an identity for smart contracts delineating its behavior, access timeframes, and other functionalities. +Instead of the ZEXE protocol's structure, our approach pivots to a method anchored in the +security of a secret combined with the uniqueness from hashing with the contract address. +The underlying rationale is straightforward: the sender, equipped with a unique nonce and salt for the transaction, hashes the secret, payload, nonce, and salt. +This result is then hashed with the contract address for the final value. The hash function's unidirectional nature ensures that the input cannot be deduced easily from its output. +A specific concern, however, is the potential repetition of secret and payload values across transactions, which could jeopardize privacy. +Yet, by embedding the function's hash within the hash of the contract address, users can validate a specific function's execution without divulging the function, navigating this limitation.

Alternative routes do exist: We could employ signature schemes like ECDSA, focusing on uniqueness and authenticity, albeit at the cost of complex key management. +Fully Homomorphic Encryption (FHE) offers another pathway, enabling function execution on encrypted data, or Multi-Party Computation (MPC) which guarantees non-disclosure of function or inputs. +Yet, integrating ZKPs with either FHE or MPC presents a challenge. Combining cryptographic functions like SHA-3 and BLAKE2 can also bolster security and uniqueness. +It's imperative to entertain these alternatives, especially when hashing might not serve large input/output functions effectively or might fall short in guaranteeing uniqueness.

Current State

Our aim is to revolutionize the privacy and security paradigms through Nescience. +As we strive to set milestones and achieve groundbreaking advancements, +our current focus narrows onto the realization of Goal 2 and Goal 3.

Our endeavors to build a powerful virtual machine tailored for Zero-Knowledge applications have led us down the path of rigorous exploration and testing. +We believe that integrating the right proof system is pivotal to our project's success, which brings us to Nova [8]. +In our project journey, we have opted to integrate the Nova proof system, recognizing its potential alignment with our overarching goals. +However, as part of our meticulous approach to innovation and optimization, we acknowledge the need to thoroughly examine Nova’s performance capabilities, +particularly due to its status as a pioneering and relatively unexplored proof system.

This critical evaluation entails a comprehensive process of benchmarking and comparative analysis [9], +pitting Nova against other prominent proof systems in the field, including Halo2 [10], +Plonky2 [11], and Starky [12]. +This ongoing and methodical initiative is designed to ensure a fair and impartial assessment, enabling us to draw meaningful conclusions about Nova’s strengths and limitations in relation to its counterparts. +By leveraging the Poseidon recursion technique, we are poised to conduct an exhaustive performance test that delves into intricate details. +Through this testing framework, we aim to discern whether Nova possesses the potential to outshine its contemporaries in terms of efficiency, scalability, and overall performance. +The outcome of this rigorous evaluation will be pivotal in shaping our strategic decisions moving forward. +Armed with a comprehensive understanding of Nova’s performance metrics vis-à-vis other proof systems, +we can confidently chart a course that maximizes the benefits of our project’s optimization efforts.

Moreover, as we ambitiously pursue the establishment of a robust mechanism for proof creation and verification, our focus remains resolute on preserving user privacy, +incentivizing honest behaviour, and ensuring the cost-effective verification of transactions. +At the heart of this endeavor is our drive to empower users by allowing them the autonomy of generating proofs for private state updates, +thereby reducing dependencies and enhancing privacy. +We would like to actively work on providing comprehensive documentation, user-friendly tools, +and tutorials to aid users in this intricate process.

Parallelly, we're looking into decentralized verification processes, harnessing the strength of multiple external provers that cross-verify each other's work. +Our commitment is further cemented by our efforts to introduce a dynamic reward system that adjusts based on network metrics and prover performance. +This intricate balance, while challenging, aims to fortify our system against potential adversarial actions, aligning incentives, and preserving the overall integrity of the project.

References

[1] Nakamoto, S. (2008). Bitcoin: A Peer-to-Peer Electronic Cash System. Retrieved from https://bitcoin.org/bitcoin.pdf

[2] Sanchez, F. (2021). Cardano’s Extended UTXO accounting model. Retrieved from https://iohk.io/en/blog/posts/2021/03/11/cardanos-extended-utxo-accounting-model/

[3] Morgan, D. (2020). HD Wallets Explained: From High Level to Nuts and Bolts. Retrieved from https://medium.com/mycrypto/the-journey-from-mnemonic-phrase-to-address-6c5e86e11e14

[4] Wuille, P. (2012). Bitcoin Improvement Proposal (BIP) 32. Retrieved from https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki

[5] Jedusor, T. (2020). Introduction to Mimblewimble and Grin. Retrieved from https://github.com/mimblewimble/grin/blob/master/doc/intro.md

[6] Bitcoin's official wiki overview of the CoinJoin method. Retrieved from https://en.bitcoin.it/wiki/CoinJoin

[7] TornadoCash official Github page. Retrieved from https://github.com/tornadocash/tornado-classic-ui

[8] Kothapalli, A., Setty, S., Tzialla, I. (2021). Nova: Recursive Zero-Knowledge Arguments from Folding Schemes. Retrieved from https://eprint.iacr.org/2021/370

[9] ZKvm Github page. Retrieved from https://github.com/vacp2p/zk-explorations

[10] Electric Coin Company (2020). Explaining Halo 2. Retrieved from https://electriccoin.co/blog/explaining-halo-2/

[11] Polygon Labs (2022). Introducing Plonky2. Retrieved from https://polygon.technology/blog/introducing-plonky2

[12] StarkWare (2021). ethSTARK Documentation. Retrieved from https://eprint.iacr.org/2021/582


  1. Incentive Mechanisms:

    • Token Rewards: Design a token-based reward system where honest provers are compensated with tokens for their verification services. +This incentivizes participation and encourages integrity.

    • Staking and Slashing: Introduce a staking mechanism where provers deposit tokens as collateral. +Dishonest behavior results in slashing (partial or complete loss) of the staked tokens, while honest actions are rewarded.

    • Proof of Work/Proof of Stake: Implement a proof-of-work or proof-of-stake consensus mechanism for verification, aligning incentives with the blockchain’s broader consensus mechanism.

]]>
+ + Moudy + +
+ + <![CDATA[Device Pairing in Js-waku and Go-waku]]> + https://vac.dev/rlog/device-pairing-in-js-waku-and-go-waku + + 2023-04-24T12:00:00.000Z + + Device pairing and secure message exchange using Waku and noise protocol.

As the world becomes increasingly connected through the internet, the need for secure and reliable communication becomes paramount. In this article it is described how the Noise protocol can be used as a key-exchange mechanism for Waku.

Recently, this feature was introduced in js-waku and go-waku, providing a simple API for developers to implement secure communication protocols using the Noise Protocol framework. These open-source libraries provide a solid foundation for building secure and decentralized applications that prioritize data privacy and security.

This functionality is designed to be simple and easy to use, even for developers who are not experts in cryptography. The library offers a clear and concise API that abstracts away the complexity of the Noise Protocol framework and provides a straightforward interface for developers to use. Using this, developers can effortlessly implement secure communication protocols on top of their JavaScript and Go applications, without having to worry about the low-level details of cryptography.

One of the key benefits of using Noise is that it provides end-to-end encryption, which means that the communication between two parties is encrypted from start to finish. This is essential for ensuring the security and privacy of sensitive information.

Device Pairing

In today's digital world, device pairing has become an integral part of our lives. Whether it's connecting our smartphones with other computers or web applications, the need for secure device pairing has become more crucial than ever. With the increasing threat of cyber-attacks and data breaches, it's essential to implement secure protocols for device pairing to ensure data privacy and prevent unauthorized access.

To demonstrate how device pairing can be achieved using Waku and Noise, we have examples available at https://examples.waku.org/noise-js/. You can try pairing different devices, such as mobile and desktop, via a web application. This can be done by scanning a QR code or opening a URL that contains the necessary data for a secure handshake.

The process works as follows:

Actors:

  • Alice the initiator
  • Bob the responder
  1. The first step in achieving secure device pairing using Noise and Waku is for Bob to generate the pairing information, which could be transmitted out-of-band. For this, Bob opens https://examples.waku.org/noise-js/ and a QR code is generated, containing the data required to do the handshake. This pairing QR code is timeboxed, meaning that after 2 minutes, it will become invalid and a new QR code must be generated.
  2. Alice scans the QR code using a mobile phone. This will open the app with the QR code parameters initiating the handshake process which is described in 43/WAKU2-DEVICE-PAIRING. These messages are exchanged between two devices over Waku to establish a secure connection. The handshake messages consist of three main parts: the initiator's message, the responder's message, and the final message, which are exchanged to establish a secure connection. While using js-noise, the developer is abstracted of this process, since the messaging happens automatically depending on the actions performed by the actors in the pairing process.
  3. Both Alice and Bob will be asked to verify each other's identity. This is done by confirming that an 8-digit authorization code matches on both devices. If both actors confirm that the authorization code is valid, the handshake concludes successfully.
  4. Alice and Bob receive a set of shared keys that can be used to start exchanging encrypted messages. The shared secret keys generated during the handshake process are used to encrypt and decrypt messages sent between the devices. This ensures that the messages exchanged between the devices are secure and cannot be intercepted or modified by an attacker.

The above example demonstrates device pairing using js-waku. Additionally, you can also try building and experimenting with other noise implementations like nwaku, or go-waku, with an example available at https://github.com/waku-org/go-waku/tree/master/examples/noise in which the same flow described before is done with Bob (the responder) using go-waku instead of js-waku.

Conclusion

With its easy to use API built on top of the Noise Protocol framework and the LibP2P networking stack, if you are a developer looking to implement secure messaging in their applications that are both decentralized and censorship resistant, Waku is definitely an excellent choice worth checking out!

Waku is also open source under the MIT and APACHEv2 licenses, which means that developers are encouraged to contribute code, report bugs, and suggest improvements to make it even better.

Don't hesitate to try the live example at https://examples.waku.org/noise-js and build your own webapp using https://github.com/waku-org/js-noise, https://github.com/waku-org/js-waku and https://github.com/waku-org/go-waku. This will give you a hands-on experience of implementing secure communication protocols using the Noise Protocol framework in a practical setting. Happy coding!

References

]]>
+ + Richard + +
+ + <![CDATA[The Future of Waku Network: Scaling, Incentivization, and Heterogeneity]]> + https://vac.dev/rlog/future-of-waku-network + + 2023-04-03T00:00:00.000Z + + Learn how the Waku Network is evolving through scaling, incentivization, and diverse ecosystem development and what the future might look like.

Waku is preparing for production with a focus on the Status Communities use case. In this blog post, we will provide an +overview of recent discussions and research outputs, aiming to give you a better understanding of how the Waku network +may look like in terms of scaling and incentivization.

DOS Mitigation for Status Communities

Waku is actively exploring DOS mitigation mechanisms suitable for Status Communities. While RLN +(Rate Limiting Nullifiers) remains the go-to DOS protection solution due to its privacy-preserving and +censorship-resistant properties, there is still more work to be done. We are excited to collaborate with PSE +(Privacy & Scaling Explorations) in this endeavor. Learn more about their latest progress in this tweet.

A Heterogeneous Waku Network

As we noted in a previous forum post, Waku's protocol +incentivization model needs to be flexible to accommodate various business models. Flexibility ensures that projects +can choose how they want to use Waku based on their specific needs.

Reversing the Incentivization Question

Traditionally, the question of incentivization revolves around how to incentivize operators to run nodes. We'd like to +reframe the question and instead ask, "How do we pay for the infrastructure?"

Waku does not intend to offer a free lunch. +Ethereum's infrastructure is supported by transaction fees and inflation, with validators receiving rewards from both sources. +However, this model does not suit a communication network like Waku. +Users and platforms would not want to pay for every single message they send. Additionally, Waku aims to support instant +ephemeral messages that do not require consensus or long-term storage.

Projects that use Waku to enable user interactions, whether for chat messages, gaming, private DeFi, notifications, or +inter-wallet communication, may have different value extraction models. Some users might provide services for the +project and expect to receive value by running nodes, while others may pay for the product or run infrastructure to +contribute back. Waku aims to support each of these use cases, which means there will be various ways to "pay for the +infrastructure."

In his talk, Oskar addressed two strategies: RLN and service credentials.

RLN and Service Credentials

RLN enables DOS protection across the network in a privacy-preserving and permission-less manner: stake in a contract, +and you can send messages.

Service credentials establish a customer-provider relationship. Users might pay to have messages they are interested in +stored and served by a provider. Alternatively, a community owner could pay a service provider to host their community.

Providers could offer trial or limited free services to Waku users, similar to Slack or Discord. Once a trial is expired or outgrown, +a community owner could pay for more storage or bandwidth, similar to Slack's model. +Alternatively, individual users could contribute financially, akin to Discord's Server Boost, or by sharing their own +resources with their community.

We anticipate witnessing various scenarios across the spectrum: from users sharing resources to users paying for access to the network and everything in between.

Waku Network: Ethereum or Cosmos?

Another perspective is to consider whether the Waku network will resemble Ethereum or Cosmos.

For those not familiar with the difference between both, in a very concise manner:

  • Ethereum is a set of protocols and software that are designed to operate on one common network and infrastructure
  • Cosmos is a set of protocols and software (SDKs) designed to be deployed in separate yet interoperable networks and infrastructures by third parties

We want Waku to be decentralized to provide censorship resistance and privacy-preserving communication. +If each application has to deploy its own network, we will not achieve this goal. +Therefore, we aim Waku to be not only an open source set of protocols, but also a shared infrastructure that anyone can leverage to build applications on top, with some guarantees in terms of decentralization and anonymity. +This approach is closer in spirit to Ethereum than Cosmos. +Do note that, similarly to Ethereum, anyone is free to take Waku software and protocols and deploy their own network.

Yet, because of the difference in the fee model, the Waku Network is unlikely to be as unified as Ethereum's. +We currently assume that there will be separate gossipsub networks with different funding models. +Since there is no consensus on Waku, each individual operator can decide which network to support, enabling Waku to maintain its permission-less property.

Most likely, the Waku network will be heterogeneous, and node operators will choose the incentivization model they prefer.

Scalability and Discovery Protocols

To enable scalability, the flow of messages in the Waku network will be divided in shards, +so that not every node has to forward every message of the whole network. +Discovery protocols will facilitate users connecting to the right nodes to receive the messages they are interested in.

Different shards could be subject to a variety of rate limiting techniques (globally, targeted to that shard or something in-between).

Marketplace protocols may also be developed to help operators understand how they can best support the network and where their resources are most needed. However, we are still far from establishing, or even asserting that we need, such a marketplace.

Open Problems

Splitting traffic between shards reduces bandwidth consumption for every Waku Relay node. +This improvement increases the likelihood that users with home connections can participate and contribute to the gossipsub network without encountering issues.

However, it does not cap traffic. +There are still open problems regarding how to guarantee that someone can use Waku with lower Internet bandwidth or run critical services, such as a validation node, on the same connection.

We have several ongoing initiatives:

  • Analyzing the Status Community protocol to confirm efficient usage of Waku [4]
  • Simulating the Waku Network to measure actual bandwidth usage [5]
  • Segregating chat messages from control and media messages [6]

The final solution will likely be a combination of protocols that reduce bandwidth usage or mitigate the risk of DOS attacks, providing flexibility for users and platforms to enable the best experience.

The Evolving Waku Network

The definition of the "Waku Network" will likely change over time. In the near future, it will transition from a single +gossipsub network to a sharded set of networks unified by a common discovery layer. This change will promote scalability +and allow various payment models to coexist within the Waku ecosystem.

In conclusion, the future of Waku Network entails growth, incentivization, and heterogeneity while steadfastly +maintaining its core principles. As Waku continues to evolve, we expect it to accommodate a diverse range of use cases +and business models, all while preserving privacy, resisting censorship, avoiding surveillance, and remaining accessible +to devices with limited resources.

References

  1. 51/WAKU2-RELAY-SHARDING
  2. 57/STATUS-Simple-Scaling
  3. 58/RLN-V2
  4. Scaling Status Communities: Potential Problems
  5. Waku Network Testing
  6. 51/WAKU2-RELAY-SHARDING: Control Message Shards
]]>
+ + Franck + +
+ + <![CDATA[Waku for All Decentralized Applications and Infrastructures]]> + https://vac.dev/rlog/waku-for-all + + 2022-11-08T00:00:00.000Z + + Waku is an open communication protocol and network. Decentralized apps and infrastructure can use Waku for their +communication needs. It is designed to enable dApps and decentralized infrastructure projects to have secure, private, +scalable communication. Waku is available in several languages and platforms, from Web to mobile to desktop to cloud. +Initially, We pushed Waku adoption to the Web ecosystem, we learned that Waku is usable in a variety of complex applications +and infrastructure projects. We have prioritized our effort to make Waku usable on various platforms and environments.

Background

We have built Waku to be the communication layer for Web3. Waku is a collection of protocols to choose from for your messaging needs. It enables secure, censorship-resistant, privacy-preserving, spam-protected communication for its users. It is designed to run on any device, from mobile to the cloud.

Waku is available on many systems and environments and used by several applications and SDKs for decentralized communications.

This involved research efforts in various domains: conversational security, protocol incentivization, zero-knowledge, +etc.

Waku uses novel technologies. Hence, we knew that early dogfooding of Waku was necessary, even if research was still in progress [1]. Thus, as soon as Waku protocols and software were usable, we started to push for the adoption of Waku. This started back in 2021.

Waku is the communication component of the Web3 trifecta. This trifecta was Ethereum (contracts), Swarm (storage) and Whisper (communication). Hence, it made sense to first target dApps which already use one of the pillars: Ethereum.

As most dApps are web apps, we started the development of js-waku for the browser.

Once ready, we reached out to dApps to integrate Waku, added prizes to hackathons +and gave talks.

We also assumed we would see patterns in the usage of Waku, that we would facilitate with the help of +SDKs.

Finally, we created several web apps: +examples +and PoCs.

By discussing with Waku users and watching it being used, we learned a few facts:

  1. The potential use cases for Waku are varied and many,
  2. Many projects are interested in having an embedded chat in their dApp,
  3. There are complex applications that need Waku as a solution. Taking RAILGUN as an example:
  • Web wallet
  • + React Native mobile wallet
  • + NodeJS node/backend.

(1) means that it is not that easy to create SDKs for common use cases.

(2) was a clear candidate for an SDK. Yet, building a chat app is a complex task. Hence, the Status app team tackled +this in the form of Status Web.

Finally, (3) was the most important lesson. We learned that multi-tier applications need Waku for decentralized and +censorship-resistant communications. For these projects, js-waku is simply not enough. They need Waku to work in their +Golang backend, Unity desktop game and React Native mobile app.

We understood that we should see the whole Waku software suite +(js-waku, +nwaku, +go-waku, +waku-react-native, +etc) as an asset for its success. +That we should not limit outreach, marketing, documentation efforts to the web, but target all platforms.

From a market perspective, we identified several actors:

  • platforms: Projects that use Waku to handle communication,
  • operators: Operators run Waku nodes and are incentivized to do so,
  • developers: Developers are usually part of a platform or solo hackers learning Web3,
  • contributors: Developers and researchers with interests in decentralization, privacy, censorship-resistance, +zero-knowledge, etc.

Waku for All Decentralized Applications and Infrastructures

In 2022, we shifted our focus to make the various Waku implementations usable and used.

We made Waku multi-platform.

We shifted Waku positioning to leverage all Waku implementations and better serve the user's needs:

We are consolidating the documentation for all implementations on a single website (work in progress) +to improve developer experience.

This year, we also started the operator outreach effort to push for users to run their own Waku nodes. We have +recently concluded our first operator trial run. +Nwaku's documentation, stability and performance has improved. It is now easier to +run your own Waku node.

Today, operator wannabes most likely run their own nodes to support or use the Waku network. +We are dogfooding +Waku RLN, our novel economic spam protection protocol, +and looking at incentivizing the Waku Store protocol. +This way, we are adding reasons to run your own Waku node.

For those who were following us in 2021, know that we are retiring the Waku Connect branding in favour of the Waku +branding.

Waku for Your Project

As discussed, Waku is now available on various platforms. The question remains: How can Waku benefit your project?

Here are a couple of use cases we recently investigated:

Layer-2 Decentralization

Most ([2], [3]) roll-ups use a centralized sequencer or equivalent. Running several sequencers is not as straightforward as running several execution nodes. Waku can help:

  • Provide a neutral marketplace for a mempool: If sequencers compete for L2 tx fees, they may not be incentivized to share transactions with other sequencers. Waku nodes can act as a neutral network to enable all sequencers to access transactions.
  • Enable censorship-resistant wallet<>L2 communication,
  • Provide rate limiting mechanism for spam protection: Using RLN to prevent DDOS.

Device pairing and communication

With Waku Device Pairing, a user can setup a secure encrypted communication channel +between their devices. As this channel would operate over Waku, it would be censorship-resistant and privacy preserving. +These two devices could be:

  • Ethereum node and mobile phone to access a remote admin panel,
  • Alice's phone and Bob's phone for any kind of secure communication,
  • Mobile wallet and desktop/browser dApp for transaction and signature exchange.

Check js-waku#950 for the latest update on this.

Get Involved

Developer? Grab any of the Waku implementations and integrate it in your app: https://waku.org/platform.

Researcher? See https://vac.dev/contribute to participate in Waku research.

Tech-savvy? Try to run your own node: https://waku.org/operator.

Otherwise, play around with the various web examples.

If you want to help, we are hiring!

Moving Forward

What you can expect next:


References

  • [1] Waku is modular; it is a suite of protocols; hence some Waku protocols may be mature, while +new protocols are still being designed. Which means that research continues to be ongoing while +Waku is already used in production.
  • [2] The Optimism Foundation runs the only block producer on the Optimism network.
  • [3] Top 10 L2s are documented as having a centralized operator.
]]>
+ + Franck + +
+ + <![CDATA[Building Privacy-Protecting Infrastructure]]> + https://vac.dev/rlog/building-privacy-protecting-infrastructure + + 2022-11-04T12:00:00.000Z + + What is privacy-protecting infrastructure? Why do we need it and how we can build it? We'll look at Waku, the communication layer for Web3. We'll see how it uses ZKPs to incentivize and protect the Waku network. We'll also look at Zerokit, a library that makes it easier to use ZKPs in different environments. After reading this, I hope you'll better understand the importance of privacy-protecting infrastructure and how we can build it.

This write-up is based on a talk given at DevCon 6 in Bogota, a video can be found here

Intro

In this write-up, we are going to talk about building privacy-protecting +infrastructure. What is it, why do we need it and how can we build it?

We'll look at Waku, the communication layer for Web3. We'll look at how we are +using Zero Knowledge (ZK) technology to incentivize and protect the Waku +network. We'll also look at Zerokit, a library we are writing to make ZKP easier +to use in different environments.

At the end of this write-up, I hope you'll come away with an understanding of +the importance of privacy-protecting infrastructure and how we can build it.

About

First, briefly about Vac. We build public good protocols for the decentralized +web, with a focus on privacy and communication. We do applied research based on +which we build protocols, libraries and publications. We are also the custodians +of protocols that reflect a set of principles.

Principles

It has its origins in the Status app and trying to improve +the underlying protocols and infrastructure. We build Waku, +among other things.

Why build privacy-protecting infrastructure?

Privacy is the power to selectively reveal yourself. It is a requirement for +freedom and self-determination.

Just like you need decentralization in order to get censorship-resistance, you +need privacy to enable freedom of expression.

To build applications that are decentralized and privacy-protecting, you need +the base layer, the infrastructure itself, to have those properties.

We see this a lot. It is easier to make trade-offs at the application layer than +doing them at the base layer. You can build custodial solutions on top of a +decentralized and non-custodial network where participants control their own +keys, but you can't do the opposite.

If you think about it, buildings can be seen as a form of privacy-protecting +infrastructure. It is completely normal and obvious in many ways, but when it +comes to the digital realm our mental models and way of speaking about it hasn't +caught up yet for most people.

I'm not going too much more into the need for privacy or what happens when you +don't have it, but suffice to say it is an important property for any open +society.

When we have conversations, true peer-to-peer offline conversations, we can talk +privately. If we use cash to buy things we can do commerce privately.

On the Internet, great as it is, there are a lot of forces that makes this +natural state of things not the default. Big Tech has turned users into a +commodity, a product, and monetized user's attention for advertising. To +optimize for your attention they need to surveil your habits and activities, and +hence breach your privacy. As opposed to more old-fashioned models, where +someone is buying a useful service from a company and the incentives are more +aligned.

We need to build credibly neutral infrastructure that protects your privacy at +the base layer, in order to truly enable applications that are +censorship-resistant and encourage meaningful freedom of expression.

Web3 infrastructure

Infrastructure is what lies underneath. Many ways of looking at this but I'll +keep it simple as per the original Web3 vision. You had Ethereum for +compute/consensus, Swarm for storage, and Whisper for messaging. Waku has taken +over the mantle from Whisper and is a lot more +usable today than Whisper ever was, +for many reasons.

Web3 Infrastructure

On the privacy-front, we see how Ethereum is struggling. It is a big UX problem, +especially when you try to add privacy back "on top". It takes a lot of effort +and it is easier to censor. We see this with recent action around Tornado Cash. +Compare this with something like Zcash or Monero, where privacy is there by +default.

There are also problems when it comes to the p2p networking side of things, for +example with Ethereum validator privacy and hostile actors and jurisdictions. If +someone can easily find out where a certain validator is physically located, +that's a problem in many parts of the world. Being able to have stronger +privacy-protection guarantees would be very useful for high-value targets.

This doesn't begin to touch on the so called "dapps" that make a lot of +sacrifices in how they function, from the way domains work, to how websites are +hosted and the reliance on centralized services for communication. We see this +time and time again, where centralized, single points of failure systems work +for a while, but then eventually fail.

In many cases an individual user might not care enough though, and for platforms +the lure to take shortcuts is strong. That is why it is important to be +principled, but also pragmatic in terms of the trade-offs that you allow on top. +We'll touch more on this in the design goals around modularity that Waku has.

ZK for privacy-protecting infrastructure

ZKPs are a wonderful new tool. Just like smart contracts enables programmable +money, ZKPs allow us to express fundamentally new things. In line with the great +tradition of trust-minimization, we can prove statement while revealing the +absolute minimum information necessary. This fits the definition of privacy, the +power to selectively reveal yourself, perfectly. I'm sure I don't need to tell +anyone reading this but this is truly revolutionary. The technology is advancing +extremely fast and often it is our imagination that is the limit.

Zero knowledge

Waku

What is Waku? It is a set of modular protocols for p2p communication. It has a focus on privacy, security and being able to run anywhere. It is the spiritual successor to Whisper.

By modular we mean that you can pick and choose protocols and how you use them +depending on constraints and trade-offs. For example, bandwidth usage vs +privacy.

It is designed to work in resource restricted environments, such as mobile +phones and in web browsers. It is important that infrastructure meets users +where they are and supports their real-world use cases. Just like you don't need +your own army and a castle to have your own private bathroom, you shouldn't need +to have a powerful always-on node to get reasonable privacy and +censorship-resistance. We might call this self-sovereignty.

Waku - adaptive nodes

One way of looking at Waku is as an open service network. There are nodes with +varying degrees of capabilities and requirements. For example when it comes to +bandwidth usage, storage, uptime, privacy requirements, latency requirements, +and connectivity restrictions.

We have a concept of adaptive nodes that can run a variety of protocols. A node +operator can choose which protocols they want to run. Naturally, there'll be +some nodes that do more consumption and other nodes that do more provisioning. +This gives rise to the idea of a service network, where services are provided +for and consumed.

Adaptive Nodes

Waku - protocol interactions

There are many protocols that interact. Waku Relay protocol is based on libp2p +GossipSub for p2p messaging. We have filter for bandwidth-restricted nodes to +only receive subset of messages. Lightpush for nodes with short connection +windows to push messages into network. Store for nodes that want to retrieve +historical messages.

On the payload layer, we provide support for Noise handshakes/key-exchanges. +This means that as a developers, you can get end-to-end encryption and expected +guarantees out of the box. We have support for setting up a secure channel from +scratch, and all of this paves the way for providing Signal's Double Ratchet at +the protocol level much easier. We also have experimental support for +multi-device usage. Similar features have existed in for example the Status app +for a while, but with this we make it easier for any platform using Waku to use +it.

There are other protocols too, related to peer discovery, topic usage, etc. See +specs for more details.

Protocol Interactions

Waku - Network

For the Waku network, there are a few problems. For example, when it comes to +network spam and incentivizing service nodes. We want to address these while +keeping privacy-guarantees of the base layer. I'm going to go into both of +these.

The spam problem arises on the gossip layer when anyone can overwhelm the +network with messages. The service incentivization is a problem when nodes don't +directly benefit from the provisioning of a certain service. This can happen if +they are not using the protocol directly themselves as part of normal operation, +or if they aren't socially inclined to provide a certain service. This depends a +lot on how an individual platform decides to use the network.

Waku Network

Dealing with network spam and RLN Relay

Since the p2p relay network is open to anyone, there is a problem with spam. If +we look at existing solutions for dealing with spam in traditional messaging +systems, a lot of entities like Google, Facebook, Twitter, Telegram, Discord use +phone number verification. While this is largely sybil-resistant, it is +centralized and not private at all.

Historically, Whisper used PoW which isn't good for heterogeneous networks. Peer scoring is open to sybil attacks and doesn't directly address spam protection in an anonymous p2p network.

The key idea here is to use RLN for private economic spam protection using +zkSNARKs.

I'm not going to go into too much detail of RLN here. If you are interested, I +gave a talk in Amsterdam at +Devconnect about this. We have some write-ups on RLN +here by Sanaz who has been pushing a lot of this +from our side. There's also another talk at Devcon by Tyler going into RLN in +more detail. Finally, here's the RLN spec.

I'll briefly go over what it is, the interface and circuit and then talk about +how it is used in Waku.

RLN - Overview and Flow

RLN stands for Rate Limiting Nullifier. It is an anonymous rate limiting mechanism based on zkSNARKs. By rate limiting we mean you can only send N messages in a given period. By anonymity we mean that you can't link a message to a publisher. We can think of it as a voting booth, where you are only allowed to vote once every election.

Voting Booth

It can be used for spam protection in p2p messaging systems, and also rate +limiting in general, such as for a decentralized captcha.

There are three parts to it. You register somewhere, then you can signal and +finally there's a verification/slashing phase. You put some capital at risk, +either economic or social, and if you double signal you get slashed.

RLN - Circuit

Here's what the private and public inputs to the circuit look like. The identity secret is generated locally, and we create an identity commitment that is inserted into a Merkle tree. We then use Merkle proofs to prove membership. A registered member can only signal once for a given epoch or external nullifier, for example every ten seconds in Unix time. The RLN identifier is for a specific RLN app.

We also see what the circuit output looks like. This is calculated locally. y +is a share of the secret equation, and the (internal) nullifier acts as a unique +fingerprint for a given app/user/epoch combination. How do we calculate y and +the internal nullifier?

// Private input
signal input identity_secret;
signal input path_elements[n_levels][1];
signal input identity_path_index[n_levels];

// Public input
signal input x; // signal_hash
signal input epoch; // external_nullifier
signal input rln_identifier;

// Circuit output
signal output y;
signal output root;
signal output nullifier;

RLN - Shamir's secret sharing

This is done using Shamir's secret +sharing. Shamir’s +secret sharing is based on idea of splitting a secret into shares. This is how +we enable slashing of funds.

In this case, we have two shares. If a given identity a0 signals twice in +epoch/external nullifier, a1 is the same. For a given RLN app, +internal_nullifier then stays the same. x is signal hash which is different, +and y is public, so we can reconstruct identity_secret. With the identity +secret revealed, this gives access to e.g. financial stake.

a_0 = identity_secret // secret S
a_1 = poseidonHash([a0, external_nullifier])

y = a_0 + x * a_1

internal_nullifier = poseidonHash([a_1, rln_identifier])

Shamir&#39;s secret sharing

RLN Relay

This is how RLN is used with Relay/GossipSub protocol. A node registers and +locks up funds, and after that it can send messages. It publishes a message +containing the Zero Knowledge proof and some other details.

Each relayer node listens to the membership contract for new members, and it +also keeps track of relevant metadata and merkle tree. Metadata is needed to be +able to detect double signaling and perform slashing.

Before forwarding a message, it does some verification checks to ensure there are no duplicate messages, the ZKP is valid and no double signaling has occurred. It is worth noting that this can be combined with peer scoring, for example for duplicate messages or invalid ZK proofs.

In line with Waku's goals of modularity, RLN Relay is applied on a specific subset of pubsub and content topics. You can think of it as an extra secure channel.

RLN Relay

RLN Relay cross-client testnet

Where are we with RLN Relay deployment? We've recently launched our second +testnet. This is using RLN Relay with a smart contract on Goerli. It integrates +with our example p2p chat application, and it does so through three different +clients, nwaku, go-waku and js-waku for browsers. This is our first p2p +cross-client testnet for RLN Relay.

Here's a video that shows a user registering in a browser, signaling through JS-Waku. It then gets relayed to a nwaku node, which verifies the proof. The second video shows what happens in the spam case: when more than one message is sent in a given epoch, it detects it as spam and discards it. Slashing hasn't been implemented fully yet in the client and is a work in progress.

If you are curious and want to participate, you can join the effort on our Vac +Discord. We also have +tutorials +setup for all clients so you can play around with it.

As part of this, and to make it work in multiple different environments, we've +also been developing a new library called Zerokit. I'll talk about this a bit +later.

Private settlement / Service credentials

Going back to the service network idea, let's talk about service credentials. +The idea behind service credentials and private settlement is to enable two +actors to pay for and provide services without compromising their privacy. We do +not want the payment to create a direct public link between the service provider +and requester.

Recall the Waku service network illustration with adaptive nodes that choose +which protocols they want to run. Many of these protocols aren't very heavy and +just work by default. For example the relay protocol is enabled by default. +Other protocols are much heavier to provide, such as storing historical +messages.

It is desirable to have additional incentives for this, especially for platforms +that aren't community-based where some level of altruism can be assumed (e.g. +Status Communities, or WalletConnect cloud infrastructure).

You have a node Alice that is often offline and wants to consume historical +messages on some specific content topics. You have another node Bob that runs a +server at home where they store historical messages for the last several weeks. +Bob is happy to provide this service for free because he's excited about running +privacy-preserving infrastructure and he's using it himself, but his node is +getting overwhelmed by freeloaders and he feels like he should be paid something +for continuing to provide this service.

Alice deposits some funds in a smart contract which registers it in a tree, +similar to certain other private settlement mechanisms. A fee is taken or +burned. In exchange, she gets a set of tokens or service credentials. When she +wants to do a query with some criteria, she sends this to Bob. Bob responds with +size of response, cost, and receiver address. Alice then sends a proof of +delegation of a service token as a payment. Bob verifies the proof and resolves +the query.

The end result is that Alice has consumed some service from Bob, and Bob has +received payment for this. There's no direct transaction link between Alice and +Bob, and gas fees can be minimized by extending the period before settling on +chain.

This can be complemented with altruistic service provisioning, for example by +splitting the peer pool into two slots, or only providing a few cheap queries +for free.

The service provisioning is general, and can be generalized for any kind of request/response service provisioning that we want to keep private.

This isn't a perfect solution, but it is an incremental improvement on top of +the status quo. It can be augmented with more advanced techniques such as better +non-repudiable node reputation, proof of correct service provisioning, etc.

We are currently in the raw spec / proof of concept stage of this. We expect to +launch a testnet of this later this year or early next year.

Service credentials flow

Zerokit

Zerokit is a set of Zero Knowledge modules, +written in Rust and designed to be used in many different environments. The +initial goal is to get the best of both worlds with Circom/Solidity/JS and +Rust/ZK ecosystem. This enables people to leverage Circom-based constructs from +non-JS environments.

For the RLN module, it is using Circom circuits via ark-circom and Rust for +scaffolding. It exposes a C FFI API that can be used through other system +programming environments, like Nim and Go. It also exposes an experimental WASM +API that can be used through web browsers.

Waku is p2p infrastructure running in many different environments, such as Nim/JS/Go/Rust, so this is a requirement for us.

The strengths of Circom and JS are access to Dapp developers, tooling, generating verification code, circuits etc. Rust's strength is that it is systems-based and easy to interface with other language runtimes such as Nim, Go, Rust, C. It also gives access to other Rust ZK ecosystems such as arkworks. This opens the door for using other constructs, such as Halo2. This becomes especially relevant for constructs where you don't want to do a trusted setup or where circuits are more complex/custom and performance requirements are higher.

In general with Zerokit, we want to make it easy to build and use ZKP in a +multitude of environments, such as mobile phones and web browsers. Currently it +is too complex to write privacy-protecting infrastructure with ZKPs considering +all the languages and tools you have to learn, from JS, Solidity and Circom to +Rust, WASM and FFI. And that isn't even touching on things like secure key +storage or mobile dev. Luckily more and more projects are working on this, +including writing DSLs etc. It'd also be exciting if we can make a useful +toolstack for JS-less ZK dev to reduce cognitive overhead, similar to what we +have with something like Foundry.

Other research

I also want to mention a few other things we are doing. One thing is protocol specifications. We think this is very important for p2p infra, and we see a lot of other projects that claim to do p2p infrastructure but aren't clear about guarantees or how stable something is. That makes it hard to have multiple implementations, to collaborate across different projects, and to analyze things objectively.

Related to that is publishing papers. We've put +out three so far, related to Waku and RLN-Relay. This makes it easier to +interface with academia. There's a lot of good researchers out there and we want +to build a better bridge between academia and industry.

Another thing is network +privacy. Waku is modular with +respect to privacy guarantees, and there are a lot of knobs to turn here +depending on specific deployments. For example, if you are running the full +relay protocol you currently have much stronger receiver anonymity than if you +are running filter protocol from a bandwidth or connectivity-restricted node.

We aim to make this pluggable depending on user needs. E.g. mixnets such as Nym +come with some trade-offs but are a useful tool in the arsenal. A good mental +model to keep in mind is the anonymity trilemma, where you can only pick 2/3 out +of low latency, low bandwidth usage and strong anonymity.

We are currently exploring Dandelion-like +additions to the relay/gossip +protocol, which would provide for stronger sender anonymity, especially in a +multi-node/botnet attacker model. As part of this we are looking into different +parameters choices and general possibilities for lower latency usage. This could +make it more amenable for latency sensitive environments, such as validator +privacy, for specific threat models. The general theme here is we want to be +rigorous with the guarantees we provide, under what conditions and for what +threat models.

Another thing mentioned earlier is Noise payload +encryption, and specifically things like allowing +for pairing different devices with e.g. QR codes. This makes it easier for +developers to provide secure messaging in many realistic scenarios in a +multi-device world.

Other research

Summary

We've gone over what privacy-protecting infrastructure is, why we want it and +how we can build it. We've seen how ZK is a fundamental building block for this. +We've looked at Waku, the communication layer for Web3, and how it uses Zero +Knowledge proofs to stay private and function better. We've also looked at +Zerokit and how we can make it easier to do ZKP in different environments.

Finally we also looked at some other research we've been doing. All of the +things mentioned in this article, and more, is available as +write-ups, specs, or +discussions on our forum or Github.

If you find any of this exciting to work on, feel free to reach out on our +Discord. We are also hiring, and we have started +expanding into other privacy infrastructure tech like private and provable +computation with ZK-WASM.

]]>
+ + Oskar + +
+ + <![CDATA[Waku Privacy and Anonymity Analysis Part I: Definitions and Waku Relay]]> + https://vac.dev/rlog/wakuv2-relay-anon + + 2022-07-22T10:00:00.000Z + + Introducing a basic threat model and privacy/anonymity analysis for the Waku v2 relay protocol.

Waku v2 enables secure, privacy preserving communication using a set of modular P2P protocols. +Waku v2 also aims at protecting the user's anonymity. +This post is the first in a series about Waku v2 security, privacy, and anonymity. +The goal is to eventually have a full privacy and anonymity analysis for each of the Waku v2 protocols, as well as covering the interactions of various Waku v2 protocols. +This provides transparency with respect to Waku's current privacy and anonymity guarantees, and also identifies weak points that we have to address.

In this post, we first give an informal description of security, privacy and anonymity in the context of Waku v2. +For each definition, we summarize Waku's current guarantees regarding the respective property. +We also provide attacker models, an attack-based threat model, and a first anonymity analysis of Waku v2 relay within the respective models.

Waku comprises many protocols that can be combined in a modular way. +For our privacy and anonymity analysis, we start with the relay protocol because it is at the core of Waku v2 enabling Waku's publish subscribe approach to P2P messaging. +In its current form, Waku relay is a minor extension of libp2p GossipSub.

Figure 1: The Waku v2 relay mesh is based on the [GossipSub mesh](https://docs.libp2p.io/concepts/publish-subscribe#types-of-peering)

Informal Definitions: Security, Privacy, and Anonymity

The concepts of security, privacy, and anonymity are linked and have quite a bit of overlap.

Security

Of the three, Security has the clearest agreed upon definition, +at least regarding its key concepts: confidentiality, integrity, and availability.

  • confidentiality: data is not disclosed to unauthorized entities.
  • integrity: data is not modified by unauthorized entities.
  • availability: data is available, i.e. accessible by authorized entities.

While these are the key concepts, the definition of information security has been extended over time including further concepts, +e.g. authentication and non-repudiation. +We might cover these in future posts.

Privacy

Privacy allows users to choose which data and information

  • they want to share
  • and with whom they want to share it.

This includes data and information that is associated with and/or generated by users. +Protected data also comprises metadata that might be generated without users being aware of it. +This means, no further information about the sender or the message is leaked. +Metadata that is protected as part of the privacy-preserving property does not cover protecting the identities of sender and receiver. +Identities are protected by the anonymity property.

Often privacy is realized by the confidentiality property of security. +This neither makes privacy and security the same, nor the one a sub category of the other. +While security is abstract itself (its properties can be realized in various ways), privacy lives on a more abstract level using security properties. +Privacy typically does not use integrity and availability. +An adversary who has no access to the private data, because the message has been encrypted, could still alter the message.

Waku offers confidentiality via secure channels set up with the help of the Noise Protocol Framework. +Using these secure channels, message content is only disclosed to the intended receivers. +They also provide good metadata protection properties. +However, we do not have a metadata protection analysis as of yet, +which is part of our privacy/anonymity roadmap.

Anonymity

Privacy and anonymity are closely linked. +Both the identity of a user and data that allows inferring a user's identity should be part of the privacy policy. +For the purpose of analysis, we want to have a clearer separation between these concepts.

We define anonymity as unlinkability of users' identities and their shared data and/or actions.

We subdivide anonymity into receiver anonymity and sender anonymity.

Receiver Anonymity

We define receiver anonymity as unlinkability of users' identities and the data they receive and/or related actions. +The data transmitted via Waku relay must be a Waku message, which contains a content topic field. +Because each message is associated with a content topic, and each receiver is interested in messages with specific content topics, +receiver anonymity in the context of Waku corresponds to subscriber-topic unlinkability. +An example for the "action" part of our receiver anonymity definition is subscribing to a specific topic.

The Waku message's content topic is not related to the libp2p pubsub topic. +For now, Waku uses a single libp2p pubsub topic, which means messages are propagated via a single mesh of peers. +With this, the receiver discloses its participation in Waku on the gossipsub layer. +We will leave the analysis of libp2p gossipsub to a future article within this series, and only provide a few hints and pointers here.

Waku offers k-anonymity regarding content topic interest in the global adversary model. +K-anonymity in the context of Waku means an attacker can link receivers to content topics with a maximum certainty of $1/k$. +The larger $k$, the less certainty the attacker gains. +Receivers basically hide in a pool of $k$ content topics, any subset of which could be topics they subscribed to. +The attacker does not know which of those the receiver actually subscribed to, +and the receiver enjoys plausible deniability regarding content topic subscription. +Assuming there are $n$ Waku content topics, a receiver has $n$-anonymity with respect to association to a specific content topic.

Technically, Waku allows distributing messages over several libp2p pubsub topics. +This yields $k$-anonymity, assuming $k$ content topics share the same pubsub topic. +However, if done wrongly, such sharding of pubsub topics can breach anonymity. +A formal specification of anonymity-preserving topic sharding building on the concepts of partitioned topics is part of our roadmap.

Also, Waku is not directly concerned with 1:1 communication, so for this post, 1:1 communication is out of scope. +Channels for 1:1 communication can be implemented on top of Waku relay. +In the future, a 1:1 communication protocol might be added to Waku. +Similar to topic sharding, it would maintain receiver anonymity leveraging partitioned topics.

Sender Anonymity

We define sender anonymity as unlinkability of users' identities and the data they send and/or related actions. +Because the data in the context of Waku is Waku messages, sender anonymity corresponds to sender-message unlinkability.

In summary, Waku offers weak sender anonymity because of Waku's strict no sign policy, +which has its origins in the Ethereum consensus specs. +17/WAKU-RLN-RELAY and 18/WAKU2-SWAP mitigate replay and injection attacks.

Waku currently does not offer sender anonymity in stronger attacker models, as well as cannot protect against targeted attacks in weaker attacker models like the single or multi node attacker. +We will cover this in more detail in later sections.

Anonymity Trilemma

The Anonymity trilemma states that only two out of strong anonymity, low bandwidth, and low latency can be guaranteed in the global on-net attacker model. +Waku's goal, being a modular set of protocols, is to offer any combination of two out of these three properties, as well as blends. +An example for blending is an adjustable number of pubsub topics and peers in the respective pubsub topic mesh; this allows tuning the trade-off between anonymity and bandwidth.

Figure 2: Anonymity Trilemma: pick two.

A fourth factor that influences the anonymity trilemma is frequency and patterns of messages. +The more messages there are, and the more randomly distributed they are, the better the anonymity protection offered by a given anonymous communication protocol. +So, incentivising users to use the protocol, for instance by lowering entry barriers, helps protect the anonymity of all users. +The frequency/patterns factor is also related to the above described k-anonymity.

Censorship Resistance

Another security related property that Waku aims to offer is censorship resistance. +Censorship resistance guarantees that users can participate even if an attacker tries to deny them access. +So, censorship resistance ties into the availability aspect of security. +In the context of Waku that means users should be able to send messages as well as receive all messages they are interested in, +even if an attacker tries to prevent them from disseminating messages or tries to deny them access to messages.

Currently, Waku only guarantees censorship resistance in the weak single node attacker model. +While currently employed secure channels mitigate targeted censorship, e.g. blocking specific content topics, +general censorship resistance in strong attacker models is part of our roadmap. +Among other options, we will investigate Pluggable Transports in future articles.

Attacker Types

The following lists various attacker types with varying degrees of power. +The more power an attacker has, the more difficult it is to gain the respective attacker position.

Each attacker type comes in a passive and an active variant. +While a passive attacker can stay hidden and is not suspicious, +the respective active attacker has more (or at least the same) deanonymization power.

We also distinguish between internal and external attackers.

Internal

With respect to Waku relay, an internal attacker participates in the same pubsub topic as its victims. +Without additional measures on higher layer protocols, access to an internal position is easy to get.

Single Node

This attacker controls a single node. +Because this position corresponds to normal usage of Waku relay, it is trivial to obtain.

Multi Node

This attacker controls several nodes. We assume a smaller static number of controlled nodes. +The multi node position can be achieved relatively easily by setting up multiple nodes. +Botnets might be leveraged to increase the number of available hosts. +Multi node attackers could use Sybil attacks to increase the number of controlled nodes. +A countermeasure is for nodes to only accept libp2p gossipsub graft requests from peers with different IP addresses, or even different subnets.

Linearly Scaling Nodes

This attacker controls a number of nodes that scales linearly with the number of nodes in the network. +This attacker is especially interesting to investigate in the context of DHT security, +which Waku uses for ambient peer discovery.

External

An external attacker can only see encrypted traffic (protected by a secure channel set up with Noise). +Because an internal position can be easily obtained, +in practice external attackers would mount combined attacks that leverage both internal and external attacks. +We cover this more below when describing attacks.

Local

A local attacker has access to communication links in a local network segment. +This could be a rogue access point (with routing capability).

AS

An AS attacker controls a single AS (autonomous system). +A passive AS attacker can listen to traffic on arbitrary links within the AS. +An active AS attacker can drop, inject, and alter traffic on arbitrary links within the AS.

In practice, a malicious ISP would be considered as an AS attacker. +A malicious ISP could also easily set up a set of nodes at specific points in the network, +gaining internal attack power similar to a strong multi node attacker.

Global On-Net

A global on-net attacker has complete overview over the whole network. +A passive global attacker can listen to traffic on all links, +while the active global attacker basically carries the traffic: it can freely drop, inject, and alter traffic at all positions in the network. +This basically corresponds to the Dolev-Yao model.

An entity with this power would, in practice, also have the power of the internal linearly scaling nodes attacker.

Attack-based Threat Analysis

The following lists various attacks including the weakest attacker model in which the attack can be successfully performed. +The respective attack can be performed in all stronger attacker models as well.

An attack is considered more powerful if it can be successfully performed in a weaker attacker model.

If not stated otherwise, we look at these attacks with respect to their capability to deanonymize the message sender.

Scope

In this post, we introduce a simple tightly scoped threat model for Waku v2 Relay, which will be extended in the course of this article series.

In this first post, we will look at the relay protocol in isolation. +Even though many threats arise from layers Waku relay is based on, and layers that in turn live on top of relay, +we want to first look at relay in isolation because it is at the core of Waku v2. +Addressing and trying to solve all security issues of a complex system at once is an overwhelming task, which is why we focus on the soundness of relay first.

This also goes well with the modular design philosophy of Waku v2, as layers of varying levels of security guarantees can be built on top of relay, all of which can rely on the guarantees that Waku provides. +Instead of looking at a multiplicative explosion of possible interactions, we look at the core in this article, and cover the most relevant combinations in future posts.

Further restricting the scope, we will look at the data field of a relay message as a black box. +In a second article on Waku v2 relay, we will look into the data field, which according to the specification of Waku v2 relay must be a Waku v2 message. +We only consider messages with version field 2, which indicates that the payload has to be encoded using 35/WAKU2-NOISE.

Prerequisite: Get a Specific Position in the Network

Some attacks require the attacker node(s) to be in a specific position in the network. +In most cases, this corresponds to trying to get into the mesh peer list for the desired pubsub topic of the victim node.

In libp2p gossipsub, and by extension Waku v2 relay, nodes can simply send a graft message for the desired topic to the victim node. +If the victim node still has open slots, the attacker gets the desired position. +This only requires the attacker to know the gossipsub multiaddress of the victim node.

A linearly scaling nodes attacker can leverage DHT based discovery systems to boost the probability of malicious nodes being returned, which in turn significantly increases the probability of attacker nodes ending up in the peer lists of victim nodes. +Waku v2 discv5 will employ countermeasures that mitigate the amplifying effect this attacker type can achieve.

Replay Attack

In the scope we defined above, Waku v2 is resilient against replay attacks. +GossipSub nodes, and by extension Waku relay nodes, feature a seen cache, and only relay messages they have not seen before. +Further, replay attacks will be punished by RLN and SWAP.

Neighbourhood Surveillance

This attack can be performed by a single node attacker that is connected to all peers of the victim node $v$ with respect to a specific topic mesh. +The attacker also has to be connected to $v$. +In this position, the attacker will receive messages $m_v$ sent by $v$ both on the direct path from $v$, and on indirect paths relayed by peers of $v$. +It will also receive messages $m_x$ that are not sent by $v$. These messages $m_x$ are relayed by both $v$ and the peers of $v$. +Messages that are received (significantly) faster from $v$ than from any other of $v$'s peers are very likely messages that $v$ sent, +because for these messages the attacker is one hop closer to the source.

The attacker can (periodically) measure latency between itself and $v$, and between itself and the peers of $v$ to get more accurate estimates for the expected timings. +An AS attacker (and if the topology allows, even a local attacker) could also learn the latency between $v$ and its well-behaving peers. +An active AS attacker could also increase the latency between $v$ and its peers to make the timing differences more prominent. +This, however, might lead to $v$ switching to other peers.

This attack cannot (reliably) distinguish messages $m_v$ sent by $v$ from messages $m_y$ relayed by peers of $v$ the attacker is not connected to. +Still, there are hop-count variations that might be leveraged. +Messages $m_v$ always have a hop-count of 1 on the path from $v$ to the attacker, while all other paths are longer. +Messages $m_y$ might have the same hop-count on the path from $v$ as well as on other paths.

Controlled Neighbourhood

If a multi node attacker manages to control all peers of the victim node, it can trivially tell which messages originated from $v$.

Observing Messages

If Waku relay was not protected with Noise, the AS attacker could simply check for messages leaving $v$ which have not been relayed to $v$. +These are the messages sent by $v$. +Waku relay protects against this attack by employing secure channels set up using Noise.

Correlation

Monitoring all traffic (in an AS or globally) allows the attacker to identify traffic correlated with messages originating from $v$. +This (alone) does not allow an external attacker to learn which message $v$ sent, but it allows identifying the respective traffic propagating through the network. +The more traffic in the network, the lower the success rate of this attack.

Combined with just a few nodes controlled by the attacker, the actual message associated with the correlated traffic can eventually be identified.

DoS

An active single node attacker could run a disruption attack by

  • (1) dropping messages that should be relayed
  • (2) flooding neighbours with bogus messages

While (1) has a negative effect on availability, the impact is not significant. +A linearly scaling botnet attacker, however, could significantly disrupt the network with such an attack. +(2) is thwarted by RLN. +Also, SWAP helps mitigate DoS attacks.

A local attacker can DoS Waku by dropping all Waku traffic within its controlled network segment. +An AS attacker can DoS Waku within its authority, while a global attacker can DoS the whole network. +A countermeasure is the use of censorship resistance techniques like Pluggable Transports.

Summary and Future Work

Currently, Waku v2 relay offers k-anonymity with respect to receiver anonymity. +This also includes k-anonymity towards legitimate members of the same topic.

Waku v2 relay offers sender anonymity in the single node attacker model with its strict no sign policy. +Currently, Waku v2 does not guarantee sender anonymity in the multi node and stronger attacker models. +However, we are working on modular anonymity-preserving protocols and building blocks as part of our privacy/anonymity roadmap. +The goal is to allow tunable anonymity with respect to trade-offs between strong anonymity, low bandwidth, and low latency. +All of these cannot be fully guaranteed as the anonymity trilemma states. +Some applications have specific requirements, e.g. low latency, which require a compromise on anonymity. +Anonymity-preserving mechanisms we plan to investigate and eventually specify as pluggable anonymity protocols for Waku comprise

  • Dandelion++ for lightweight anonymity;
  • onion routing as a building block adding a low latency anonymization layer;
  • a mix network for providing strong anonymity (on top of onion routing) even in the strongest attacker model at the cost of higher latency.

These pluggable anonymity-preserving protocols will form a sub-set of the Waku v2 protocol set. +As an intermediate step, we might directly employ Tor for onion-routing, and Nym as a mix-net layer.

In future research log posts, we will cover further Waku v2 protocols and identify anonymity problems that will be added to our roadmap. +These protocols comprise

  • 13/WAKU2-STORE, which can violate receiver anonymity as it allows filtering by content topic. +A countermeasure is using the content topic exclusively for local filters.
  • 12/WAKU2-FILTER, which discloses nodes' interest in topics;
  • 19/WAKU2-LIGHTPUSH, which also discloses nodes' interest in topics and links the lightpush client as the sender of a message to the lightpush service node;
  • 21/WAKU2-FTSTORE, which discloses nodes' interest in specific time ranges allowing to infer information like online times.

While these protocols are not necessary for the operation of Waku v2, and can be seen as pluggable features, +we aim to provide alternatives without the cost of lowering the anonymity level.

References

]]>
+ + Daniel + +
+ + <![CDATA[Noise handshakes as key-exchange mechanism for Waku]]> + https://vac.dev/rlog/wakuv2-noise + + 2022-05-17T10:00:00.000Z + + We provide an overview of the Noise Protocol Framework as a tool to design efficient and secure key-exchange mechanisms in Waku2.

Introduction

In this post we will provide an overview of how Waku v2 users can adopt Noise handshakes to agree on cryptographic keys used to securely encrypt messages.

This process belongs to the class of key-exchange mechanisms, consisting of all those protocols that, with different levels of complexity and security guarantees, allow two parties to publicly agree on a secret without letting anyone else know what this secret is.

But why do we need key-exchange mechanisms in the first place?

With the advent of public-key cryptography, it became possible to decouple encryption from decryption through the use of two distinct cryptographic keys: one public, used to encrypt information and that can be made available to anyone, and one private (kept secret), which enables decryption of messages encrypted with its corresponding public key. The same does not happen in the case of symmetric encryption schemes where, instead, the same key is used for both encryption and decryption operations and hence cannot be publicly revealed as for public keys.

In order to address specific application needs, many different public, symmetric and hybrid cryptographic schemes were designed: Waku v1 and Waku v2, which inherits part of their design from the Ethereum messaging protocol Whisper, provide support to both public-key primitives (ECIES, ECDSA) and symmetric primitives (AES-256-GCM, KECCAK-256), used to sign, hash, encrypt and decrypt exchanged messages.

In principle, when communications employ public-key based encryption schemes (ECIES, in the case of Waku), there is no need for a key-agreement among parties: messages can be directly encrypted using the recipient's public-key before being sent over the network. However, public-key encryption and decryption primitives are usually very inefficient in processing large amounts of data, and this may constitute a bottleneck for many of today's applications. Symmetric encryption schemes such as AES-256-GCM, on the other hand, are much more efficient, but the encryption/decryption key needs to be shared among users before any encrypted message is exchanged.

To counter the downsides given by each of these two approaches while taking advantage of their strengths, hybrid constructions were designed. In these, public-key primitives are employed to securely agree on a secret key which, in turn, is used with a symmetric cipher for encrypting messages. In other words, such constructions specify a (public-key based) key-agreement mechanism!

Waku, up to payload version 1, does not implement nor recommend any protocol for exchanging symmetric ciphers' keys, leaving such task to the application layer. It is important to note that the kind of key-agreement employed has a direct impact on the security properties that can be granted on later encrypted messages, while security requirements usually depend on the specific application for which encryption is needed in the first place.

In this regard, Status, which builds on top of Waku, implements a custom version of the X3DH key-agreement protocol, in order to allow users to instantiate end-to-end encrypted communication channels. However, although such a solution is optimal when applied to (distributed) E2E encrypted chats, it is not flexible enough to fit or simplify the variety of applications Waku aims to address. +Hence, proposing and implementing one or few key-agreements which provide certain (presumably strong) security guarantees, would inevitably degrade performances of all those applications for which, given their security requirements, more tailored and efficient key-exchange mechanisms can be employed.

Guided by different examples, in the following sections we will overview Noise, a protocol framework we are currently integrating in Waku, for building secure key-agreements between two parties. One of the great advantages of using Noise is that it is possible to add support for new key-exchanges by just specifying users' actions from a predefined list, requiring no or only minimal modifications to existing implementations. Furthermore, Noise provides a framework to systematically analyze protocols' security properties and the corresponding attacker threat models. This allows not only to easily design new key-agreements eventually optimized for specific applications we want to address, but also to easily analyze or even formally verify any such custom protocol!

We believe that with its enormous flexibility and features, Noise represents a perfect candidate for bringing key-exchange mechanisms in Waku.

The Diffie-Hellman Key-exchange

The formalization of modern public-key cryptography started with the pioneering work of Whitfield Diffie and Martin Hellman, who detailed one of the earliest known key-agreement protocols: the famous Diffie-Hellman Key-Exchange.

Diffie-Hellman (DH) key-exchange is largely used today and represents the main cryptographic building block on which Noise handshakes' security is based.

In turn, the security of DH is based on a mathematical problem called discrete logarithm which is believed to be hard when the agreement is practically instantiated using certain elliptic curves $E$ defined over finite fields $\mathbb{F}_p$.

Informally, a DH exchange between Alice and Bob proceeds as follows:

  • Alice picks a secret scalar $s_A \in \mathbb{F}_p$ and computes, using the underlying curve's arithmetic, the point $P_A = s_A \cdot P \in E(\mathbb{F}_p)$ for a certain pre-agreed public generator $P$ of the elliptic curve $E(\mathbb{F}_p)$. She then sends $P_A$ to Bob.
  • Similarly, Bob picks a secret scalar $s_B \in \mathbb{F}_p$, computes $P_B = s_B \cdot P \in E(\mathbb{F}_p)$ and sends $P_B$ to Alice.
  • By commutativity of scalar multiplication, both Alice and Bob can now compute the point $P_{AB} = s_A s_B \cdot P$, using the elliptic curve point received from the other party and their secret scalar.

The assumed hardness of computing discrete logarithms in the elliptic curve ensures that it is not possible to compute $s_A$ or $s_B$ from $P_A$ and $P_B$, respectively. Another security assumption (named Computational Diffie-Hellman assumption) ensures that it is not possible to compute $P_{AB}$ from $P$, $P_A$ and $P_B$. Hence the point $P_{AB}$ shared by Alice and Bob at the end of the above protocol cannot be efficiently computed by an attacker intercepting $P_A$ and $P_B$, and can then be used to generate a secret to be later employed, for example, as a symmetric encryption key.

On a side note, this protocol shows the interplay between two components typical to public-key based schemes: the scalars $s_A$ and $s_B$ can be seen as private keys associated to the public keys $P_A$ and $P_B$, respectively, which allow only Alice and Bob to compute the shared secret point $P_{AB}$.

Ephemeral and Static Public Keys

Although we assumed that it is practically impossible for an attacker to compute the randomly picked secret scalar from the corresponding public elliptic curve point, it may happen that such scalar gets compromised or can be guessed due to a faulty employed random number generator. In such cases, an attacker will be able to recover the final shared secret and all encryption keys eventually derived from that, with clear catastrophic consequences for the privacy of exchanged messages.

To mitigate such issues, multiple DH operations can be combined using two different types of exchanged elliptic curve points or, better, public keys: ephemeral keys, that is random keys used only once in a DH operation, and long-term static keys, used mainly for authentication purposes since employed multiple times.

Just to provide an example, let us suppose Alice and Bob perform the following custom DH-based key-exchange protocol:

  • Alice generates an ephemeral key $E_A = e_A \cdot P$ by picking a random scalar $e_A$ and sends $E_A$ to Bob;
  • Similarly, Bob generates an ephemeral key $E_B = e_B \cdot P$ and sends $E_B$ to Alice;
  • Alice and Bob compute $E_{AB} = e_A e_B \cdot P$ and from it derive a secret encryption key $k$.
  • Bob sends to Alice his static key $S_B = s_B \cdot P$ encrypted with $k$.
  • Alice encrypts with $k$ her static key $S_A = s_A \cdot P$ and sends it to Bob.
  • Alice and Bob decrypt the received static keys, compute the secret $S_{AB} = s_A s_B \cdot P$ and use it together with $E_{AB}$ to derive a new encryption key $\tilde{k}$ to be later used with a symmetric cipher.

In this protocol, if Alice's and/or Bob's static keys get compromised, it would not be possible to derive the final secret key $\tilde{k}$, since at least one ephemeral key among $E_A$ and $E_B$ has to be compromised too in order to recover the secret $E_{AB}$. Furthermore, since Alice's and Bob's long-term static keys are encrypted, an attacker intercepting exchanged (encrypted) public keys will not be able to link such communication to Alice or Bob, unless one of the ephemeral keys is compromised (and, even in such case, none of the messages encrypted under the key $\tilde{k}$ can be decrypted).

The Noise Protocol Framework

In previous section we gave a small intuition on how multiple DH operations over ephemeral and static users' public keys can be combined to create different key-exchange protocols.

The Noise Protocol Framework defines various rules for building custom key-exchange protocols while allowing easy analysis of the security properties and threat models provided, given the type and order of the DH operations employed.

In Noise terminology, a key-agreement or Noise protocol consists of one or more Noise handshakes. During a Noise handshake, Alice and Bob exchange multiple (handshake) messages containing their ephemeral keys and/or static keys. These public keys are then used to perform a handshake-dependent sequence of Diffie-Hellman operations, whose results are all hashed into a shared secret key. Similarly as we have seen above, after a handshake is complete, each party will use the derived secret key to send and receive authenticated encrypted data by employing a symmetric cipher.

Depending on the handshake pattern adopted, different security guarantees can be provided on messages encrypted using a handshake-derived key.

The Noise handshakes we support in Waku all provide the following security properties:

  • Confidentiality: the adversary should not be able to learn what data is being sent between Alice and Bob.
  • Strong forward secrecy: an active adversary cannot decrypt messages nor infer any information on the employed encryption key, even in the case he has access to Alice's and Bob's long-term private keys (during or after their communication).
  • Authenticity: the adversary should not be able to cause either Alice or Bob to accept messages coming from a party different than their original senders.
  • Integrity: the adversary should not be able to cause Alice or Bob to accept data that has been tampered with.
  • Identity-hiding: once a secure communication channel is established, a passive adversary should not be able to link exchanged encrypted messages to their corresponding sender and recipient by knowing their long-term static keys.

We refer to Noise specification for more formal security definitions and precise threat models relative to Waku supported Noise Handshake patterns.

Message patterns

Noise handshakes involving DH operations over ephemeral and static keys can be succinctly sketched using the following set of handshake message tokens: e,s,ee,se,es,ss.

Tokens employing single letters denote (the type of) users' public keys: e refers to randomly generated ephemeral key(s), while s indicates the users' long-term static key(s).

Two letters tokens, instead, denotes DH operations over the two users' public keys the token refers to, given that the left token letter refers to the handshake initiator's public key, while the right token letter indicates the used responder's public key. Thus, if Alice started a handshake with Bob, the es token will shortly represent a DH operation among Alice's ephemeral key e and Bob's static key s.

Since, in order to perform any DH operations users need to share (or pre-share) the corresponding public keys, Noise compactly represents messages' exchanges using the two directions -> and <-, where -> denotes a message (arbitrary and/or DH public key) from the initiator to the responder, while <- the opposite.

Hence a message pattern consisting of a direction and one or multiple tokens such as <- e, s, es has to be interpreted one token at a time: in this example, the responder is sending his ephemeral and static key to the initiator and is then executing a DH operation over the initiator's ephemeral key e (shared in a previously exchanged message pattern) and his static key s. On the other hand, such message indicates also that the initiator received the responder's ephemeral and static keys e and s, respectively, and performed a DH operation over his ephemeral key and the responder's just received static key s. In this way, both parties will be able to derive at the end of each message pattern processed the same shared secret, which is eventually used to update any derived symmetric encryption keys computed so far.

In some cases, DH public keys employed in a handshake are pre-shared before the handshake itself starts. In order to chronologically separate exchanged keys and DH operations performed before and during a handshake, Noise employs the ... delimiter.

For example, the following message patterns

<- e
...
-> e, ee

indicate that the initiator knew the responder's ephemeral key before sending his own ephemeral key and executing a DH operation between both parties' ephemeral keys (similarly, the responder receives the initiator's ephemeral key and does a ee DH operation).

At this point it should be clear how such notation is able to compactly represent a large variety of DH based key-agreements. Nevertheless, we can easily define additional tokens and processing rules in order to address specific applications and security requirements, such as the psk token used to process arbitrary pre-shared key material.

As an example of Noise flexibility, the custom protocol we detailed above can be shortly represented as (Alice is on the left):

-> e
<- e, ee, s
-> s, ss

where after each DH operation an encryption key is derived (along with the secrets computed by all previously executed DH operations) in order to encrypt/decrypt any subsequent sent/received message.

Another example is given by the possibility to replicate within Noise the well established Signal's X3DH key-agreement protocols, thus making the latter a general framework to design and study security of many practical and widespread DH-based key-exchange protocols.

The Noise State Objects

We mentioned multiple times that parties derive an encryption key each time they perform a DH operation, but how does this work in more details?

Noise defines three state objects: a Handshake State, a Symmetric State and a Cipher State, each encapsulated into each other and instantiated during the execution of a handshake.

The Handshake State object stores the user's and other party's received ephemeral and static keys (if any) and embeds a Symmetric State object.

The Symmetric State, instead, stores a handshake hash value h, iteratively updated with any message read/received and DH secret computed, and a chaining key ck, updated using a key derivation function every time a DH secret is computed. This object further embeds a Cipher State.

Lastly, the Cipher State stores a symmetric encryption key k and a counter n used to encrypt and decrypt messages exchanged during the handshake (not only static keys, but also arbitrary payloads). This key and counter are refreshed every time the chaining key is updated.

While processing each handshake's message pattern token, all these objects are updated according to some specific processing rules which employ a combination of public-key primitives, hash and key-derivation functions and symmetric ciphers. It is important to note, however, that at the end of each processed message pattern, the two users will share the same Symmetric and Cipher State embedded in their respective Handshake States.

Once a handshake is complete, users derive two new Cipher States and can then discard the Handshake State object (and, thus, the embedded Symmetric State and Cipher State objects) employed during the handshake.

These two Cipher States are used to encrypt and decrypt all outbound and inbound after-handshake messages, respectively, and only these messages are granted the confidentiality, authenticity, integrity and identity-hiding properties we detailed above.

For more details on processing rules, we refer to Noise specifications.

Supported Noise Handshakes in Waku

The Noise handshakes we provided support to in Waku address four typical scenarios occurring when an encrypted communication channel between Alice and Bob is going to be created:

  • Alice and Bob know each other's static key.
  • Alice knows Bob's static key.
  • Alice and Bob share no key material and they don't know each other's static key.
  • Alice and Bob share some key material, but they don't know each other's static key.

The possibility to have handshakes based on the reciprocal knowledge parties have of each other allows designing Noise handshakes that can quickly reach the desired level of security on exchanged encrypted messages while keeping the number of interactions between Alice and Bob to a minimum.

Nonetheless, due to the pure token-based nature of handshake processing rules, implementations can easily add support to any custom handshake pattern with minor modifications, in case more specific application use-cases need to be addressed.

On a side note, we already mentioned that identity-hiding properties can be guaranteed against a passive attacker that only reads the communication occurring between Alice and Bob. However, an active attacker who compromised one party's static key and actively interferes with the parties' exchanged messages, may lower the identity-hiding security guarantees provided by some handshake patterns. In our security model we exclude such adversary, but, for completeness, in the following we report a summary of possible de-anonymization attacks that can be performed by such an active attacker.

For more details on supported handshakes and on how these are implemented in Waku, we refer to 35/WAKU2-NOISE RFC.

The K1K1 Handshake

If Alice and Bob know each others' static key (e.g., these are public or were already exchanged in a previous handshake) , they MAY execute a K1K1 handshake. In Noise notation (Alice is on the left) this can be sketched as:

 K1K1:
-> s
<- s
...
-> e
<- e, ee, es
-> se

We note that here only ephemeral keys are exchanged. This handshake is useful in case Alice needs to instantiate a new separate encrypted communication channel with Bob, e.g. opening multiple parallel connections, file transfers, etc.

Security considerations on identity-hiding (active attacker): no static key is transmitted, but an active attacker impersonating Alice can check candidates for Bob's static key.

The XK1 Handshake

Here, Alice knows how to initiate a communication with Bob and she knows his public static key: such discovery can be achieved, for example, through a publicly accessible register of users' static keys, smart contracts, or through a previous public/private advertisement of Bob's static key.

A Noise handshake pattern that suits this scenario is XK1:

 XK1:
<- s
...
-> e
<- e, ee, es
-> s, se

Within this handshake, Alice and Bob reciprocally authenticate their static keys s using ephemeral keys e. We note that while Bob's static key is assumed to be known to Alice (and hence is not transmitted), Alice's static key is sent to Bob encrypted with a key derived from both parties ephemeral keys and Bob's static key.

Security considerations on identity-hiding (active attacker): Alice's static key is encrypted with forward secrecy to an authenticated party. An active attacker initiating the handshake can check candidates for Bob's static key against recorded/accepted exchanged handshake messages.

The XX and XXpsk0 Handshakes

If Alice is not aware of any static key belonging to Bob (and neither Bob knows anything about Alice), she can execute an XX handshake, where each party transmits to the other its own static key.

The handshake goes as follows:

 XX:
-> e
<- e, ee, s, es
-> s, se

We note that the main difference with XK1 is that in second step Bob sends to Alice his own static key encrypted with a key obtained from an ephemeral-ephemeral Diffie-Hellman exchange.

This handshake can be slightly changed in case both Alice and Bob pre-share some secret psk which can be used to strengthen their mutual authentication during the handshake execution. One of the resulting protocols, called XXpsk0, goes as follows:

 XXpsk0:
-> psk, e
<- e, ee, s, es
-> s, se

The main difference with XX is that Alice's and Bob's static keys, when transmitted, would be encrypted with a key derived from psk as well.

Security considerations on identity-hiding (active attacker): Alice's static key is encrypted with forward secrecy to an authenticated party for both XX and XXpsk0 handshakes. In XX, Bob's static key is encrypted with forward secrecy but is transmitted to a non-authenticated user which can then be an active attacker. In XXpsk0, instead, Bob's static key is protected by forward secrecy to a partially authenticated party (through the pre-shared secret psk but not through any static key), provided that psk was not previously compromised (in such case the identity-hiding properties provided by the XX handshake apply).

Session Management and Multi-Device Support

When two users complete a Noise handshake, an encryption/decryption session - or Noise session - consisting of two Cipher States is instantiated.

By identifying each Noise session with a session-id derived from the handshake's cryptographic material, we can take advantage of the PubSub/GossipSub protocols used by Waku for relaying messages in order to manage instantiated Noise sessions.

The core idea is to exchange after-handshake messages (encrypted with a Cipher State specific to the Noise session), over a content topic derived from the (secret) session-id the corresponding session refers to.

This allows decoupling the handshaking phase from the actual encrypted communication, thus improving users' identity-hiding capabilities.

Furthermore, by publicly revealing a value derived from session-id on the corresponding session content topic, a Noise session can be marked as stale, enabling peers to save resources by discarding any eventually stored message sent to such content topic.

One relevant aspect in today's applications is the possibility for users to employ different devices in their communications. In some cases, this is non-trivial to achieve since, for example, encrypted messages might be required to be synced on different devices which do not necessarily share the necessary key material for decryption and may be temporarily offline.

We address this by requiring each user's device to instantiate multiple Noise sessions either with all user's other devices which, in turn, all together share a Noise session with the other party, or by directly instantiating a Noise session with all other party's devices.

We named these two approaches N11M and NM, respectively, which are in turn loosely based on the paper “Multi-Device for Signal” and Signal’s Sesame Algorithm.

Informally, in the N11M session management scheme, once the first Noise session between any of Alice’s and Bob’s devices is instantiated, its session information is securely propagated to all other devices using previously instantiated Noise sessions. Hence, all devices are able to send and receive new messages on the content topic associated to such session.

In the NM session management scheme, instead, all pairs of Alice's and Bob's devices have a distinct Noise session: a message is then sent from the currently-in-use sender’s device to all recipient’s devices, by properly encrypting and sending it to the content topics of each corresponding Noise session. If sent messages should be available on all sender’s devices as well, we require each pair of sender’s devices to instantiate a Noise session used for syncing purposes.

For more technical details on how Noise sessions are instantiated and managed within these two mechanisms and the different trade-offs provided by the latter, we refer to 37/WAKU2-NOISE-SESSIONS.

Conclusions

In this post we provided an overview of Noise, a protocol framework for designing Diffie-Hellman based key-exchange mechanisms allowing systematic security and threat model analysis.

The flexibility provided by Noise components allows not only to fully replicate with same security guarantees well established key-exchange primitives such as X3DH, currently employed by Status 5/TRANSPORT-SECURITY, but enables also optimizations based on the reciprocal knowledge parties have of each other while allowing easier protocols' security analysis and (formal) verification.

Furthermore, different handshakes can be combined and executed one after each other, a particularly useful feature to authenticate multiple static keys employed by different applications but also to ease keys revocation.

The possibility to manage Noise sessions over multiple devices, and the fact that handshakes can be concretely instantiated using modern, fast and secure cryptographic primitives such as ChaChaPoly and BLAKE2b, make Noise one of the best candidates for efficiently and securely addressing the many different needs of applications built on top of Waku requiring key-agreement.

Future steps

The available implementation of Noise in nwaku, although mostly complete, is still in its testing phase. As future steps we would like to:

  • have an extensively tested and robust Noise implementation;
  • formalize, implement and test performances of the two proposed N11M and NM session management mechanisms and their suitability for common use-case scenarios;
  • provide Waku network nodes a native protocol to readily support key-exchanges, strongly-encrypted communication and multi-device session management mechanisms with none-to-little interaction besides applications' connection requests.

References

]]>
+ + s1fr0 + +
+ + <![CDATA[Waku v2 Ambient Peer Discovery]]> + https://vac.dev/rlog/wakuv2-apd + + 2022-05-09T10:00:00.000Z + + Introducing and discussing ambient peer discovery methods currently used by Waku v2, as well as future plans in this area.

Waku v2 comprises a set of modular protocols for secure, privacy preserving communication. Avoiding centralization, these protocols exchange messages over a P2P network layer. In order to build a P2P network, participating nodes first have to discover peers within this network. This is where ambient peer discovery comes into play: it allows nodes to find peers, making it an integral part of any decentralized application.

In this post the term node refers to our endpoint or the endpoint that takes action, while the term peer refers to other endpoints in the P2P network. These endpoints can be any device connected to the Internet: e.g. servers, PCs, notebooks, mobile devices, or applications like a browser. As such, nodes and peers are the same. We use these terms for the ease of explanation without loss of generality.

In Waku's modular design, ambient peer discovery is an umbrella term for mechanisms that allow nodes to find peers. +Various ambient peer discovery mechanisms are supported, and each is specified as a separate protocol. +Where do these protocols fit into Waku's protocol stack? +The P2P layer of Waku v2 builds on libp2p gossipsub. +Nodes participating in a gossipsub protocol manage a mesh network that is used for routing messages. +This mesh network is an unstructured P2P network +offering high robustness and resilience against attacks. +Gossipsub implements many improvements overcoming the shortcomings typically associated with unstructured P2P networks, e.g. inefficient flooding based routing. +The gossipsub mesh network is managed in a decentralized way, which requires each node to know other participating peers. +Waku v2 may use any combination of its ambient discovery protocols to find appropriate peers.

Summarizing, Waku v2 comprises a peer management layer based on libp2p gossipsub, +which manages the peers of nodes, and an ambient peer discovery layer, +which provides information about peers to the peer management layer.

We focus on ambient peer discovery methods that are in line with our goal of building a fully decentralized, generalized, privacy-preserving and censorship-resistant messaging protocol. +Some of these protocols still need adjustments to adhere to our privacy and anonymity requirements. For now, we focus on operational stability and feasibility. +However, when choosing techniques, we pay attention to selecting mechanisms that can feasibly be tweaked for privacy in future research efforts. +Because of the modular design and the fact that Waku v2 has several discovery methods at its disposal, we could even remove a protocol in case future evaluation deems it not fitting our standards.

This post covers the current state and future considerations of ambient peer discovery for Waku v2, +and gives reason for changes and modifications we made or plan to make. +The ambient peer discovery protocols currently supported by Waku v2 are a modified version of Ethereum's Discovery v5 +and DNS-based discovery. +Waku v2 further supports gossipsub's peer exchange protocol. +In addition, we plan to introduce protocols for general peer exchange and capability discovery, respectively. +The former allows resource restricted nodes to outsource querying for peers to stronger peers, +the latter allows querying peers for their supported capabilities. +Besides these new protocols, we are working on integrating capability discovery in our existing ambient peer discovery protocols.

Static Node Lists

The simplest method of learning about peers in a P2P network is via static node lists. +These can be given to nodes as start-up parameters or listed in a config-file. +They can also be provided in a script-parseable format, e.g. in JSON. +While this method of providing bootstrap nodes is very easy to implement, it requires static peers, which introduce centralized elements. +Also, updating static peer information introduces significant administrative overhead: +code and/or config files have to be updated and released. +Typically, static node lists only hold a small number of bootstrap nodes, which may lead to high load on these nodes.

DNS-based Discovery

Compared to static node lists, +DNS-based discovery (specified in EIP-1459) +provides a more dynamic way of discovering bootstrap nodes. +It is very efficient, can easily be handled by resource restricted devices and provides very good availability. +In addition to a naive DNS approach, Ethereum's DNS-based discovery introduces efficient authentication leveraging Merkle trees.

A further advantage over static node lists is the separation of code/release management and bootstrap node management. +However, changing and updating the list of bootstrap nodes still requires administrative privileges because DNS records have to be added or updated.

While this method of discovery still requires centralized elements, +node list management can be delegated to various DNS zones managed by other entities mitigating centralization.

Discovery V5

A much more dynamic method of ambient peer discovery is Discovery v5, which is Ethereum's peer discovery protocol. +It is based on the Kademlia distributed hashtable (DHT). +An introduction to discv5 and its history, and a discv5 Waku v2 feasibility study +can be found in previous posts on this research log.

We use Discovery v5 as an ambient peer discovery method for Waku v2 because it is decentralized, efficient, actively researched, and has web3 as its main application area. +Discv5 also offers mitigation techniques for various attacks, which we cover later in this post.

Using a DHT (structured P2P network) as a means for ambient peer discovery, while using the gossipsub mesh network (unstructured P2P network) for transmitting actual messages, +Waku v2 leverages advantages from both worlds. +One of the main benefits of DHTs is offering a global view over participating nodes. +This, in turn, allows sampling random sets of nodes which is important for equally distributing load. +Gossipsub, on the other hand, offers great robustness and resilience against attacks. +Even if discv5 discovery should not work in advent of a DoS attack, Waku v2 can still operate switching to different discovery methods.

Discovery methods that use separate P2P networks still depend on bootstrapping, +which Waku v2 does via parameters on start-up or via DNS-based discovery. +This might raise the question of why such discovery methods are beneficial. +The answer lies in the aforementioned global view of DHTs. Without discv5 and similar methods, the bootstrap nodes are used as part of the gossipsub mesh. +This might put heavy load on these nodes and further, might open pathways to inference attacks. +Discv5, on the other hand, uses the bootstrap nodes merely as an entry to the discovery network and can provide random sets of nodes (sampled from a global view) +for bootstrapping or expanding the mesh.

DHT Background

Distributed Hash Tables are a class of structured P2P overlay networks. +A DHT can be seen as a distributed node set of which each node is responsible for a part of the hash space. +In contrast to unstructured P2P networks, e.g. the mesh network maintained by gossipsub, +DHTs have a global view over the node set and the hash space (assuming the participating nodes behave well).

DHTs are susceptible to various kinds of attacks, especially Sybil attacks +and eclipse attacks. +While security aspects have been addressed in various research papers, general practical solutions are not available. +However, discv5 introduced various practical mitigation techniques.

Random Walk Discovery

While discv5 is based on the Kademlia DHT, it only uses the distributed node set aspect of DHTs. It does not map values (items) into the distributed hash space. This makes sense, because the main purpose of discv5 is discovering other nodes that support discv5, which are expected to be Ethereum nodes. Ethereum nodes that want to discover other Ethereum nodes simply query the discv5 network for a random set of peers. If Waku v2 did the same, only a small subset of the retrieved nodes would support Waku v2.

A first naive solution for Waku v2 discv5 discovery is

  • retrieve a random node set, which is achieved by querying for a set of randomly chosen node IDs
  • filter the returned nodes on the query path based on Waku v2 capability via the Waku v2 ENR
  • repeat until enough Waku v2 capable nodes are found

This query process boils down to random walk discovery, which is very resilient against attacks, but also very inefficient if the number of nodes supporting the desired capability is small. We refer to this as the needle-in-the-haystack problem.

Random Walk Performance Estimation

This subsection provides a rough estimation of the overhead introduced by random walk discovery.

Given the following parameters:

  • $n$ — number of total nodes participating in discv5
  • $p$ — percentage of nodes supporting Waku
  • $W$ — the event of having at least one Waku node in a random sample
  • $k$ — the size of a random sample (default = 16)
  • $\alpha$ — the number of parallel queries started
  • $b$ — bits per hop
  • $q$ — the number of queries

A query takes $\log_{2^b} n$ hops to retrieve a random sample of nodes.

$P(W) = 1 - (1 - p/100)^{k}$ is the probability of having at least one Waku node in the sample.

$P(W^q) = 1 - (1 - p/100)^{kq}$ is the probability of having at least one Waku node in the union of $q$ samples.

Expressing this in terms of $q$, we can write: $P(W^q) = 1 - (1 - p/100)^{kq} \iff q = \log_{(1 - p/100)^{k}}\bigl(1 - P(W^q)\bigr)$

Figure 1 shows a log-log plot for $P(W^q) = 90\%$.

Figure 1: log-log plot showing the number of queries necessary to retrieve a Waku v2 node with a probability of 90% in relation to the Waku v2 node concentration in the network.

Assuming $p = 0.1$, we would need

$0.9 = 1 - (1 - 0.1/100)^{16q} \Rightarrow q \approx 144$

queries to get a Waku node with 90% probability, which leads to $\approx 144 \cdot 18 = 2592$ overlay hops. Choosing $b = 3$ would reduce the number to $\approx 144 \cdot 6 = 864$. Even when choosing $\alpha = 10$ we would have to wait at least 80 RTTs. This effort is just for retrieving a single Waku node. Ideally, we want at least 3 Waku nodes for bootstrapping a Waku relay.

The discv5 doc roughly estimates $p = 1\%$ to be the threshold for acceptably efficient random walk discovery. This is in line with our estimation:

$0.9 = 1 - (1 - 1/100)^{16q} \Rightarrow q \approx 14$

The number of necessary queries is linearly dependent on the percentage $p$ of Waku nodes. The number of hops per query is logarithmically dependent on $n$. Thus, random walk searching is inefficient for small percentages $p$. Still, random walks are more resilient against attacks.

We can conclude that a Waku node concentration below 1% renders vanilla discv5 unfit for our needs. +Our current solution and future plans for solving this issue are covered in the next subsections.

Simple Solution: Separate Discovery Network

The simple solution we currently use for Waku v2 discv5 is a separate discv5 network. +All (well behaving) nodes in this network support Waku v2, resulting in a very high query efficiency. +However, this solution reduces resilience because the difficulty of attacking a DHT scales with the number of participating nodes.

Discv5 Topic Discovery

We did not base our solution on the current version of discv5 topic discovery, +because, similar to random walk discovery, it suffers from poor performance for relatively rare capabilities/topics.

However, there is ongoing research in discv5 topic discovery which is close to ideas we explored when pondering efficient and resilient Waku discv5 solutions. +We keep a close eye on this research, give feedback, and make suggestions, as we plan to switch to this version of topic discovery in the future.

In a nutshell, topic discovery will manage separate routing tables for each topic. +These topic specific tables are initialized with nodes from the discv5 routing table. +While the buckets of the discv5 routing table represent distance intervals from the node's node ID, the topic table buckets represent distance intervals from topic IDs.

Nodes that want to register a topic try to register that topic at one random peer per bucket. +This leads to registering the topic at peers in closer and closer neighbourhoods around the topic ID, which +yields a very efficient and resilient compromise between random walk discovery and DHT discovery. +Peers in larger neighbourhoods around the topic ID are less efficient to discover, however more resilient against eclipse attacks and vice versa.

Further, this works well with the overload and DoS protection discv5 employs. Discv5 limits the number of nodes registered per topic on a single peer. Further, discv5 enforces a waiting time before nodes can register topics at peers. So, for popular topics, a node might fail to register the topic in a close neighbourhood. However, because the topic is popular (has a high occurrence percentage $p$), it can still be efficiently discovered.

In the future, we also plan to integrate Waku v2 capability discovery, which will not only allow asking for nodes that support Waku v2, +but asking for Waku v2 nodes supporting specific Waku v2 protocols like filter or store. +For the store protocol we envision sub-capabilities reflecting message topics and time frames of messages. +We will also investigate related security implications.

Attacks on DHTs

In this post, we only briefly describe common attacks on DHTs. These attacks are mainly used for denial of service (DoS), but can also be used as parts of more sophisticated attacks, e.g. deanonymization attacks. A future post on this research log will cover security aspects of ambient peer discovery with a focus on privacy and anonymity.

Sybil Attack

The power of an attacker in a DHT is proportional to the number of controlled nodes. +Controlling nodes comes at a high resource cost and/or requires controlling a botnet via a preliminary attack.

In a Sybil attack, an attacker generates lots of virtual node identities. +This allows the attacker to control a large portion of the ID space in a DHT at a relatively low cost. +Sybil attacks are especially powerful when the attacker can freely choose the IDs of generated nodes, +because this allows positioning at chosen points in the DHT.

Because Sybil attacks amplify the power of many attacks against DHTs, making Sybil attacks as difficult as possible is the basis for resilient DHT operation. The typical abstract mitigation approach is binding node identities to physical network interfaces. To some extent, this can be achieved by introducing IP address based limits. Further, generating node IDs can be bound by proof of work (PoW), which, however, comes with a set of shortcomings, e.g. relatively high costs on resource restricted devices. The discv5 doc describes both Sybil and eclipse attacks, as well as concrete mitigation techniques employed by discv5.

Eclipse Attack

In an eclipse attack, nodes controlled by the attacker poison the routing tables of other nodes in a way that parts of the DHT become eclipsed, i.e. invisible. When a controlled node is asked for the next step in a path, it provides another controlled node as the next step, effectively navigating the querying node around or away from certain areas of the DHT. While several mitigation techniques have been researched, there is no definitive protection against eclipse attacks available as of yet. One mitigation technique is increasing $\alpha$, the number of parallel queries, and following each concurrent path independently for the lookup.

The eclipse attack becomes very powerful in combination with a successful Sybil attack; +especially when the attacker can freely choose the position of the Sybil nodes.

The aforementioned new topic discovery of discv5 provides a good balance between protection against eclipse attacks and query performance.

Peer Exchange Protocol

While discv5 based ambient peer discovery has many desirable properties, resource restricted nodes and nodes behind restrictive NAT setups cannot run discv5 satisfactorily. With these nodes in mind, we started working on a simple peer exchange protocol based on ideas proposed here. The peer exchange protocol will allow nodes to ask peers for additional peers. Similar to discv5, the peer exchange protocol will also support capability discovery.

The new peer exchange protocol can be seen as a simple replacement for the Rendezvous protocol, which Waku v2 does not support. +While the rendezvous protocol involves nodes registering at rendezvous peers, the peer exchange protocol simply allows nodes to ask any peer for a list of peers (with a certain set of capabilities). +Rendezvous tends to introduce centralized elements as rendezvous peers have a super-peer role.

In the future, we will investigate resource usage of Waku v2 discv5 and provide suggestions for the minimal resources nodes should have to run discv5 satisfactorily.

Waku v2 comprises further protocols related to ambient peer discovery. We shortly mention them for context, even though they are not strictly ambient peer discovery protocols.

Gossipsub Peer Exchange Protocol

Gossipsub provides an integrated peer exchange mechanism which is also supported by Waku v2. +Gossipsub peer exchange works in a push manner. Nodes send peer lists to peers they prune from the active mesh. +This pruning is part of the gossipsub peer management, blurring the boundaries of peer management and ambient peer discovery.

We will investigate anonymity implications of this protocol and might disable it in favour of more anonymity-preserving protocols. +Sending a list of peers discloses information about the sending node. +We consider restricting these peer lists to cached peers that are currently not used in the active gossipsub mesh.

Capability Negotiation

Some of the ambient peer discovery methods used by Waku v2 will support capability discovery. This allows narrowing down the set of retrieved peers to peers that support specific capabilities. This is efficient because it avoids establishing connections to nodes that we are not interested in.

However, the ambient discovery interface does not require capability discovery, which will lead to nodes having peers with unknown capabilities in their peer lists. +We work on a capability negotiation protocol which allows nodes to ask peers

  • for their complete list of capabilities, and
  • whether they support a specific capability

We will investigate security implications, especially when sending full capability lists.

NAT traversal

For NAT traversal, Waku v2 currently supports the port mapping protocols UPnP and NAT-PMP / PCP.

In the future, we plan to add support for parts of ICE, e.g. STUN. +We do not plan to support TURN because TURN relays would introduce a centralized element. +A modified decentralized version of TURN featuring incentivization might be an option in the future; +strong peers could offer a relay service similar to TURN.

There are plans to integrate more NAT traversal into discv5, in which we might participate. +So far, the only traversal technique supported by discv5 is nodes receiving their external IP address in pong messages.

While NAT traversal is very important, adding more NAT traversal techniques is not a priority at the moment. +Nodes behind restrictive symmetric NAT setups cannot be discovered, but they can still discover peers in less restrictive setups. +While we wish as many nodes as possible to be discoverable via ambient peer discovery, two nodes behind a restrictive symmetric NAT can still exchange Waku v2 messages if they discovered a shared peer. +This is one of the nice resilience-related properties of flooding-based routing algorithms.

For mobile nodes, which suffer from changing IP addresses and double NAT setups, we plan using the peer exchange protocol to ask peers for more peers. +Besides saving resources on resource restricted devices, this approach works as long as peers are in less restrictive environments.

Conclusion and Future Prospects

Ambient peer discovery is an integral part of decentralized applications. It allows nodes to learn about peers in the network. +As of yet, Waku v2 supports DNS-based discovery and a slightly modified version of discv5. +We are working on further protocols, including a peer exchange protocol that allows resource restricted nodes to ask stronger peers for peer lists. +Further, we are working on adding capability discovery to our ambient discovery protocols, allowing nodes to find peers with desired properties.

These protocols can be combined in a modular way and allow Waku v2 nodes to build a strong and resilient mesh network, +even if some discovery methods are not available in a given situation.

We will investigate security properties of these discovery mechanisms with a focus on privacy and anonymity in a future post on this research log. +As an outlook we can already state that DHT approaches typically allow inferring information about the querying node. +Further, sending peer lists allows inferring the position of a node within the mesh, and by extension information about the node. +Waku v2 already provides some mitigation, because the mesh for transmitting actual messages, and the peer discovery network are separate. +To mitigate information leakage by transmitting peer lists, we plan to only reply with lists of peers that nodes do not use in their active meshes.


References

]]>
+ + Daniel + +
+ + <![CDATA[Introducing nwaku]]> + https://vac.dev/rlog/introducing-nwaku + + 2022-04-12T10:00:00.000Z + + Introducing nwaku, a Nim-based Waku v2 client, including a summary of recent developments and preview of current and future focus areas.

Background

If you've been following our research log, +you'll know that many things have happened in the world of Waku v2 since our last general update. +In line with our long term goals, +we've introduced new protocols, +tweaked our existing protocols +and expanded our team. +We've also shown in a series of practical experiments that Waku v2 does indeed deliver on some of the theoretical advantages it was designed to have over its predecessor, Waku v1. +A sustainability and business workshop led to the formulation of a clearer vision for Vac as a team.

From the beginning, our protocol development has been complemented by various client implementations of these protocols, +first in Nim, +but later also in JavaScript +and Go. +A follow-up post will clarify the purposes, similarities and differences between these three clients. +The Nim client is our reference implementation, +developed by the research team in parallel with the specs +and building on a home-grown implementation of libp2p. +The Nim client is suitable to run as a standalone adaptive node, +managed by individual operators +or as an encapsulated service node in other applications. +This post looks at some recent developments within the Nim client.

1. nim-waku is now known as nwaku

Pronounced NWHA-koo. +You may already have seen us refer to "nwaku" on Vac communication channels, +but it is now official: +The nim-waku Waku v2 client has been named nwaku. +Why? Well, we needed a recognizable name for our client that could easily be referred to in everyday conversations +and nim-waku just didn't roll off the tongue. +We've followed the example of the closely related nimbus project to find a punchier name +that explicitly links the client to both the Waku set of protocols and the Nim language.

2. Improvements in stability and performance

The initial implementation of Waku v2 demonstrated how the suite of protocols can be applied +to form a generalized, peer-to-peer messaging network, +while addressing a wide range of adaptive requirements. +This allowed us to lift several protocol specifications from raw to draft status, +indicating that a reference implementation exists for each. +However, as internal dogfooding increased and more external applications started using nwaku, +we stepped up our focus on the client's stability and performance. +This is especially true where we want nwaku to run unsupervised in a production environment +without any degradation in the services it provides.

Some of the more significant productionization efforts over the last couple of months included:

  1. Reworking the store implementation to maintain stable memory usage +while storing historical messages +and serving multiple clients querying history simultaneously. +Previously, a store node would see gradual service degradation +due to inefficient memory usage when responding to history queries. +Queries that often took longer than 8 mins now complete in under 100 ms.

  2. Improved peer management. +For example, filter nodes will now remove unreachable clients after a number of connection failures, +whereas they would previously keep accumulating dead peers.

  3. Improved disk usage. +nwaku nodes that persist historical messages on disk now manage their own storage size based on the --store-capacity. +This can significantly improve node start-up times.

More stability issues may be addressed in future as nwaku matures, +but we've noticed a marked improvement in the reliability of running nwaku nodes. +These include environments where nwaku nodes are expected to run with a long uptime. +Vac currently operates two long-running fleets of nwaku nodes, wakuv2.prod and wakuv2.test, +for internal dogfooding and +to serve as experimental bootstrapping nodes. +Status has also recently deployed similar fleets for production and testing based on nwaku. +Our goal is to have nwaku be stable, performant and flexible enough +to be an attractive option for operators to run and maintain their own Waku v2 nodes. +See also the future work section below for more on our general goal of nwaku for operators.

3. Improvements in interoperability

We've implemented several features that improve nwaku's usability in different environments +and its interoperability with other Waku v2 clients. +One major step forward here was adding support for both secure and unsecured WebSocket connections as libp2p transports. +This allows direct connectivity with js-waku +and paves the way for native browser usage. +We've also added support for parsing and resolving DNS-type multiaddrs, +i.e. multiaddress protocol schemes dns, dns4, dns6 and dnsaddr. +A nwaku node can now also be configured with its own IPv4 DNS domain name +allowing dynamic IP address allocation without impacting a node's reachability by its peers.

4. Peer discovery

Peer discovery is the method by which nodes become aware of each other’s existence. +The question of peer discovery in a Waku v2 network has been a focus area since the protocol was first conceptualized. +Since then several different approaches to discovery have been proposed and investigated. +We've implemented three discovery mechanisms in nwaku so far:

DNS-based discovery

nwaku nodes can retrieve an authenticated, updateable list of peers via DNS to bootstrap connection to a Waku v2 network. +Our implementation is based on EIP-1459.

GossipSub peer exchange

GossipSub Peer Exchange (PX) is a GossipSub v1.1 mechanism +whereby a pruning peer may provide a pruned peer with a set of alternative peers +to which it can connect to re-form its mesh. +This is a very suitable mechanism to gradually discover more peers +from an initial connection to a small set of bootstrap peers. +It is enabled in a nwaku node by default.

Waku Node Discovery Protocol v5

This is a DHT-based discovery mechanism adapted to store and relay node records. +Our implementation is based on Ethereum's Discovery v5 protocol +with some minor modifications to isolate our discovery network from that of Ethereum. +The decision to separate the Waku Discovery v5 network from Ethereum's was made on considerations of lookup efficiency. +This comes at a possible tradeoff in network resilience. +We are considering merging with the Ethereum Discovery v5 network in future, +or even implementing a hybrid solution. +This post explains the decision and future steps.

5. Spam protection using RLN

An early addition to our suite of protocols was an extension of 11/WAKU-RELAY +that provided spam protection using Rate Limiting Nullifiers (RLN). +The nwaku client now contains a working demonstration and integration of RLN relay. +Check out this tutorial to see the protocol in action using a toy chat application built on nwaku. +We'd love for people to join us in dogfooding RLN spam protection as part of our operator incentive testnet. +Feel free to join our Vac Discord server +and head to the #rln channel for more information.

Future work

As we continue working towards our goal of a fully decentralized, generalized and censorship-resistant messaging protocol, +these are some of the current and future focus areas for nwaku:

Reaching out to operators:

We are starting to push for operators to run and maintain their own Waku v2 nodes, +preferably contributing to the default Waku v2 network as described by the default pubsub topic (/waku/2/default-waku/proto). +Amongst other things, a large fleet of stable operator-run Waku v2 nodes will help secure the network, +provide valuable services to a variety of applications +and ensure the future sustainability of both Vac as a research organization and the Waku suite of protocols.

We are targeting nwaku as the main option for operator-run nodes.
Specifically, we aim to provide through nwaku:

  1. a lightweight and robust Waku v2 client. +This client must be first in line to support innovative and new Waku v2 protocols, +but configurable enough to serve the adaptive needs of various operators.
  2. an easy-to-follow guide for operators to configure, +set up and maintain their own nodes
  3. a set of operator-focused tools to monitor and maintain a running node

Better conversational security layer guarantees

Conversational security guarantees in Waku v2 are currently designed around the Status application. +Developers building their own applications on top of Waku would therefore +either have to reimplement a set of tools similar to Status +or build their own security solutions on the application layer above Waku. +We are working on a set of features built into Waku +that will provide the general security properties Waku users may desire +and do so in a modern and simple way. +This is useful for applications outside of Status that want similar security guarantees. +As a first step, we've already made good progress toward integrating noise handshakes as a key exchange mechanism in Waku v2.

Protocol incentivization

We want to design incentivization around our protocols to encourage desired behaviors in the Waku network, +rewarding nodes providing costly services +and punishing adversarial actions. +This will increase the overall security of the network +and encourage operators to run their own Waku nodes. +In turn, the sustainability of Vac as an organization will be better guaranteed. +As such, protocol incentivization was a major focus in our recent Vac Sustainability and Business Workshop. +Our first step here is to finish integrating RLN relay into Waku +with blockchain interaction to manage members, +punish spammers +and reward spam detectors. +After this, we want to design monetary incentivization for providers of store, lightpush and filter services. +This may also tie into a reputation mechanism for service nodes based on a network-wide consensus on service quality. +A big challenge for protocol incentivization is doing it in a private fashion, +so we can keep similar metadata protection guarantees as the Waku base layer. +This ties into our focus on Zero Knowledge tech.

Improved store capacity

The nwaku store currently serves as an efficient in-memory store for historical messages, +dimensioned by the maximum number of messages the store node is willing to keep. +This makes the nwaku store appropriate for keeping history over a short term +without any time-based guarantees, +but with the advantage of providing fast responses to history queries. +Some applications, such as Status, require longer-term historical message storage +with time-based dimensioning +to guarantee that messages will be stored for a specified minimum period. +Because of the relatively high cost of memory compared to disk space, +a higher capacity store, with time guarantees, should operate as a disk-only database of historical messages. +This is an ongoing effort.

Multipurpose discovery

In addition to the three discovery methods already implemented in nwaku, +we are working on improving discovery on at least three fronts:

Capability discovery:

Waku v2 nodes may be interested in peers with specific capabilities, for example:

  1. peers within a specific pubsub topic mesh,
  2. peers with store capability,
  3. store peers with x days of history for a specific content topic, etc.

Capability discovery entails mechanisms by which such capabilities can be advertised and discovered/negotiated. +One major hurdle to overcome is the increased complexity of finding a node with specific capabilities within the larger network (a needle in a haystack). +See the original problem statement for more.

Improvements in Discovery v5

Of the implemented discovery methods, +Discovery v5 best addresses our need for a decentralized and scalable discovery mechanism. +With the basic implementation done, +there are some improvements planned for Discovery v5, +including methods to increase security such as merging with the Ethereum Discovery v5 network, +introducing explicit NAT traversal +and utilizing topic advertisement. +The Waku v2 Discovery v5 Roadmap contains more details.

Generalized peer exchange

nwaku already implements GossipSub peer exchange. +We now need a general request-response mechanism outside of GossipSub +by which a node may learn about other Waku v2 nodes +by requesting and receiving a list of peers from a neighbor. +This could, for example, be a suitable way for resource-restricted devices to request a stronger peer +to perform a random Discovery v5 lookup on their behalf +or simply to be informed of a subset of the peers known to that neighbor. +See this issue for more.


This concludes a general outline of some of the main recent developments in the nwaku client +and a summary of the current and future focus areas. +Much more is happening behind the scenes, of course, +so for more information, or to join the conversation, +feel free to join our Vac Discord server +or to check out the nwaku repo on Github. +You can also view the changelog for past releases here.

References

]]>
+ + Hanno Cornelius + +
+ + <![CDATA[Opinion: Pseudo-ethics in the Surveillance Tech Industry]]> + https://vac.dev/rlog/ethics-surveillance-tech + + 2021-12-03T10:00:00.000Z + + A look at typical ethical shortfalls in the global surveillance tech industry.

This is an opinion piece by pseudonymous contributor, circe.

Preface

The Vac team aims to provide a public good in the form of freely available, open source tools and protocols for decentralized communication. +As such, we value our independence and the usefulness of our protocols for a wide range of applications. +At the same time, we realize that all technical development, including ours, has a moral component. +As a diverse team we are guided by a shared devotion to the principles of human rights and liberty. +This explains why we place such a high premium on security, censorship-resistance and privacy - +a stance we share with the wider Status Network. +The post below takes a different approach from our usual more technical analyses, +by starting to peel back the curtain on the ethical shortfalls of the global surveillance tech industry.

Spotlight on an industry

Apple's announcement of their lawsuit against Israel's NSO Group +marks the latest in a series of recent setbacks for the surveillance tech company. +In early November, the United States blacklisted the firm, +citing concerns about the use of their spyware by foreign governments targeting civilians such as "journalists, businesspeople, activists" and more. +The company is already embroiled in a lawsuit with Whatsapp +over their exploit of the chat app's video calling service to install malware on target devices. +NSO Group's most infamous product, Pegasus, operates as a hidden exploit installed on victims' mobile phones, +sometimes without even requiring as much as an unguarded click on a malicious link. +It has the potential to lay bare, and report to its owners, everything within the reach of the infected device. +For most people this amounts to a significant portion of their private lives and thoughts. +Pegasus can read your private messages (even encrypted), collect your passwords, record calls, track your location and access your device's microphone and camera. +No activity or application on an infected phone would be hidden.

The latest controversies are perhaps less because of the novelty of the revelations - +the existence of Pegasus has been known to civil activists since at least 2016. +Rather, the public was reminded again of the potential scope of surveillance tech +in the indiscriminate use of Pegasus on private citizens. +This has far-reaching implications for human freedoms worldwide. +Earlier this year, a leaked list of over 50,000 targets, or possible targets, of Pegasus included +the phone numbers of human rights advocates, independent journalists, lawyers and political activists. +This should have come as no surprise. +The type of autocratically inclined agents, and governments, who would venture to buy and use such invasive cyber-arms often target those they find politically inconvenient. +Pegasus, and similar technologies, simply extend the reach and capacity of such individuals and governments - +no border or distance, no political rank or social advantage, no sanctity of profession or regard for dignity, +provide any indemnity from becoming a victim. +Your best hope is to remain uninteresting enough to escape consideration.

The NSO Group has, of course, denied allegations of culpability and questions the authenticity of the list. +At this stage, the latter is almost beside the point: +Amnesty International's cybersecurity team, Security Lab, did find forensic evidence of Pegasus on the phones of several volunteers whose numbers appeared on the original list, +including those of journalists and human rights activists. +(Security Lab has since opened up their infection finding tool to the public.) +French intelligence has similarly inspected and confirmed infection of at least three devices belonging to journalists. +The phones of several people who were close to the Saudi-American journalist, Jamal Khashoggi, were confirmed hacked +both before and after Khashoggi's brutal murder at the Saudi embassy in Istanbul in 2018. +More reports of confirmed Pegasus hacks are still published with some regularity. +It is now an open secret that many authoritarian governments have bought Pegasus. +It's not difficult to extrapolate from existing reports and such clients' track records +what the potential injuries to human freedoms are that they can inflict with access to such a powerful cyberweapon.

A typical response

NSO's response to the allegations follows a textbook approach +of avoiding earnest ethical introspection on the manufacturing, and selling, of cyber-arms. +Firstly, shift ethical responsibility to a predetermined process, a list of checkboxes of your own making. +The Group, for example, claims to sell only to "vetted governments", following a classification process +of which they have now published some procedural details but no tangible criteria. +The next step is to reaffirm continuously, and repetitively, your dedication to the legal combat against crime, +"legitimate law enforcement agencies" (note the almost tautological phrasing), +adherence to international arms trade laws, +compliance clauses in customer contracts, etc. +Thirdly, having been absolved of any moral suspicions that might exist about product and process, +from conception to engineering to trade, +distance yourself from the consequences of its use in the world. +"NSO does not operate its technology, does not collect, nor possesses, nor has any access to any kind of data of its customers." +It is interesting that directly after this statement they claim with contradictory confidence that +their "technology was not associated in any way with the heinous murder of Jamal Khashoggi". +The unapologetic tone seems hardly appropriate when the same document confirms that the Group had to +shut down customers' systems due to "confirmed misuse" and have had to do so "multiple times" in the past. +Given all this, the response manages to evade any serious interrogation of the "vetting" process itself, +which forced the company to reject "approximately 15% of potential new opportunities for Pegasus" in one year. +Courageous.

We have heard this all before. +There exists a multi-billion dollar industry of private companies and engineering firms thriving on proceeds from +selling surveillance tools and cyber-arms to dubious agencies and foreign governments. +In turn, the most power-hungry and oppressive regimes often rely on such technological innovations - +for which they lack the in-country engineering expertise - +to maintain control, suppress uprisings, intimidate opposing journalists, and track their citizens. +It's a lucrative business opportunity, and resourceful companies have sprung up everywhere to supply this demand, +often in countries where citizens, including employees of the company, would be horrified if they were similarly subject to the oppressions of their own products. +When, in 2014, Italy's HackingTeam were pulsed by the United Nations about their (then alleged) selling of spyware to Sudan, +which would have been a contravention of the UN's weapon export ban, +they simply replied that their product was not controlled as a weapon and therefore not subject to such scrutiny. +They remained within their legal bounds, technically. +Furthermore, they similarly shifted ethical responsibility to external standards of legitimacy, +claiming their "software is not sold to governments that are blacklisted by the EU, the US, NATO, and similar international organizations". +When the company themselves were hacked in 2015, +revelations (confirmations, that is) of widespread misuse by repressive governments were damaging enough to force them to disappear and rebrand as Memento Labs. +Their website boasts an impressive list of statutes, regulations, procedures, export controls and legal frameworks, +all of which the rebranded hackers proudly comply with. +Surely no further ethical scrutiny is necessary?

Ethics != the law

The law is trailing behind

Such recourse to the legality of your action as ethical justification is moot for several reasons. +The first is glaringly obvious - +our laws are ill-equipped to address the implications of modern technology. +Legal systems are a cumbersome inheritance built over generations. +This is especially true of the statutes and regulations governing international trade, behind which these companies so often hide. +Our best legal systems are trailing miles behind the technology for which we seek guidelines. +Legislators are still struggling to make sense of technologies like face recognition, +the repercussions of smart devices acting "on their own" and biases in algorithms. +To claim you are performing ethical due diligence by resorting to an outdated and incomplete system of legal codes is disingenuous.

The law depends on ethics

The second reason is more central to my argument, +and an important flaw in these sleight-of-hand justifications appearing from time to time in the media. +Ethics can in no way be confused as synonymous with legality or legitimacy. +These are incommensurable concepts. +In an ideal world, of course, the law is meant to track the minimum standards of ethical conduct in a society. +Laws are often drafted exactly from some ethical, and practical, impulse to minimize harmful conduct +and provide for corrective and punitive measures where transgressions do occur. +The law, however, has a much narrower scope than ethics. +It can be just or unjust. +In fact, it is in need of ethics to constantly reform. +Ethics and values are born out of collective self-reflection. +They develop in our conversation with ourselves and others about the type of society we strive for. +As such, an ethical worldview summarizes our deepest intuitions about how we should live and measure our impact on the world. +For this reason, ethics is primarily enforced by social and internal pressures, not legal boundaries - +our desire to do what ought to be done, however we define that. +Ethics is therefore a much grander scheme than global legal systems +and the diplomatic frameworks that grant legitimacy to governments. +These are but one limited outflow of the human aspiration to form societies in accordance with our ideologies and ethics.

International law is vague and exploitable

Of course, the cyber-arms trade has a favorite recourse, international law, which is even more limited. +Since such products are seldomly sold to governments and agencies within the country of production, +it enables a further distancing from consequences. +Many private surveillance companies are based in fairly liberal societies with (seemingly) strict emphases on human rights in their domestic laws. +International laws are much more complicated - for opportunists a synonym for "more grey areas in which to hide". +Company conduct can now be governed, and excused, by a system that follows +the whims of autocrats with exploitative intent and vastly different ethical conceptions from the company's purported aims. +International law, and the ways it is most often enforced by way of, say, UN-backed sanctions, +have long been shaped by the compromises of international diplomacy. +To be blunt: these laws are weak and subject to exactly the sort of narrow interests behind which mercenaries have always hidden. +The surveillance tech industry is no exception.

Conclusion

My point is simple: +selling cyber-arms with the potential to become vast tools of oppression to governments and bodies with blatant histories of human rights violations, +and all but the publicly announced intention to continue operating in this way, +is categorically unconscionable. +This seems obvious no matter what ethics system you argue from, +provided it harbors any consideration for human dignity and freedom. +It is a sign of poor moral discourse that such recourses to law and legitimacy are often considered synonymous with ethical justification. +"I have acted within the bounds of law", "We supply only to legitimate law enforcement agencies", etc. are no substitutes. +Ethical conduct requires an honest evaluation of an action against some conception of "the good", +however you define that. +Too often the surveillance tech industry precisely sidesteps this question, +both in internal processes and external rationalisations to a concerned public.

John Locke, he of the life-liberty-and-property, articulated the idea that government exists solely through the consent of the governed. +Towards the end of the 17th century, he wrote in his Second Treatise on Civil Government, +"[w]henever legislators endeavor to take away, +and destroy the property of the people, or to reduce them to slavery under arbitrary power, +they put themselves in a state of war with the people, who are thereupon absolved from any further obedience". +The inference is straightforward and humanist in essence: +legitimacy is not something that is conferred by governments and institutions. +Rather, they derive their legitimacy from us, their citizens, holding them to standards of ethics and societal ideals. +This legitimacy only remains intact as long as this mandate is honored and continuously extended by a well-informed public. +This is the principle of informed consent on which all reciprocal ethics is based.

The surveillance tech industry may well have nothing more or less noble in mind than profit-making within legal bounds +when developing and selling their products. +However, when such companies are revealed again and again to have supplied tools of gross human rights violations to known human rights violators, +they will do well to remember that ethics always precedes requirements of legality and legitimacy. +It is a fallacy to take normative guidance from the concept of "legitimacy" +if the concept itself depends on such normative guidelines for definition. +Without examining the ethical standards by which institutions, governments, and laws, were created, +no value-judgements about their legitimacy can be made. +Hiding behind legal compliance as substitute for moral justification is not enough. +Targets of increasingly invasive governmental snooping are too often chosen precisely to suppress the mechanisms from which the legitimacy of such governments flow - +the consent of ordinary civilians. +Free and fair elections, free speech, free media, freedom of thought are all at risk.

References

]]>
+ + Circe + +
+ + <![CDATA[Waku v1 vs Waku v2: Bandwidth Comparison]]> + https://vac.dev/rlog/waku-v1-v2-bandwidth-comparison + + 2021-11-03T10:00:00.000Z + + A local comparison of bandwidth profiles showing significantly improved scalability in Waku v2 over Waku v1.

Background

The original plan for Waku v2 suggested theoretical improvements in resource usage over Waku v1, +mainly as a result of the improved amplification factors provided by GossipSub. +In its turn, Waku v1 proposed improvements over its predecessor, Whisper.

Given that Waku v2 is aimed at resource restricted environments, +we are specifically interested in its scalability and resource usage characteristics. +However, the theoretical performance improvements of Waku v2 over Waku v1 +have never been properly benchmarked and tested.

Although we're working towards a full performance evaluation of Waku v2, +this would require significant planning and resources, +if it were to simulate "real world" conditions faithfully and measure bandwidth and resource usage across different network connections, +robustness against attacks/losses, message latencies, etc. +(There already exists a fairly comprehensive evaluation of GossipSub v1.1, +on which 11/WAKU2-RELAY is based.)

As a starting point, +this post contains a limited and local comparison of the bandwidth profile (only) between Waku v1 and Waku v2. +It reuses and adapts existing network simulations for Waku v1 and Waku v2 +and compares bandwidth usage for similar message propagation scenarios.

Theoretical improvements in Waku v2

Messages are propagated in Waku v1 using flood routing. +This means that every peer will forward every new incoming message to all its connected peers (except the one it received the message from). +This necessarily leads to unnecessary duplication (termed amplification factor), +wasting bandwidth and resources. +What's more, we expect this effect to worsen the larger the network becomes, +as each connection will receive a copy of each message, +rather than a single copy per peer.

Message routing in Waku v2 follows the libp2p GossipSub protocol, +which lowers amplification factors by only sending full message contents to a subset of connected peers. +As a Waku v2 network grows, each peer will limit its number of full-message ("mesh") peerings - +libp2p suggests a maximum of 12 such connections per peer. +This allows much better scalability than a flood-routed network. +From time to time, a Waku v2 peer will send metadata about the messages it has seen to other peers ("gossip" peers).

See this explainer for a more detailed discussion.

Methodology

The results below contain only some scenarios that provide an interesting contrast between Waku v1 and Waku v2. +For example, star network topologies do not show a substantial difference between Waku v1 and Waku v2. +This is because each peer relies on a single connection to the central node for every message, +which barely requires any routing: +each connection receives a copy of every message for both Waku v1 and Waku v2. +Hybrid topologies similarly show only a difference between Waku v1 and Waku v2 for network segments with mesh-like connections, +where routing decisions need to be made.

For this reason, the following approach applies to all iterations:

  1. Simulations are run locally. +This limits the size of possible scenarios due to local resource constraints, +but is a way to quickly get an approximate comparison.
  2. Nodes are treated as a blackbox for which we only measure bandwidth, +using an external bandwidth monitoring tool. +In other words, we do not consider differences in the size of the envelope (for v1) or the message (for v2).
  3. Messages are published at a rate of 50 new messages per second to each network, +except where explicitly stated otherwise.
  4. Each message propagated in the network carries 8 bytes of random payload, which is encrypted. +The same symmetric key cryptographic algorithm (with the same keys) is used in both Waku v1 and v2.
  5. Traffic in each network is generated from 10 nodes (randomly-selected) and published in a round-robin fashion to 10 topics (content topics for Waku v2). +In practice, we found no significant difference in average bandwidth usage when tweaking these two parameters (the number of traffic generating nodes and the number of topics).
  6. Peers are connected in a decentralized full mesh topology, +i.e. each peer is connected to every other peer in the network. +Waku v1 is expected to flood all messages across all existing connections. +Waku v2 gossipsub will GRAFT some of these connections for full-message peerings, +with the rest being gossip-only peerings.
  7. After running each iteration, we verify that messages propagated to all peers (comparing the number of published messages to the metrics logged by each peer).

For Waku v1, nodes are configured as "full" nodes (i.e. with full bloom filter), +while Waku v2 nodes are relay nodes, all subscribing and publishing to the same PubSub topic.

Network size comparison

Iteration 1: 10 nodes

Let's start with a small network of 10 nodes only and see how Waku v1 bandwidth usage compares to that of Waku v2. +At this small scale we don't expect to see improved bandwidth usage in Waku v2 over Waku v1, +since all connections, for both Waku v1 and Waku v2, will be full-message connections. +The number of connections is low enough that Waku v2 nodes will likely GRAFT all connections to full-message peerings, +essentially flooding every message on every connection in a similar fashion to Waku v1. +If our expectations are confirmed, it helps validate our methodology, +showing that it gives more or less equivalent results between Waku v1 and Waku v2 networks.

Sure enough, the figure shows that in this small-scale setup, +Waku v1 actually has a lower per-peer bandwidth usage than Waku v2. +One reason for this may be the larger overall proportion of control messages in a gossipsub-routed network such as Waku v2. +These play a larger role when the total network traffic is comparatively low, as in this iteration. +Also note that the average bandwidth remains more or less constant as long as the rate of published messages remains stable.

Iteration 2: 30 nodes

Now, let's run the same scenario for a larger network of highly-connected nodes, this time consisting of 30 nodes. +At this point, the Waku v2 nodes will start pruning some connections to limit the number of full-message peerings (to a maximum of 12), +while the Waku v1 nodes will continue flooding messages to all connected peers. +We therefore expect to see a somewhat improved bandwidth usage in Waku v2 over Waku v1.

Bandwidth usage in Waku v2 has increased only slightly from the smaller network of 10 nodes (hovering between 2000 and 3000 kbps). +This is because there are only a few more full-message peerings than before. +Compare this to the much higher increase in bandwidth usage for Waku v1, which now requires more than 4000 kbps on average.

Iteration 3: 50 nodes

For an even larger network of 50 highly connected nodes, +the divergence between Waku v1 and Waku v2 is even larger. +The following figure shows comparative average bandwidth usage for a throughput of 50 messages per second.

Average bandwidth usage (for the same message rate) has remained roughly the same for Waku v2 as it was for 30 nodes, +indicating that the number of full-message peerings per node has not increased.

Iteration 4: 85 nodes

We already see a clear trend in the bandwidth comparisons above, +so let's confirm by running the test once more for a network of 85 nodes. +Due to local resource constraints, the effective throughput for Waku v1 falls to below 50 messages per second, +so the v1 results below have been normalized and are therefore approximate. +The local Waku v2 simulation maintains the message throughput rate without any problems.

Iteration 5: 150 nodes

Finally, we simulate message propagation in a network of 150 nodes. +Due to local resource constraints, we run this simulation at a lower rate - +35 messages per second - +and for a shorter amount of time.

Notice how the Waku v1 bandwidth usage is now more than 10 times worse than that of Waku v2. +This is to be expected, as each Waku v1 node will try to flood each new message to 149 other peers, +while the Waku v2 nodes limit their full-message peerings to no more than 12.

Discussion

Let's summarize average bandwidth growth against network growth for a constant message propagation rate. +Since we are particularly interested in how Waku v1 compares to Waku v2 in terms of bandwidth usage, +the results are normalised to the Waku v2 average bandwidth usage for each network size.

Extrapolation is a dangerous game, +but it's safe to deduce that the divergence will only grow for even larger network topologies. +Although control signalling contributes more towards overall bandwidth for Waku v2 networks, +this effect becomes less noticeable for larger networks. +For network segments with more than ~18 densely connected nodes, +the advantage of using Waku v2 over Waku v1 becomes clear.

Network traffic comparison

The analysis above controls the average message rate while network size grows. +In reality, however, active users (and therefore message rates) are likely to grow in conjunction with the network. +This will have an effect on bandwidth for both Waku v1 and Waku v2, though not in equal measure. +Consider the impact of an increasing rate of messages in a network of constant size:

The rate of increase in bandwidth for Waku v2 is slower than that for Waku v1 for a corresponding increase in message propagation rate. +In fact, for a network of 30 densely-connected nodes, +if the message propagation rate increases by 1 per second, +Waku v1 requires an increased average bandwidth of almost 70kbps at each node. +A similar traffic increase in Waku v2 requires on average 40kbps more bandwidth per peer, just over half that of Waku v1.

Conclusions

  • Waku v2 scales significantly better than Waku v1 in terms of average bandwidth usage, +especially for densely connected networks.
  • E.g. for a network consisting of 150 or more densely connected nodes, +Waku v2 provides more than 10x better average bandwidth usage rates than Waku v1.
  • As the network continues to scale, both in absolute terms (number of nodes) and in network traffic (message rates) the disparity between Waku v2 and Waku v1 becomes even larger.

Future work

Now that we've confirmed that Waku v2's bandwidth improvements over its predecessor match theory, +we can proceed to a more in-depth characterisation of Waku v2's resource usage. +Some questions that we want to answer include:

  • What proportion of Waku v2's bandwidth usage is used to propagate payload versus bandwidth spent on control messaging to maintain the mesh?
  • To what extent is message latency (time until a message is delivered to its destination) affected by network size and message rate?
  • How reliable is message delivery in Waku v2 for different network sizes and message rates?
  • What are the resource usage profiles of other Waku v2 protocols (e.g. 12/WAKU2-FILTER and 19/WAKU2-LIGHTPUSH)?

Our aim is to get ever closer to a "real world" understanding of Waku v2's performance characteristics, +identify and fix vulnerabilities +and continually improve the efficiency of our suite of protocols.

References

]]>
+ + Hanno Cornelius + +
+ + <![CDATA[[Talk at COSCUP] Vac, Waku v2 and Ethereum Messaging]]> + https://vac.dev/rlog/waku-v2-ethereum-coscup + + 2021-08-06T12:00:00.000Z + + Learn more about Waku v2, its origins, goals, protocols, implementation and ongoing research. Understand how it is used and how it can be useful for messaging in Ethereum.

This is the English version of a talk originally given in Chinese at COSCUP in Taipei.

video recording with Chinese and English subtitles.


Introduction

Hi everyone!

Today I'll talk to you about Waku v2. What it is, what problems it is solving, +and how it can be useful for things such as messaging in Ethereum. First, let me +start with some brief background.

Brief history and background

Back when Ethereum got started, there used to be this concept of the "holy +trinity". You had Ethereum for compute/consensus, Swarm for storage, and Whisper +for messaging. This is partly where the term Web3 comes from.

Status started out as an app with the goal of being a window onto Ethereum and +a secure messenger. As one of the few, if not the only, apps using Whisper in +production, not to mention on a mobile phone, we quickly realized there were +problems with the underlying protocols and infrastructure. Protocols such as +Whisper weren't quite ready for prime time yet when it came to things such as +scalability and working in the real world.

As we started addressing some of these challenges, and moved from app +development to focusing on protocols, research and infrastructure, we created +Vac. Vac is an r&d unit doing protocol research focused on creating modular p2p +messaging protocols for private, secure, censorship resistant communication.

I won't go into too much detail on the issues with Whisper, if you are +interested in this check out this talk +here or this +article.

In a nutshell, we forked Whisper to address immediate shortcomings and this +became Waku v1. Waku v2 is a completely re-thought implementation from scratch on top +of libp2p. This will be the subject of today's talk.

Waku v2

Overview

Waku v2 is a privacy-preserving peer-to-peer messaging protocol for resource +restricted devices. We can look at Waku v2 as several things:

  • Set of protocols
  • Set of implementations
  • Network of nodes

Let's first look at what the goals are.

Goals

Waku v2 provides a PubSub based messaging protocol with the following +characteristics:

  1. Generalized messaging. Applications that require a messaging protocol to +communicate human to human, machine to machine, or a mix.
  2. Peer-to-peer. For applications that require a p2p solution.
  3. Resource restricted. For example, running with limited bandwidth, being +mostly-offline, or in a browser.
  4. Privacy. Applications that have privacy requirements, such as pseudonymity, +metadata protection, etc.

And to provide these properties in a modular fashion, where applications can +choose their desired trade-offs.

Protocols

Waku v2 consists of several protocols. Here we highlight a few of the most +important ones:

  • 10/WAKU2 - main specification, details how all the pieces fit together
  • 11/RELAY - thin layer on top of GossipSub for message dissemination
  • 13/STORE - fetching of historical messages
  • 14/MESSAGE - message payload

This is the recommended subset for a minimal Waku v2 client.

In addition to this there are many other types of specifications at various +stages of maturity, such as: content based filtering, bridge mode to Waku v1, +JSON RPC API, zkSNARKS based spam protection with RLN, accounting and +settlements with SWAP, fault-tolerant store nodes, recommendations around topic +usage, and more.

See https://rfc.vac.dev/ for a full overview.

Implementations

Waku v2 consists of multiple implementations. This allows for client diversity, +makes it easier to strengthen the protocols, and allow people to use Waku v2 in +different contexts.

  • nim-waku - the reference client written in Nim, most full-featured.
  • js-waku - allow usage of Waku v2 from browsers, focus on interacting with dapps.
  • go-waku - subset of Waku v2 to ease integration into the Status app.

Testnet Huilong and dogfooding

In order to test the protocol we have setup a testnet across all implementations +called Huilong. Yes, that's the Taipei subway station!

Among us core devs we have disabled the main #waku Discord channel used for +development, and people run their own node connected to this toy chat application.

Feel free to join and say hi! Instructions can be found here:

Research

While Waku v2 is being used today, we are actively researching improvements. +Since the design is modular, we can gracefully introduce new capabilities. Some +of these research areas are:

  • Privacy-preserving spam protection using zkSNARKs and RLN
  • Accounting and settlement of resource usage to incentivize nodes to provide services with SWAP
  • State synchronization for store protocol to make it easier to run a store node without perfect uptime
  • Better node discovery
  • More rigorous privacy analysis
  • Improving interaction with wallets and dapp

Use cases

Let's look at where Waku v2 is and can be used.

Prelude: Topics in Waku v2

To give some context, there are two different types of topics in Waku v2. One is +a PubSub topic, for routing. The other is a content topic, which is used for +content based filtering. Here's an example of the default PubSub topic:

/waku/2/default-waku/proto

This is recommended as it increases privacy for participants and it is stored by +default, however this is up to the application.

The second type of topic is a content topic, which is application specific. For +example, here's the content topic used in our testnet:

/toychat/2/huilong/proto

For more on topics, see https://rfc.vac.dev/spec/23/

Status app

In the Status protocol, content topics - topics in Whisper/Waku v1 - are used for several things:

  • Contact code topic to discover X3DH bundles for perfect forward secrecy
    • Partitioned into N (currently 5000) content topics to balance privacy with efficiency
  • Public chats correspond to hash of the plaintext name
  • Negotiated topic for 1:1 chat with DHKE derived content topic

See more here https://specs.status.im/spec/10

Currently, Status app is in the process of migrating to and testing Waku v2.

DappConnect: Ethereum messaging

It is easy to think of Waku as being for human messaging, since that's how it is +primarily used in the Status app, but the goal is to be useful for generalized +messaging, which includes Machine-To-Machine (M2M) messaging.

Recall the concept of the holy trinity with Ethereum/Swarm/Whisper and Web3 that +we mentioned in the beginning. Messaging can be used as a building block for +dapps, wallets, and users to communicate with each other. It can be used for +things such as:

  • Multisig and DAO vote transactions only needing one on-chain operation
  • Giving dapps ability to send push notifications to users
  • Giving users ability to directly respond to requests from dapps
  • Decentralized WalletConnect
  • Etc

Basically anything that requires communication and doesn't have to be on-chain.

WalletConnect v2

WalletConnect is an open protocol for connecting dapps to wallets with a QR +code. Version 2 is using Waku v2 as a communication channel to do so in a +decentralized and private fashion.

See for more: https://docs.walletconnect.org/v/2.0/tech-spec

WalletConnect v2 is currently in late alpha using Waku v2.

More examples

  • Gasless voting and vote aggregation off-chain
  • Dapp games using Waku as player discovery mechanism
  • Send encrypted message to someone with an Ethereum key
  • <Your dapp here>

These are all things that are in progress / proof of concept stage.

Contribute

We'd love to see contributions of any form!

Conclusion

In this talk we've gone over the original vision for Web3 and how Waku came to +be. We've also looked at what Waku v2 aims to do. We looked at its protocols, +implementations, the current testnet as well as briefly on some ongoing +research for Vac.

We've also looked at some specific use cases for Waku. First we looked at how +Status uses it with different topics. Then we looked at how it can be useful for +messaging in Ethereum, including for things like WalletConnect.

I hope this talk gives you a better idea of what Waku is, why it exists, and +that it inspires you to contribute, either to Waku itself or by using it in your +own project!

]]>
+ + Oskar + +
+ + <![CDATA[Presenting JS-Waku: Waku v2 in the Browser]]> + https://vac.dev/rlog/presenting-js-waku + + 2021-06-04T12:00:00.000Z + + JS-Waku is bringing Waku v2 to the browser. Learn what we achieved so far and what is next in our pipeline!

For the past 3 months, we have been working on bringing Waku v2 to the browser. +Our aim is to empower dApps with Waku v2, and it led to the creation of a new library. +We believe now is a good time to introduce it!

Waku v2

First, let's review what Waku v2 is and what problem it is trying to solve.

Waku v2 comes from a need to have a more scalable, better optimised solution for the Status app to achieve decentralised +communications on resource restricted devices (i.e., mobile phones).

The Status chat feature was initially built over Whisper. +However, Whisper has a number of caveats which makes it inefficient for mobile phones. +For example, with Whisper, all devices are receiving all messages which is not ideal for limited data plans.

To remediate this, a Waku mode (then Waku v1), based on devp2p, was introduced. +To further enable web and restricted resource environments, Waku v2 was created based on libp2p. +The migration of the Status chat feature to Waku v2 is currently in progress.

We see the need for such a solution in the broader Ethereum ecosystem, beyond Status. +This is why we are building Waku v2 as a decentralised communication platform for all to use and build on. +If you want to read more about Waku v2 and what it aims to achieve, +check out What's the Plan for Waku v2?.

Since last year, we have been busy defining and implementing Waku v2 protocols in nim-waku, +from which you can build wakunode2. +Wakunode2 is an adaptive and modular Waku v2 node; +it allows users to run their own node and use the Waku v2 protocols they need. +The nim-waku project doubles as a library that can be used to add Waku v2 support to native applications.

Waku v2 in the browser

We believe that dApps and wallets can benefit from the Waku network in several ways. +For some dApps, it makes sense to enable peer-to-peer communications. +For others, machine-to-machine communications would be a great asset. +For example, in the case of a DAO, +Waku could be used for gas-less voting. +Enabling the DAO to notify their users of a new vote, +and users to vote without interacting with the blockchain and spending gas.

Murmur was the first attempt to bring Whisper to the browser, +acting as a bridge between devp2p and libp2p. +Once Waku v2 was started and there was a native implementation on top of libp2p, +a chat POC was created to demonstrate the potential of Waku v2 +in a web environment. +It showed how using js-libp2p with a few modifications enabled access to the Waku v2 network. +There were still some unresolved challenges. +For example, nim-waku only supports TCP connections, which are not supported by browser applications. +Hence, to connect to other nodes, the POC was connecting to a NodeJS proxy application using websockets, +which in turn could connect to wakunode2 via TCP.

However, to enable dApp and Wallet developers to easily integrate Waku in their product, +we need to give them a library that is easy to use and works out of the box: +introducing JS-Waku.

JS-Waku is a JavaScript library that allows your dApp, wallet or other web app to interact with the Waku v2 network. +It is available right now on npm:

npm install js-waku.

As it is written in TypeScript, types are included in the npm package to allow easy integration with TypeScript, ClojureScript and other typed languages that compile to JavaScript.

Key Waku v2 protocols are already available: +message, store, relay and light push, +enabling your dApp to:

  • Send and receive near-instant messages on the Waku network (relay),
  • Query nodes for messages that may have been missed, e.g. due to poor cellular network (store),
  • Send messages with confirmations (light push).

JS-Waku needs to operate in the same context from which Waku v2 was born: +a restricted environment where connectivity or uptime are not guaranteed; +JS-Waku brings Waku v2 to the browser.

Achievements so far

We focused the past month on developing a ReactJS Chat App. +The aim was to create enough building blocks in JS-Waku to enable this showcase web app that +we now use for dogfooding purposes.

Most of the effort was on getting familiar with the js-libp2p library +that we heavily rely on. +JS-Waku is the second implementation of Waku v2 protocol, +so a lot of effort on interoperability was needed. +For example, to ensure compatibility with the nim-waku reference implementation, +we run our tests against wakunode2 as part of the CI.

This interoperability effort helped solidify the current Waku v2 specifications: +By clarifying the usage of topics +(#327, #383), +fix discrepancies between specs and nim-waku +(#418, #419) +and fix small nim-waku & nim-libp2p bugs +(#411, #439).

To fully access the waku network, JS-Waku needs to enable web apps to connect to nim-waku nodes. +A standard way to do so is using secure websockets as it is not possible to connect directly to a TCP port from the browser. +Unfortunately websocket support is not yet available in nim-libp2p so +we ended up deploying websockify alongside wakunode2 instances.

As we built the web chat app, +we were able to fine tune the API to provide a simple and succinct interface. +You can start a node, connect to other nodes and send a message in less than ten lines of code:

import { Waku } from 'js-waku'

const waku = await Waku.create({})

const nodes = await getStatusFleetNodes()
await Promise.all(nodes.map((addr) => waku.dial(addr)))

const msg = WakuMessage.fromUtf8String(
'Here is a message!',
'/my-cool-app/1/my-use-case/proto',
)
await waku.relay.send(msg)

We have also put a bounty at 0xHack for using JS-Waku +and running a workshop. +We were thrilled to have a couple of hackers create new software using our libraries. +One of the projects aimed to create a decentralised, end-to-end encrypted messenger app, +similar to what the ETH-DM protocol aims to achieve. +Another project was a decentralised Twitter platform. +Such projects allow us to prioritize the work on JS-Waku and understand how DevEx can be improved.

As more developers use JS-Waku, we will evolve the API to allow for more custom and fine-tune usage of the network +while preserving this out of the box experience.

What's next?

Next, we are directing our attention towards Developer Experience. +We already have documentation available but we want to provide more: +Tutorials, various examples +and showing how JS-Waku can be used with Web3.

By prioritizing DevEx we aim to enable JS-Waku integration in dApps and wallets. +We think JS-Waku builds a strong case for machine-to-machine (M2M) communications. +The first use cases we are looking into are dApp notifications: +Enabling dApp to notify their user directly in their wallets! +Leveraging Waku as a decentralised infrastructure and standard so that users do not have to open their dApp to be notified +of events such as DAO voting.

We already have some POC in the pipeline to enable voting and polling on the Waku network, +allowing users to save gas by not broadcasting each individual vote on the blockchain.

To facilitate said applications, we are looking at improving integration with Web3 providers by providing examples +of signing, validating, encrypting and decrypting messages using Web3. +Waku is privacy conscious, so we will also provide signature and encryption examples decoupled from users' Ethereum identity.

As you can read, we have grand plans for JS-Waku and Waku v2. +There is a lot to do, and we would love some help so feel free to +check out the new role in our team: +js-waku: Wallet & Dapp Integration Developer. +We also have a number of positions open to work on Waku protocol and nim-waku.

If you are as excited as us by JS-Waku, why not build a dApp with it? +You can find documentation on the npmjs page.

Whether you are a developer, you can come chat with us using WakuJS Web Chat +or chat2. +You can get support in #dappconnect-support on Vac Discord or Telegram. +If you have any ideas on how Waku could enable a specific dapp or use case, do share, we are always keen to hear it.

]]>
+ + Franck + +
+ + <![CDATA[Privacy-preserving p2p economic spam protection in Waku v2]]> + https://vac.dev/rlog/rln-relay + + 2021-03-05T12:00:00.000Z + + This post is going to give you an overview of how spam protection can be achieved in Waku Relay through rate-limiting nullifiers. We will cover a summary of spam-protection methods in centralized and p2p systems, and the solution overview and details of the economic spam-protection method. The open issues and future steps are discussed in the end.

Introduction

This post is going to give you an overview of how spam protection can be achieved in Waku Relay protocol2 through Rate-Limiting Nullifiers3 4 or RLN for short.

Let me give a little background about Waku(v2)1. Waku is a privacy-preserving peer-to-peer (p2p) messaging protocol for resource-restricted devices. Being p2p means that Waku relies on No central server. Instead, peers collaboratively deliver messages in the network. Waku uses GossipSub16 as the underlying routing protocol (as of the writeup of this post). At a high level, GossipSub is based on publisher-subscriber architecture. That is, peers, congregate around topics they are interested in and can send messages to topics. Each message gets delivered to all peers subscribed to the topic. In GossipSub, a peer has a constant number of direct connections/neighbors. In order to publish a message, the author forwards its message to a subset of neighbors. The neighbors proceed similarly till the message gets propagated in the network of the subscribed peers. The message publishing and routing procedures are part of the Waku Relay17 protocol. +

Figure 1: An overview of privacy-preserving p2p economic spam protection in Waku v2 RLN-Relay protocol.

What do we mean by spamming?

In centralized messaging systems, a spammer usually indicates an entity that uses the messaging system to send an unsolicited message (spam) to large numbers of recipients. However, in Waku with a p2p architecture, spam messages not only affect the recipients but also all the other peers involved in the routing process as they have to spend their computational power/bandwidth/storage capacity on processing spam messages. As such, we define a spammer as an entity that uses the messaging system to publish a large number of messages in a short amount of time. The messages issued in this way are called spam. In this definition, we disregard the intention of the spammer as well as the content of the message and the number of recipients.

Possible Solutions

Has the spamming issue been addressed before? Of course yes! Here is an overview of the spam protection techniques with their trade-offs and use-cases. In this overview, we distinguish between protection techniques that are targeted for centralized messaging systems and those for p2p architectures.

Centralized Messaging Systems

In traditional centralized messaging systems, spam usually signifies unsolicited messages sent in bulk or messages with malicious content like malware. Protection mechanisms include

  • authentication through some piece of personally identifiable information e.g., phone number
  • checksum-based filtering to protect against messages sent in bulk
  • challenge-response systems
  • content filtering on the server or via a proxy application

These methods exploit the fact that the messaging system is centralized and a global view of the users' activities is available based on which spamming patterns can be extracted and defeated accordingly. Moreover, users are associated with an identifier e.g., a username which enables the server to profile each user e.g., to detect suspicious behavior like spamming. Such profiling possibility is against the user's anonymity and privacy.

Among the techniques enumerated above, authentication through phone numbers is a somewhat economic-incentive measure as providing multiple valid phone numbers will be expensive for the attacker. Notice that while using an expensive authentication method can reduce the number of accounts owned by a single spammer, it cannot address the spam issue entirely. This is because the spammer can still send bulk messages through one single account. For this approach to be effective, a centralized mediator is essential. That is why such a solution would not fit the p2p environments where no centralized control exists.

P2P Systems

What about spam prevention in p2p messaging platforms? There are two techniques, namely Proof of Work8 deployed by Whisper9 and Peer scoring6 method (namely reputation-based approach) adopted by LibP2P. However, each of these solutions has its own shortcomings for real-life use-cases as explained below.

Proof of work

The idea behind the Proof Of Work i.e., POW8 is to make messaging a computationally costly operation hence lowering the messaging rate of all the peers including the spammers. In specific, the message publisher has to solve a puzzle and the puzzle is to find a nonce such that the hash of the message concatenated with the nonce has at least z leading zeros. z is known as the difficulty of the puzzle. Since the hash function is one-way, peers have to brute-force to find a nonce. Hashing is a computationally-heavy operation so is the brute-force. While solving the puzzle is computationally expensive, it is comparatively cheap to verify the solution.

POW is also used as the underlying mining algorithm in Ethereum and Bitcoin blockchain. There, the goal is to contain the mining speed and allow the decentralized network to come to a consensus, or agree on things like account balances and the order of transactions.

While the use of POW makes perfect sense in Ethereum / Bitcoin blockchain, it shows practical issues in heterogeneous p2p messaging systems with resource-restricted peers. Some peers won't be able to carry the designated computation and will be effectively excluded. Such exclusion showed to be practically an issue in applications like Status, which used to rely on POW for spam-protection, to the extent that the difficulty level had to be set close to zero.

Peer Scoring

The peer scoring method6 that is utilized by libp2p is to limit the number of messages issued by a peer in connection to another peer. That is, each peer monitors all the peers to which it is directly connected and adjusts their messaging quota i.e., to route or not route their messages depending on their past activities. For example, if a peer detects that its neighbor is sending more than x messages per month, it can drop its quota to z.x where z is less than one. The shortcoming of this solution is that scoring is based on peers' local observations and the concept of the score is defined in relation to one single peer. This leaves room for an attack where a spammer can make connections to k peers in the system and publish k.(x-1) messages by exploiting all of its k connections. Another attack scenario is through botnets consisting of a large number of e.g., a million bots. The attacker rents a botnet and inserts each of them as a legitimate peer into the network, and each can publish x-1 messages per month7.

Economic-Incentive Spam protection

Is this the end of our spam-protection journey? Shall we simply give up and leave spammers be? Certainly not! Waku RLN-Relay gives us a p2p spam-protection method which:

  • suits p2p systems and does not rely on any central entity.
  • is efficient i.e., with no unreasonable computational, storage, memory, and bandwidth requirement! as such, it fits the network of heterogeneous peers.
  • respects users privacy unlike reputation-based and centralized methods.
  • deploys economic-incentives to contain spammers' activity. Namely, there is a financial sacrifice for those who want to spam the system. How? follow along ...

We devise a general rule to save everyone's life and that is

No one can publish more than M messages per epoch without being financially charged!

We set M to 1 for now, but this can be any arbitrary value. You may be thinking "This is too restrictive! Only one per epoch?". Don't worry, we set the epoch to a reasonable value so that it does not slow down the communication of innocent users but will make the life of spammers harder! Epoch here can be every second, as defined by UTC date-time +-20s.

The remainder of this post is all about the story of how to enforce this limit on each user's messaging rate as well as how to impose the financial cost when the limit gets violated. This brings us to the Rate Limiting Nullifiers and how we integrate this technique into Waku v2 (in specific the Waku Relay protocol) to protect our valuable users against spammers.

Technical Terms

Zero-knowledge proof: Zero-knowledge proof (ZKP)14 allows a prover to show a verifier that they know something, without revealing what that something is. This means you can do the trust-minimized computation that is also privacy-preserving. As a basic example, instead of showing your ID when going to a bar you simply give them proof that you are over 18, without showing the doorman your id. In this write-up, by ZKP we essentially mean zkSNARK15 which is one of the many types of ZKPs.

Threshold Secret Sharing Scheme: (m,n) Threshold secret-sharing is a method by which you can split a secret value s into n pieces in a way that the secret s can be reconstructed by having m pieces (m <= n). The economic-incentive spam protection utilizes a (2,n) secret sharing realized by Shamir Secret Sharing Scheme13.

Overview: Economic-Incentive Spam protection through Rate Limiting Nullifiers

Context: We started the idea of economic-incentive spam protection more than a year ago and conducted a feasibility study to identify blockers and unknowns. The results are published in our prior post. Since then major progress has been made and the prior identified blockers that are listed below are now addressed. Kudos to Barry WhiteHat, Onur Kilic, Koh Wei Jie for all of their hard work, research, and development which made this progress possible.

  • the proof time22 which was initially in the order of minutes ~10 mins and now is almost 0.5 seconds
  • the prover key size21 which was initially ~110MB and now is ~3.9MB
  • the lack of Shamir logic19 which is now implemented and part of the RLN repository4
  • the concern regarding the potential multi-party computation for the trusted setup of zkSNARKs which got resolved20
  • the lack of end-to-end integration that now we made it possible, have it implemented, and are going to present it in this post. New blockers are also sorted out during the e2e integration which we will discuss in the Feasibility and Open Issues section.

Now that you have more context, let's see how the final solution works. The fundamental point is to make it economically costly to send more than your share of messages and to do so in a privacy-preserving and e2e fashion. To do that we have the following components:

  • 1- Group: We manage all the peers inside a large group (later we can split peers into smaller groups, but for now consider only one). The group management is done via a smart contract which is devised for this purpose and is deployed on the Ethereum blockchain.
  • 2- Membership: To be able to send messages and in specific for the published messages to get routed by all the peers, publishing peers have to register to the group. Membership involves setting up public and private key pairs (think of it as the username and password). The private key remains at the user side but the public key becomes a part of the group information on the contract (publicly available) and everyone has access to it. Public keys are not human-generated (like usernames) and instead they are random numbers, as such, they do not reveal any information about the owner (think of public keys as pseudonyms). Registration is mandatory for the users who want to publish a message, however, users who only want to listen to the messages are more than welcome and do not have to register in the group.
  • Membership fee: Membership is not for free! each peer has to lock a certain amount of funds during the registration (this means peers have to have an Ethereum account with sufficient balance for this sake). This fund is safely stored on the contract and remains intact unless the peer attempts to break the rules and publish more than one message per epoch.
  • Zero-knowledge Proof of membership: Do you want your message to get routed to its destination? Fine, but you have to prove that you are a member of the group (sorry, no one can escape the registration phase!). Now, you may be thinking: should I attach my public key to my message to prove my membership? Absolutely not! We said that our solution respects privacy! Membership proofs are done in a zero-knowledge manner; that is, each message will carry cryptographic proof asserting that "the message is generated by one of the current members of the group", so your identity remains private and your anonymity is preserved!
  • Slashing through secret sharing: Till now it does not seem like we can catch spammers, right? yes, you are right! now comes the exciting part, detecting spammers and slashing them. The core idea behind the slashing is that each publishing peer (not routing peers!) has to integrate a secret share of its private key inside the message. The secret share is deterministically computed over the private key and the current epoch. The content of this share is harmless for the peer's privacy (it looks random) unless the peer attempts to publish more than one message in the same epoch hence disclosing more than one secret share of its private key. Indeed two distinct shares of the private key under the same epoch are enough to reconstruct the entire private key. Then what should you do with the recovered private key? hurry up! go to the contract and withdraw the private key and claim its fund and get rich!! Are you thinking what if spammers attach junk values instead of valid secret shares? Of course, that wouldn't be cool! so, there is a zero-knowledge proof for this sake as well where the publishing peer has to prove that the secret shares are generated correctly.

A high-level overview of the economic spam protection is shown in Figure 1.

Flow

In this section, we describe the flow of the economic-incentive spam detection mechanism from the viewpoint of a single peer. An overview of this flow is provided in Figure 3.

Setup and Registration

A peer willing to publish a message is required to register. Registration is moderated through a smart contract deployed on the Ethereum blockchain. The state of the contract contains the list of registered members' public keys. An overview of registration is illustrated in Figure 2.

For the registration, a peer creates a transaction that sends x amount of Ether to the contract. The peer who has the "private key" sk associated with that deposit would be able to withdraw x Ether by providing valid proof. Note that sk is initially only known by the owning peer; however, it may get exposed to other peers in case the owner attempts spamming the system i.e., sending more than one message per epoch. The following relation holds between the sk and pk i.e., pk = H(sk) where H denotes a hash function.

Figure 2: Registration

Maintaining the membership Merkle Tree

The ZKP of membership that we mentioned before relies on the representation of the entire group as a Merkle Tree. The tree construction and maintenance is delegated to the peers (the initial idea was to keep the tree on the chain as part of the contract; however, the cost associated with member deletion and insertion was high and unreasonable, please see Feasibility and Open Issues for more details). As such, each peer needs to build the tree locally and sync itself with the contract updates (peer insertion and deletion) to mirror them on its tree. Two pieces of information of the tree are important as they enable peers to generate zero-knowledge proofs. One is the root of the tree and the other is the membership proof (or the authentication path). The tree root is public information whereas the membership proof is private data (or more precisely the index of the peer in the tree).

Publishing

In order to publish at a given epoch, each message must carry a proof i.e., a zero-knowledge proof signifying that the publishing peer is a registered member, and has not exceeded the messaging rate at the given epoch.

Recall that the enforcement of the messaging rate was through associating a secret shared version of the peer's sk into the message together with a ZKP that the secret shares are constructed correctly. As for the secret sharing part, the peer generates the following data:

  1. shareX
  2. shareY
  3. nullifier

The pair (shareX, shareY) is the secret shared version of sk that are generated using Shamir secret sharing scheme. Having two such pairs for an identical nullifier results in full disclosure of peer's sk and hence burning the associated deposit. Note that the nullifier is a deterministic value derived from sk and epoch therefore any two messages issued by the same peer (i.e., using the same sk) for the same epoch are guaranteed to have identical nullifiers.

Finally, the peer generates a zero-knowledge proof zkProof asserting the membership of the peer in the group and the correctness of the attached secret share (shareX, shareY) and the nullifier. In order to generate a valid proof, the peer needs to have two private inputs i.e., its sk and its authentication path. Other inputs are the tree root, epoch, and the content of the message.

Privacy Hint: Note that the authentication path of each peer depends on the recent list of members (hence changes when new peers register or leave). As such, it is recommended (and necessary for privacy/anonymity) that the publisher updates her authentication path based on the latest status of the group and attempts the proof using the updated version.

An overview of the publishing procedure is provided in Figure 3.

Routing

Upon the receipt of a message, the routing peer needs to decide whether to route it or not. This decision relies on the following factors:

  1. If the epoch value attached to the message has a non-reasonable gap with the routing peer's current epoch then the message must be dropped (this is to prevent a newly registered peer spamming the system by messaging for all the past epochs).
  2. The message MUST contain valid proof that gets verified by the routing peer. If the preceding checks are passed successfully, then the message is relayed. In case of an invalid proof, the message is dropped. If spamming is detected, the publishing peer gets slashed (see Spam Detection and Slashing).

An overview of the routing procedure is provided in Figure 3.

Spam Detection and Slashing

In order to enable local spam detection and slashing, routing peers MUST record the nullifier, shareX, and shareY of any incoming message conditioned that it is not spam and has valid proof. To do so, the peer should follow the following steps.

  1. The routing peer first verifies the zkProof and drops the message if not verified.
  2. Otherwise, it checks whether a message with an identical nullifier has already been relayed.
    • a) If such message exists and its shareX and shareY components are different from the incoming message, then slashing takes place (if the shareX and shareY fields of the previously relayed message is identical to the incoming message, then the message is a duplicate and shall be dropped).
    • b) If none found, then the message gets relayed.

An overview of the slashing procedure is provided in Figure 3.

Figure 3: Publishing, Routing and Slashing workflow.

Feasibility and Open Issues

We've come a long way since a year ago: blockers have been resolved, and we have now implemented the solution end-to-end. We learned a lot and could identify further issues and unknowns, some of which are blocking getting to production. A summary of the identified issues is presented below.

Storage overhead per peer

Currently, peers are supposed to maintain the entire tree locally and it imposes storage overhead which is linear in the size of the group (see this issue11 for more details). One way to cope with this is to use the light-node and full-node paradigm in which only a subset of peers who are more resourceful retain the tree whereas the light nodes obtain the necessary information by interacting with the full nodes. Another way to approach this problem is through a more storage efficient method (as described in this research issue12) where peers store a partial view of the tree instead of the entire tree. Keeping the partial view lowers the storage complexity to O(log(N)) where N is the size of the group. There are still unknown unknowns to this solution, as such, it must be studied further to become fully functional.

Cost-effective way of member insertion and deletion

Currently, the cost associated with RLN-Relay membership is around 30 USD10. We aim at finding a more cost-effective approach. Please feel free to share with us your solution ideas in this regard in this issue.

Exceeding the messaging rate via multiple registrations

While the economic-incentive solution provides a financial disincentive to spamming, we should note that there are still expensive attacks23 that a spammer can launch to break the messaging rate limit. That is, the attacker can pay for multiple legit registrations e.g., k, hence being able to publish k messages per epoch. We believe that the higher the membership fee is, the less probable such an attack would be, hence a stronger level of spam-protection can be achieved. Following this argument, the high fee associated with the membership (which we listed above as an open problem) can indeed be contributing to a better protection level.

Conclusion and Future Steps

As discussed in this post, Waku RLN Relay can achieve a privacy-preserving economic spam protection through rate-limiting nullifiers. The idea is to financially discourage peers from publishing more than one message per epoch. Specifically, exceeding the messaging rate results in a financial charge. Those who violate this rule are called spammers and their messages are spam. The identification of spammers does not rely on any central entity. Also, the financial punishment of spammers is cryptographically guaranteed. In this solution, privacy is guaranteed since: 1) Peers do not have to disclose any piece of personally identifiable information in any phase i.e., neither in the registration nor in the messaging phase 2) Peers can prove that they have not exceeded the messaging rate in a zero-knowledge manner and without leaving any trace to their membership accounts. Furthermore, all the computations are light, hence this solution fits the heterogeneous p2p messaging system. Note that the zero-knowledge proof parts are handled through zkSNARKs and the benchmarking result can be found in the RLN benchmark report5.

Future steps:

We are still at the PoC level, and the development is in progress. As our future steps,

  • we would like to evaluate the running time associated with the Merkle tree operations. Indeed, the need to locally store Merkle tree on each peer was one of the unknowns discovered during this PoC and yet the concrete benchmarking result in this regard is not available.
  • We would also like to pursue our storage-efficient Merkle Tree maintenance solution in order to lower the storage overhead of peers.
  • In line with the storage optimization, the full-node light-node structure is another path to follow.
  • Another possible improvement is to replace the membership contract with a distributed group management scheme e.g., through distributed hash tables. This is to address possible performance issues that the interaction with the Ethereum blockchain may cause. For example, the registration transactions are subject to delay as they have to be mined before being visible in the state of the membership contract. This means peers have to wait for some time before being able to publish any message.

Acknowledgement

Thanks to Onur Kılıç for his explanation and pointers and for assisting with development and runtime issues. Also thanks to Barry Whitehat for his time and insightful comments. Special thanks to Oskar Thoren for his constructive comments and his guides during the development of this PoC and the writeup of this post.

References


  1. RLN-Relay specification: https://rfc.vac.dev/spec/17/
  2. RLN documentation: https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?both
  3. RLN repositories: https://github.com/kilic/RLN and https://github.com/kilic/rlnapp
  4. Waku v2: https://rfc.vac.dev/spec/10/
  5. GossipSub: https://docs.libp2p.io/concepts/publish-subscribe/
  6. Waku Relay: https://rfc.vac.dev/spec/11/
  7. Proof of work: http://www.infosecon.net/workshop/downloads/2004/pdf/clayton.pdf and https://link.springer.com/content/pdf/10.1007/3-540-48071-4_10.pdf
  8. EIP-627 Whisper: https://eips.ethereum.org/EIPS/eip-627
  9. Peer Scoring: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring
  10. Peer scoring security issues: https://github.com/vacp2p/research/issues/44
  11. Zero Knowledge Proof: https://dl.acm.org/doi/abs/10.1145/3335741.3335750 and https://en.wikipedia.org/wiki/Zero-knowledge_proof
  12. zkSNARKs: https://link.springer.com/chapter/10.1007/978-3-662-49896-5_11 and https://coinpare.io/whitepaper/zcash.pdf
  13. Shamir Secret Sharing Scheme: https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing
  14. zkSNARKs proof time: https://github.com/vacp2p/research/issues/7
  15. Prover key size: https://github.com/vacp2p/research/issues/8
  16. The lack of Shamir secret sharing in zkSNARKs: https://github.com/vacp2p/research/issues/10
  17. The MPC required for zkSNARKs trusted setup: https://github.com/vacp2p/research/issues/9
  18. Storage overhead per peer: https://github.com/vacp2p/research/issues/57
  19. Storage-efficient Merkle Tree maintenance: https://github.com/vacp2p/research/pull/54
  20. Cost-effective way of member insertion and deletion: https://github.com/vacp2p/research/issues/56
  21. Attack on the messaging rate: https://github.com/vacp2p/specs/issues/251
  22. RLN Benchmark: https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Benchmarks
]]>
+ + Sanaz + +
+ + <![CDATA[[Talk] Vac, Waku v2 and Ethereum Messaging]]> + https://vac.dev/rlog/waku-v2-ethereum-messaging + + 2020-11-10T12:00:00.000Z + + Talk from Taipei Ethereum Meetup. Read on to find out about our journey from Whisper to Waku v2, as well as how Waku v2 can be useful for Etherum Messaging.

The following post is a transcript of the talk given at the Taipei Ethereum meetup, November 5. There is also a video recording.


0. Introduction

Hi! My name is Oskar and I'm the protocol research lead at Vac. This talk will be divided into two parts. First I'll talk about the journey from Whisper, to Waku v1 and now to Waku v2. Then I'll talk about messaging in Ethereum. After this talk, you should have an idea of what Waku v2 is, the problems it is trying to solve, as well as where it can be useful for messaging in Ethereum.

PART 1 - VAC AND THE JOURNEY FROM WHISPER TO WAKU V1 TO WAKU V2

1. Vac intro

First, what is Vac? Vac grew out of our efforts at Status to create a window onto Ethereum and a secure messenger. Vac is a modular protocol stack for p2p secure messaging, paying special attention to resource restricted devices, privacy and censorship resistance.

Today we are going to talk mainly about Waku v2, which is the transport privacy / routing aspect of the Vac protocol stack. It sits "above" the p2p overlay, such as libp2p dealing with transports etc, and below a conversational security layer dealing with messaging encryption, such as using Double Ratchet etc.

2. Whisper to Waku v1

In the beginning, there was Whisper. Whisper was part of the holy trinity of Ethereum. You had Ethereum for consensus/computation, Whisper for messaging, and Swarm for storage.

However, for various reasons, Whisper didn't get the attention it deserved. Development dwindled, it promised too much and it suffered from many issues, such as being extremely inefficient and not being suitable for running on e.g. mobile phone. Despite this, Status used it in its app from around 2017 to 2019. As far as I know, it was one of very few, if not the only, production uses of Whisper.

In an effort to solve some of its immediate problems, we forked Whisper into Waku and formalized it with a proper specification. This solved immediate bandwidth issues for light nodes, introduced rate limiting for better spam protection, improved historical message support, etc.

If you are interested in this journey, checkout the EthCC talk Dean and I gave in Paris earlier this year.

Status upgraded to Waku v1 early 2020. What next?

3. Waku v1 to v2

We were far from done. The changes we had made were quite incremental and done in order to get tangible improvements as quickly as possible. This meant we couldn't address more fundamental issues related to full node routing scalability, running with libp2p for more transports, better security, better spam protection and incentivization.

This kickstarted Waku v2 efforts, which is what we've been working on since July. This work was and is initially centered around a few pieces:

(a) Moving to libp2p

(b) Better routing

(c) Accounting and user-run nodes

The general theme was: making the Waku network more scalable and robust.

We also did a scalability study to show at what point the network would run into issues, due to the inherent lack of routing that Whisper and Waku v1 provided.

You can read more about this here.

3.5 Waku v2 - Design goals

Taking a step back, what problem does Waku v2 attempt to solve compared to all the other solutions that exists out there? What type of applications should use it and why? We have the following design goals:

  1. Generalized messaging. Many applications requires some form of messaging protocol to communicate between different subsystems or different nodes. This messaging can be human-to-human or machine-to-machine or a mix.

  2. Peer-to-peer. These applications sometimes have requirements that make them suitable for peer-to-peer solutions.

  3. Resource restricted. These applications often run in constrained environments, where resources or the environment is restricted in some fashion. E.g.:

    • limited bandwidth, CPU, memory, disk, battery, etc
    • not being publicly connectable
    • only being intermittently connected; mostly-offline
  4. Privacy. These applications have a desire for some privacy guarantees, such as pseudonymity, metadata protection in transit, etc.

As well as to do so in a modular fashion. Meaning you can find a reasonable trade-off depending on your exact requirements. For example, you usually have to trade off some bandwidth to get metadata protection, and vice versa.

The concept of designing for resource restricted devices also leads to the concept of adaptive nodes, where you have more of a continuum between full nodes and light nodes. For example, if you switch your phone from mobile data to WiFi you might be able to handle more bandwidth, and so on.

4. Waku v2 - Breakdown

Where is Waku v2 at now, and how is it structured?

It is running over libp2p and we had our second internal testnet last week or so. As a side note, we name our testnets after subway stations in Taipei, the first one being Nangang, and the most recent one being Dingpu.

The main implementation is written in Nim using nim-libp2p, which is also powering Nimbus, an Ethereum 2 client. There is also a PoC for running Waku v2 in the browser. On a spec level, we have the following specifications that corresponds to the components that make up Waku v2:

  • Waku v2 - this is the main spec that explains the goals of providing generalized messaging, in a p2p context, with a focus on privacy and running on resources restricted devices.
  • Relay - this is the main PubSub spec that provides better routing. It builds on top of GossipSub, which is what Eth2 heavily relies on as well.
  • Store - this is a 1-1 protocol for light nodes to get historical messages, if they are mostly-offline.
  • Filter - this is a 1-1 protocol for light nodes that are bandwidth restricted to only (or mostly) get messages they care about.
  • Message - this explains the payload, to get some basic encryption and content topics. It corresponds roughly to envelopes in Whisper/Waku v1.
  • Bridge - this explains how to do bridging between Waku v1 and Waku v2 for compatibility.

Right now, all protocols, with the exception of bridge, are in draft mode, meaning they have been implemented but are not yet being relied upon in production.

You can read more about the breakdown in this update though some progress has been made since then, as well was in the main Waku v2 spec.

5. Waku v2 - Upcoming

What's coming up next? There are a few things.

For Status to use it in production, it needs to be integrated into the main app using the Nim Node API. The bridge also needs to be implemented and tested.

For other users, we are currently overhauling the API to allow usage from a browser, e.g. To make this experience great, there are also a few underlying infrastructure things that we need in nim-libp2p, such as a more secure HTTP server in Nim, Websockets and WebRTC support.

There are also some changes we made to at what level content encryption happens, and this needs to be made easier to use in the API. This means you can use a node without giving your keys to it, which is useful in some environments.

More generally, beyond getting to production-ready use, there are a few bigger pieces that we are working on or will work on soon. These are things like:

  • Better scaling, by using topic sharding.
  • Accounting and user-run nodes, to account for and incentivize full nodes.
  • Stronger and more rigorous privacy guarantees, e.g. through study of GossipSub, unlinkable packet formats, etc.
  • Rate Limit Nullifier for privacy preserving spam protection, a la what Barry Whitehat has presented before.

As well as better support for Ethereum M2M Messaging. Which is what I'll talk about next.

PART 2 - ETHEREUM MESSAGING

A lot of what follows is inspired by exploratory work that John Lea has done at Status, previously Head of UX Architecture at Ubuntu.

6. Ethereum Messaging - Why?

It is easy to think that Waku v2 is only for human to human messaging, since that's how Waku is currently primarily used in the Status app. However, the goal is to be useful for generalized messaging, which includes other types of information as well as machine to machine messaging.

What is Ethereum M2M messaging? Going back to the Holy Trinity of Ethereum/Whisper/Swarm, the messaging component was seen as something that could facilitate messages between dapps and acts as a building block. This can help with things such as:

  • Reducing on-chain transactions
  • Reduce latency for operations
  • Decentralize centrally coordinated services (like WalletConnect)
  • Improve UX of dapps
  • Broadcast live information
  • A message transport layer for state channels

And so on.

7. Ethereum Messaging - Why? (Cont)

What are some examples of practical things Waku as used for Ethereum Messaging could solve?

  • Multisig transfers only needing one on chain transaction
  • DAO votes only needing one one chain transaction
  • Giving dapps ability to direct push notifications to users
  • Giving users ability to directly respond to requests from daps
  • Decentralized Wallet Connect

Etc.

8. What's needed to deliver this?

We can break it down into our actors:

  • Decentralized M2M messaging system (Waku)
  • Native wallets (Argent, Metamask, Status, etc)
  • Dapps that benefit from M2M messaging
  • Users whose problems are being solved

Each of these has a bunch of requirements in turn. The messaging system needs to be decentralized, scalable, robust, etc. Wallets need support for messaging layer, dapps need to integrate this, etc.

This is a lot! Growing adoption is a challenge. There is a catch 22 in terms of justifying development efforts for wallets, when no dapps need it, and likewise for dapps when no wallets support Waku. In addition to this, there must be proven usage of Waku before it can be relied on, etc. How can we break this up into smaller pieces of work?

9. Breaking up the problem and a high level roadmap

We can start small. It doesn't need to be used for critical features first. A more hybrid approach can be taken, where it acts more as a nice-to-have.

  1. Forking Whisper and solving scalability, spam, and other issues with it. This is a work in progress. What we talked about in part 1.
  2. Expose messaging API for Dapp developers.
  3. Implement decentralized version of WalletConnect. Currently wallets connect to dapps via a centralized service. Great UX.
  4. Solve DAO/Multi-Sig coordination problem. +E.g. send message to wallet-derived key when it is time to sign a transaction.
  5. Extend dapp-to-user and user-to-dapp communication to more dapps. Use lessons learned and examples to drive adoption for wallets/dapps.

And then build up from there.

10. We are hiring!

A lot of this will happen in Javascript and browsers, since that's the primary environment for a lot of wallets and dapps. We are currently hiring for a Waku JS Wallet integration lead to help push this effort further.

Come talk to me after or apply here.

That's it! You can find us on Status, Telegram, vac.dev. I'm on twitter here.

Questions?


]]>
+ + Oskar + +
+ + <![CDATA[Waku v2 Update]]> + https://vac.dev/rlog/waku-v2-update + + 2020-09-28T12:00:00.000Z + + A research log. Read on to find out what is going on with Waku v2, a messaging protocol. What has been happening? What is coming up next?

It has been a while since the last post. It is time for an update on Waku v2. Aside from getting more familiar with libp2p (specifically nim-libp2p) and some vacation, what have we been up to? In this post we'll talk about what we've gotten done since last time, and briefly talk about immediate next steps and future. But first, a recap.

Recap

In the last post (Waku v2 plan) we explained the rationale of Waku v2 - the current Waku network is fragile and doesn't scale. To solve this, Waku v2 aims to reduce amplification factors and get more user run nodes. We broke the work down into three separate tracks.

  1. Track 1 - Move to libp2p
  2. Track 2 - Better routing
  3. Track 3 - Accounting and user-run nodes

As well as various rough components for each track. The primary initial focus is track 1. This means things like: moving to FloodSub, simplify the protocol, core integration, topic interest behavior, historical message caching, and Waku v1<>v2 bridge.

Current state

Let's talk about the state of specs and our main implementation nim-waku. Then we'll go over our recent testnet, Nangang, and finish off with a Web PoC.

Specs

After some back and forth on how to best structure things, we ended up breaking down the specs into a few pieces. While Waku v2 is best thought of as a cohesive whole in terms of its capabilities, it is made up of several protocols. Here's a list of the current specs and their status:

Raw means there is not yet an implementation that corresponds fully to the spec, and draft means there is an implementation that corresponds to the spec. In the interest of space, we won't go into too much detail on the specs here except to note a few things:

  • The relay spec is essentially a thin wrapper on top of PubSub/FloodSub/GossipSub
  • The filter protocol corresponds to previous light client mode in Waku v1
  • The store protocol corresponds to the previous mailserver construct in Waku v1

The filter and store protocol allow for adaptive nodes, i.e. nodes that have various capabilities. For example, a node being mostly offline, or having limited bandwidth capacity. The bridge spec outlines how to bridge the Waku v1 and v2 networks.

Implementation

The main implementation we are working on is nim-waku. This builds on top of libraries such as nim-libp2p and others that the Nimbus team have been working on as part of their Ethereum 2.0 client.

Currently nim-waku implements the relay protocol, and is close to implementing filter and store protocol. It also exposes a Nim Node API that allows libraries such as nim-status to use it. Additionally, there is also a rudimentary JSON RPC API for command line scripting.

Nangang testnet

Last week we launched a very rudimentary internal testnet called Nangang. The goal was to test basic connectivity and make sure things work end to end. It didn't have things like: client integration, encryption, bridging, multiple clients, store/filter protocol, or even a real interface. What it did do is allow Waku developers to "chat" via RPC calls and looking in the log output. Doing this meant we exposed and fixed a few blockers, such as connection issues, deployment, topic subscription management, protocol and node integration, and basic scripting/API usage. After this, we felt confident enough to upgrade the main and relay spec to "draft" status.

Waku Web PoC

As a bonus, we wanted to see what it'd take to get Waku running in a browser. This is a very powerful capability that enables a lot of use cases, and something that libp2p enables with its multiple transport support.

Using the current stack, with nim-waku, would require quite a lot of ground work with WASM, WebRTC, Websockets support etc. Instead, we decided to take a shortcut and hack together a JS implementation called Waku Web Chat. This quick hack wouldn't be possible without the people behind js-libp2p-examples and js-libp2p and all its libraries. These are people like Jacob Heun, Vasco Santos, and Cayman Nava. Thanks!

It consists of a browser implementation, a NodeJS implementation and a bootstrap server that acts as a signaling server for WebRTC. It is largely a bastardized version of GossipSub, and while it isn't completely to spec, it does allow messages originating from a browser to eventually end up at a nim-waku node, and vice versa. Which is pretty cool.

Coming up

Now that we know what the current state is, what is still missing? what are the next steps?

Things that are missing

While we are getting closer to closing out work for track 1, there are still a few things missing from the initial scope:

  1. Store and filter protocols need to be finished. This means basic spec, implementation, API integration and proven to work in a testnet. All of these are work in progress and expected to be done very soon. Once the store protocol is done in a basic form, it needs further improvements to make it production ready, at least on a spec/basic implementation level.

  2. Core integration was mentioned in scope for track 1 initially. This work has stalled a bit, largely due to organizational bandwidth and priorities. While there is a Nim Node API that in theory is ready to be used, having it be used in e.g. Status desktop or mobile app is a different matter. The team responsible for this at Status (status-nim) has been making progress on getting nim-waku v1 integrated, and is expected to look into nim-waku v2 integration soon. One thing that makes this especially tricky is the difference in interface between Waku v1 and v2, which brings us to...

  3. Companion spec for encryption. As part of simplifying the protocol, the routing is decoupled from the encryption in v2 (1, 2). There are multiple layers of encryption at play here, and we need to figure out a design that makes sense for various use cases (dapps using Waku on their own, Status app, etc).

  4. Bridge implementation. The spec is done and we know how it should work, but it needs to be implemented.

  5. General tightening up of specs and implementation.

While this might seem like a lot, a lot has been done already, and the majority of the remaining tasks are more amenable to being pursued in parallel with other efforts. It is also worth mentioning that part of track 2 and 3 have been started, in the form of moving to GossipSub (amplification factors) and basics of adaptive nodes (multiple protocols). This is in addition to things like Waku Web which were not part of the initial scope.

Upcoming

Aside from the things mentioned above, what is coming up next? There are a few areas of interest, mentioned in no particular order. For track 2 and 3, see previous post for more details.

  1. Better routing (track 2). While we are already building on top of GossipSub, we still need to explore things like topic sharding in more detail to further reduce amplification factors.

  2. Accounting and user-run nodes (track 3). With store and filter protocol getting ready, we can start to implement accounting and light connection game for incentivization in a bottom up and iterative manner.

  3. Privacy research. Study better and more rigorous privacy guarantees. E.g. how FloodSub/GossipSub behaves for common threat models, and how custom packet format can improve things like unlinkability.

  4. zkSnarks RLN for spam protection and incentivization. We studied this last year and recent developments have made this relevant to study again. Create an experimental spec/PoC as an extension to the relay protocol. Kudos to Barry Whitehat and others like Kobi Gurkan and Koh Wei Jie for pushing this!

  5. Ethereum M2M messaging. Being able to run in the browser opens up a lot of doors, and there is an opportunity here to enable things like a decentralized WalletConnect, multi-sig transactions, voting and similar use cases. This was the original goal of Whisper, and we'd like to deliver on that.

As you can tell, quite a lot of things! Luckily, we have two people joining as protocol engineers soon, which will bring much needed support for the current team of ~2-2.5 people. More details to come in further updates.


If you are feeling adventurous and want to use early stage alpha software, check out the docs. If you want to read the specs, head over to Waku spec. If you want to talk with us, join us on Status or on Telegram (they are bridged).

]]>
+ + Oskar + +
+ + <![CDATA[What's the Plan for Waku v2?]]> + https://vac.dev/rlog/waku-v2-plan + + 2020-07-01T12:00:00.000Z + + Read about our plans for Waku v2, moving to libp2p, better routing, adaptive nodes and accounting!

tldr: The Waku network is fragile and doesn't scale. Here's how to solve it.

NOTE: This post was originally written with Status as a primary use case in mind, which reflects how we talk about some problems here. However, Waku v2 is a general-purpose private p2p messaging protocol, especially for people running in resource restricted environments.

Problem

The Waku network is fragile and doesn't scale.

As Status is moving into a user-acquisition phase and is improving retention rates for users they need the infrastructure to keep up, specifically when it comes to messaging.

Based on user acquisition models, the initial goal is to support 100k DAU in September, with demand growing from there.

With the Status Scaling Model we have studied the current bottlenecks as a function of concurrent users (CCU) and daily active users (DAU). Here are the conclusions.

**1. Connection limits**. With 100 full nodes we reach ~10k CCU based on connection limits. This can primarily be addressed by increasing the number of nodes (cluster or user operated). This assumes node discovery works. It is also worth investigating the limitations of max number of connections, though this is likely to be less relevant for user-operated nodes. For a user-operated network, this means 1% of users have to run a full node. See Fig 1-2.

**2. Bandwidth as a bottleneck**. We notice that memory usage appears to not be the primary bottleneck for full nodes, and the bottleneck is still bandwidth. To support 10k DAU, and full nodes with an amplification factor of 25, the required Internet speed is ~50 Mbps, which is a fast home Internet connection. For ~100k DAU only cloud-operated nodes can keep up (500 Mbps). See Fig 3-5.

**3. Amplification factors**. Reducing amplification factors with better routing, would have a high impact, but it is likely we'd need additional measures as well, such as topic sharding or similar. See Fig 8-13.

Figure 1-5:

+
+
+
+

See https://colab.research.google.com/drive/1Fz-oxRxxAFPpM1Cowpnb0nT52V1-yeRu#scrollTo=Yc3417FUJJ_0 for the full report.

What we need to do is:

  1. Reduce amplification factors
  2. Get more user-run full nodes

Doing this means the Waku network will be able to scale, and doing so in the right way, in a robust fashion. What would a fragile way of scaling be? Increasing our reliance on a Status Pte Ltd operated cluster which would paint us in a corner where we:

  • keep increasing requirements for Internet speed for full nodes
  • are vulnerable to censorship and attacks
  • have to control the topology in an artificial manner to keep up with load
  • basically re-invent a traditional centralized client-server app with extra steps
  • deliberately ignore most of our principles
  • risk the network being shut down when we run out of cash

Appetite

Our initial risk appetite for this is 6 weeks for a small team.

The idea is that we want to make tangible progress towards the goal in a limited period of time, as opposed to getting bogged down in trying to find a theoretically perfect generalized solution. Fixed time, variable scope.

It is likely some elements of a complete solution will be done separately. See later sections for that.

Solution

There are two main parts of the solution. One is to reduce amplification factors, and the other is incentivization to get more user run full nodes with desktop, etc.

What does a full node provide? It provides connectivity to the network, can act as a bandwidth "barrier" and be high or reasonably high availability. What this means right now is essentially topic interest and storing historical messages.

The goal is here to improve the status quo, not get a perfect solution from the get go. All of this can be iterated on further, for stronger guarantees, as well as replaced by other new modules.

Let's first look at the baseline, and then go into some of the tracks and their phases. Track 1 is best done first, after which track 2 and 3 can be executed in parallel. Track 1 gives us more options for track 2 and 3. The work in track 1 is currently more well-defined, so it is likely the specifics of track 2 and 3 will get refined at a later stage.

Baseline

Here's where we are at now. In reality, the amplification factor are likely even worse than this (15 in the graph below), up to 20-30. Especially with an open network, where we can't easily control connectivity and availability of nodes. Left unchecked, with a full mesh, it could even go as high x100, though this is likely excessive and can be dialed down. See scaling model for more details.

Track 1 - Move to libp2p

Moving to PubSub over libp2p wouldn't improve amplification per se, but it would be a stepping stone. Why? It paves the way for GossipSub, and would be a checkpoint on this journey. Additionally, FloodSub and GossipSub are compatible, and very likely other future forms of PubSub such as GossipSub 1.1 (hardened/more secure), EpiSub, forwarding Kademlia / PubSub over Kademlia, etc. Not to mention security. This would also give us access to the larger libp2p ecosystem (multiple protocols, better encryption, quic, running in the browser, security audits, etc, etc), as well as be a joint piece of infrastructure used for Eth2 in Nimbus. More wood behind fewer arrows.

See more on libp2p PubSub here: https://docs.libp2p.io/concepts/publish-subscribe/

As part of this move, there are a few individual pieces that are needed.

1. FloodSub

This is essentially what Waku over libp2p would look like in its most basic form.

One difference that is worth noting is that the app topics would not be the same as Waku topics. Why? In Waku we currently don't use topics for routing between full nodes, but only for edge/light nodes in the form of topic interest. In FloodSub, these topics are used for routing.

Why can't we use Waku topics for routing directly? PubSub over libp2p isn't built for rare and ephemeral topics, and nodes have to explicitly subscribe to a topic. See topic sharding section for more on this.

Moving to FloodSub over libp2p would also be an opportunity to clean up and simplify some components that are no longer needed in the Waku v1 protocol, see point below.

Very experimental and incomplete libp2p support can be found in the nim-waku repo under v2: https://github.com/status-im/nim-waku

2. Simplify the protocol

Due to Waku's origins in Whisper, devp2p and as a standalone protocol, there is a lot of stuff that has accumulated (https://rfc.vac.dev/spec/6/). Not all of it serves its purpose anymore. For example, do we still need RLP here when we have Protobuf messages? What about extremely low PoW when we have peer scoring? What about key management / encryption when we have encryption at libp2p and Status protocol level?

Not everything has to be done in one go, but being minimalist at this stage will keep the protocol lean and make us more adaptable.

The essential characteristic that has to be maintained is that we don't need to change the upper layers, i.e. we still deal with (Waku) topics and some envelope like data unit.

3. Core integration

As early as possible we want to integrate with Core via Stimbus in order to mitigate risk and catch integration issues early in the process. What this looks like in practice is some set of APIs, similar to how Whisper and Waku were working in parallel, and experimental feature behind a toggle in core/desktop.

4. Topic interest behavior

While we target full node traffic here, we want to make sure we maintain the existing bandwidth requirements for light nodes that Waku v1 addressed (https://vac.dev/fixing-whisper-with-waku). This means implementing topic-interest in the form of Waku topics. Note that this would be separate from app topics notes above.

5. Historical message caching

Basically what mailservers are currently doing. This likely looks slightly different in a libp2p world. This is another opportunity to simplify things with a basic REQ-RESP architecture, as opposed to the roundabout way things are now. Again, not everything has to be done in one go but there's no reason to reimplement a poor API if we don't have to.

Also see section below on adaptive nodes and capabilities.

6. Waku v1 <> Libp2p bridge

To make the transition complete, there has to be a bridge mode between current Waku and libp2p. This is similar to what was done for Whisper and Waku, and allows any nodes in the network to upgrade to Waku v2 at their leisure. For example, this would likely look different for Core, Desktop, Research and developers.

Track 2 - Better routing

This is where we improve the amplification factors.

1. GossipSub

This is a subprotocol of FloodSub in the libp2p world. Moving to GossipSub would allow traffic between full nodes to go from an amplification factor of ~25 to ~6. This basically creates a mesh of stable bidirectional connections, together with some gossiping capabilities outside of this view.

Explaining how GossipSub works is out of scope of this document. It is implemented in nim-libp2p and used by Nimbus as part of Eth2. You can read the specs here in more detail if you are interested: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md and https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md

+
+
+

While we technically could implement this over existing Waku, we'd have to re-implement it, and we'd lose out on all the other benefits libp2p would provide, as well as the ecosystem of people and projects working on improving the scalability and security of these protocols.

2. Topic sharding

This one is slightly more speculative in terms of its ultimate impact. The basic idea is to split the application topic into N shards, say 10, and then each full node can choose which shards to listen to. This can reduce amplification factors by another factor of 10.

+

Note that this means a light node that listens to several topics would have to be connected to more full nodes to get connectivity. For a more exotic version of this, see https://forum.vac.dev/t/rfc-topic-propagation-extension-to-libp2p-pubsub/47

This is orthogonal from the choice of FloodSub or GossipSub, but due to GossipSub's more dynamic nature it is likely best combined with it.

3. Other factors

Not a primary focus, but worth a look. Looking at the scaling model, there might be other easy wins to improve overall bandwidth consumption between full nodes. For example, can we reduce envelope size by a significant factor?

Track 3 - Accounting and user-run nodes

This is where we make sure the network isn't fragile, become a true p2p app, get our users excited and engaged, and allow us to scale the network without creating an even bigger cluster.

To work in practice, this has a soft dependency on node discovery such as DNS based discovery (https://eips.ethereum.org/EIPS/eip-1459) or Discovery v5 (https://vac.dev/feasibility-discv5).

1. Adaptive nodes and capabilities

We want to make the gradation between light nodes, full nodes, storing (partial set of) historical messages, only acting for a specific shard, etc more flexible and explicit. This is required to identify and discover the nodes you want. See https://github.com/vacp2p/specs/issues/87

Depending on how the other tracks come together, this design should allow for a desktop node to identify as a full relaying node for some app topic shard, but also express waku topic interest and retrieve historical messages itself.

E.g. Disc v5 can be used to supply node properties through ENR.

2. Accounting

This is based on a few principles:

  1. Some nodes contribute a lot more than other nodes in the network
  2. We can account for the difference in contribution in some fashion
  3. We want to incentivize nodes to tell the truth, and be incentivized not to lie

Accounting here is a stepping stone, where accounting is the raw data upon which some settlement later occurs. It can have various forms of granularity. See https://forum.vac.dev/t/accounting-for-resources-in-waku-and-beyond/31 for discussion.

We also note that in GossipSub, the mesh is bidirectional. Additionally, nodes misreporting doesn't appear to be a high priority issue. What is an issue is having people run full nodes in the first place. There are a few points to that. It has to be possible in the end-user UX, nodes have to be discovered, and it has to be profitable/visible that you are contributing. UX and discovery are out of scope for this work, whereas visibility/accounting is part of this scope. Settlement is a stretch goal here.

The general shape of the solution is inspired by the Swarm model, where we do accounting separate from settlement. It doesn't require any specific proofs, but nodes are incentivized to tell the truth in the following way:

  1. Both full node and light node do accounting in a pairwise, local fashion
  2. If a light node doesn't ultimately pay or lie about reporting, they get disconnected (e.g.)
  3. If a full node doesn't provide its service the light node may pick another full node (e.g.)

While accounting for individual resource usage is useful, for the ultimate end user experience we can ideally account for other things such as:

  • end to end delivery
  • online time
  • completeness of storage

This can be gradually enhanced and strengthened, for example with proofs, consistency checks, Quality of Service, reputation systems. See https://discuss.status.im/t/network-incentivisation-first-draft/1037 for one attempt to provide stronger guarantees with periodic consistency checks and a shared fund mechanism. And https://forum.vac.dev/t/incentivized-messaging-using-validity-proofs/51 for using validity proofs and removing liveness requirement for settlement.

All of this is optional at this stage, because our goal here is to improve the status quo for user run nodes. Accounting at this stage should be visible and correspond to the net benefit a node provides to another.

As a concrete example: a light node has some topic interest and cares about historical messages on some topic. A full node communicates envelopes as they come in, communicates their high availability (online time) and stores/forward stored messages. Both nodes have this information, and if they agree settlement (initially just a mock message) can be sending a payment to an address at some time interval / over some defined volume. See future sections for how this can be improved upon.

Also see below in section 4, using constructs such as eigentrust as a local reputation mechanism.

3. Relax high availability requirement

If we want desktop nodes to participate in the storing of historical messages, high availability is a problem. It is a problem for any node, especially if they lie about it, but assuming they are honest it is still an issue.

By being connected to multiple nodes, we can get an overlapping online window. Then these can be combined together to get consistency. This is obviously experimental and would need to be tested before being deployed, but if it works it'd be very useful.

Additionally or alternatively, instead of putting a high requirement on message availability, focus on detection of missing information. This likely requires re-thinking how we do data sync / replication.

4. Incentivize light and full nodes to tell the truth (policy, etc)

In accounting phase it is largely assumed nodes are honest. What happens when they lie, and how do we incentivize them to be honest? In the case of Bittorrent this is done with tit-for-tat, however this is a different kind of relationship. What follows are some examples of how this can be done.

For light nodes:

  • if they don't, they get disconnected
  • prepayment (especially to "high value" nodes)

For full nodes:

  • multiple nodes reporting to agree, where truth becomes a shelling point
  • use eigentrust
  • staking for discovery visibility with slashing

5. Settlement PoC

Can be done after phase 2 if so desired. Basically integrate payments based on accounting and policy.

Out of scope

  1. We assume the Status Base model requirements are accurate.
  2. We assume Core will improve retention rates.
  3. We assume the Stimbus production team will enable integration of nim-waku.
  4. We assume Discovery mechanisms such as DNS and Discovery v5 will be worked on separately.
  5. We assume Core will, at some point, provide an UX for integrating payment of services.
  6. We assume the desktop client is sufficiently usable.
  7. We assume Core and Infra will investigate ways of improving MaxPeers.
]]>
+ + Oskar + +
+ + <![CDATA[Feasibility Study: Discv5]]> + https://vac.dev/rlog/feasibility-discv5 + + 2020-04-27T12:00:00.000Z + + Looking at discv5 and the theoretical numbers behind finding peers.

Disclaimer: some of the numbers found in this write-up could be inaccurate. They are based on the current understanding of theoretical parts of the protocol itself by the author and are meant to provide a rough overview rather than bindable numbers.

This post serves as a more authoritative overview of the discv5 study, for a discussionary post providing more context make sure to check out the corresponding discuss post. Additionally, if you are unfamiliar with discv5, check out my previous write-up: "From Kademlia to Discv5".

Motivating Problem

The discovery method currently used by Status, is made up of various components and grew over time to solve a mix of problems. We want to simplify this while maintaining some of the properties we currently have.

Namely, we want to ensure censorship resistance to state-level adversaries. One of the issues that caused Status to add to their discovery method was the fact that addresses from providers like AWS and GCP were blocked both in Russia and China. Additionally, one of the main factors required is the ability to function on resource restricted devices.

Considering we are talking about resource restricted devices, let's look at the implications and what we need to consider:

  • Battery consumption - constant connections like websockets consume a lot of battery life.
  • CPU usage - certain discovery methods may be CPU intensive, slowing an app down and making it unusable.
  • Bandwidth consumption - a lot of users will be using data plans, the discovery method needs to be efficient in order to accommodate those users without using up significant portions of their data plans.
  • Short connection windows - the discovery algorithm needs to be low latency, that means it needs to return results fast. This is because many users will only have the app open for a short amount of time.
  • Not publicly connectable - There is a good chance that most resource restricted devices are not publicly connectable.

For a node to participate as both a provider and a consumer in the discovery method — meaning it both reads from other nodes' stored DHTs and hosts the DHT for other nodes to read from — it needs to be publicly connectable. This means another node must be able to connect to some public IP of the given node.

With devices that are behind a NAT, this is easier said than done. Especially mobile devices, which when connected to 4G LTE networks are often stuck behind a symmetric NAT, drastically reducing the success rate of NAT traversal. Keeping this in mind, it becomes obvious that most resource restricted devices will be consumers rather than providers due to this technical limitation.

In order to answer our questions, we formulated the problem with a simple method for testing. The "needle in a haystack" problem was formulated to figure out how easily a specific node can be found within a given network. This issue was fully formulated in vacp2p/research#15.

Overview

The main things we wanted to investigate was the overhead on finding a peer. This means we wanted to look at both the bandwidth, latency and effectiveness of this. There are 2 methods which we can use to find a peer:

  • We can find a peer with a specific ID, using normal lookup methods as documented by Kademlia.
  • We can find a peer that advertises a capability, this is possible using either capabilities advertised in the ENR or through topic tables.

Feasibility

To be able to investigate the feasibility of discv5, we used various methods including rough calculations which can be found in the notebook, and a simulation isolated in vacp2p/research#19.

CPU & Memory Usage

The experimental discv5 has already been used within Status, however what was noticed was that the CPU and memory usage was rather high. It should therefore be investigated whether this is still the case, and if it is, where this stems from should be isolated. Additionally it is worth looking at whether or not this is the case with both the go and nim implementations.

See details: vacp2p/research#31

NAT on Cellular Data

If a peer is not publicly connectable it can not participate in the DHT both ways. A lot of mobile phones are behind symmetric NATs which make UDP hole-punching close to impossible. It should be investigated whether or not mobile phones will be able to participate both ways and if there are good methods for doing hole-punching.

See details: vacp2p/research#29

Topic Tables

Topic Tables allow us the ability to efficiently find nodes given a specific topic. However, they are not implemented in the status-im/nim-eth implementation nor are they fully finalized in the spec. These are important if the network grows past a size where the concentration of specific nodes is relatively low making them hard to find.

See details: vacp2p/research#26

Finding a node

It is important to note, that given a network is relatively small sized, eg 100-500 nodes, then finding a node given a specific address is relatively manageable. Additionally, if the concentration of a specific capability in a network is reasonable, then finding a node advertising its capabilities using an ENR rather than the topic table is also manageable. A reasonable concentration for example would be 10%, which would give us an 80% chance of getting a node with that capability in the first lookup request. This can be explored more using our discv5 notebook.

Results

Research has shown that finding a node in the DHT has a relatively low effect on bandwidth, both inbound and outbound. For example when trying to find a node in a network of 100 nodes, it would take roughly 5668 bytes total. Additionally if we assume 100ms latency per request it would range at ≈ 300ms latency, translating to 3 requests to find a specific node.

General Thoughts

One of the main blockers right now is figuring out what the CPU and memory usage of discv5 is on mobile phones, this is a large blocker as it affects one of the core problems for us. We need to consider whether discv5 is an upgrade as it allows us to simplify our current discovery process or if it is too much of an overhead for resource restricted devices. The topic table feature could largely enhance discovery however it is not yet implemented. Given that CPU and memory isn't too high, discv5 could probably be used as the other issues are more "features" than large scale issues. Implementing it would already reduce the ability for state level adversaries to censor our nodes.

Acknowledgements

  • Oskar Thoren
  • Dmitry Shmatko
  • Kim De Mey
  • Corey Petty
]]>
+ + Dean + +
+ + <![CDATA[What Would a WeChat Replacement Need?]]> + https://vac.dev/rlog/wechat-replacement-need + + 2020-04-16T12:00:00.000Z + + What would a self-sovereign, private, censorship-resistant and open alternative to WeChat look like?

What would it take to replace WeChat? More specifically, what would a self-sovereign, private, censorship-resistant and open alternative look like? One that allows people to communicate, coordinate and transact freely.

Background

What WeChat provides to the end-user

Let's first look at some of the things that WeChat provides. It is a lot:

  • Messaging: 1:1 and group chat. Text, as well as voice and video. Post gifs. Share location.
  • Group chat: Limited to 500 people; above 100 people, members need to verify with a bank account. Also has group video chat and QR code to join a group.
  • Timeline/Moments: Post comments with attachments and have people like/comment on it.
  • Location Discovery: See WeChat users that are nearby.
  • Profile: Nickname and profile picture; can alias people.
  • "Broadcast" messages: Send one message to many contacts, up to 200 people (spam limited).
  • Contacts: Max 5000 contacts (people get around it with multiple accounts and sim cards).
  • App reach: Many different web apps, extensions, native apps, etc. Scan QR code to access web app from phone.
  • Selective posting: Decide who can view your posts and who can view your comments on other people's post.
  • Transact: Send money gifts through red envelopes.
  • Transact: Use WeChat pay to transfer money to friends and businesses; linked account with Alipay that is connected to your bank account.
  • Services: Find taxis and get notifications; book flights, train tickets, hotels etc.
  • Mini apps: API for all kinds of apps that allow you to provide services etc.
  • Picture in picture: allowing you to have a video call while using the app.

And much more. Not going to go through it all in detail, and there are probably many things I don't know about WeChat since I'm not a heavy user living in mainland China.

How WeChat works - a toy model

This is an overly simplistic model of how WeChat works, but it is sufficient for our purposes. This general design applies to most traditional client-server apps today.

To sign up for account you need a phone number or equivalent. To get access to some features you need to verify your identity further, for example with official ID and/or bank account.

When you sign up this creates an entry in the WeChat server, from now on treated as a black box. You authenticate with that box, and that's where you get your messages from. If you go online the app asks that box for messages you have received while you were offline. If you log in from a different app your contacts and conversations are synced from that box.

The box gives you an account, it deals with routing to your contacts, it stores messages and attachments and gives access to mini apps that people have uploaded. For transacting money, there is a partnership with a different company that has a different box which talks to your bank account.

This is done in a such a way that they can support a billion users with the features above, no sweat.

Whoever controls that box can see who you are talking with and what the content of those messages are. There is no end to end encryption. If WeChat/Tencent disagrees with you for some reason they can ban you. This means you can't interact with the box under that name anymore.

What do we want?

We want something that is self-sovereign, private, censorship-resistant and open that allows individuals and groups of people to communicate and transact freely. To explore what this means in more detail, without getting lost in the weeds, we provide the following list of properties. A lot of these are tied together, and some fall out of the other requirements. Some of them stand in slight opposition to each other.

Self-sovereign identity. Exercises authority within your own sphere. If you aren't harming anyone, you should be able to have an account and communicate with other people.

Pseudonymity, and ideally total anonymity. Not having your identity tied to your real name (e.g. through phone number, bank account, ID, etc). This allows people to act more freely without being overly worried about censorship and coercion in the real world. While total anonymity is even more desirable - especially to break multiple hops to a true-name action - real-world constraints sometimes makes this more challenging.

Private and secure communication. Your communication and who you transact with should be for your eyes only. This includes transactions (transfer of value) as a form of communication.

Censorship-resistance. Not being able to easily censor individuals on the platform. Both at an individual, group and collective level. Not having single points of failure that allow service to be disrupted.

Decentralization. Partly falls out of censorship-resistance and other properties. If infrastructure isn't decentralized it means there's a single point of failure that can be disrupted. This is more of a tool than a goal on its own, but it is an important tool.

Built for mass adoption. Includes scalability, UX (latency, reliability, bandwidth consumption, UI etc), and allowing for people to stick around. One way of doing this is to allow users to discover people they want to talk to.

Scalability. Infrastructure needs to support a lot of users to be a viable alternative. Like, a billion of them (eventually).

Fundamentals in place to support great user experience. To be a viable alternative, aside from good UI and distribution, fundamentals such as latency, bandwidth usage, consistency etc must support great UX to be a viable alternative.

Works for resource restricted devices, including smartphones. Most people will use a smartphone to use this. This means it has to work well on them and similar devices, without becoming a second-class citizen where we ignore properties such as censorship-resistance and privacy. Some concession to reality will be necessary due to additional constraints, which leads us to...

Adaptive nodes. Nodes will have different capabilities, and perhaps at different times. To maintain a lot of the properties described here it is desirable if as many participants as possible are first-class citizens. If a phone is switching from a limited data plan to a WiFi network or from battery to AC power it can do more useful work, and so on. Likewise for a laptop with a lot of free disk space and spare compute power, etc.

Sustainable. If there's no centralized, top down ad-driven model, this means all the infrastructure has to be sustainable somehow. Since these are individual entities, this means it has to be paid for. While altruistic modes and similar can be used, this likely requires some form of incentivization scheme for useful services provided in the network. Related: free rider problem.

Spam resistant. Relates to sustainability, scalability and built for mass adoption. Made more difficult by pseudonymous identity due to whitewashing attacks.

Trust-minimized. To know that properties are provided for and aren't compromised, various ways of minimizing trust requirements are useful. This also relates to mass adoption and social cohesion. Examples include: open and audited protocols, open source, reproducible builds, etc. This also relates to how mini apps are provided for, since we may not know their source but want to be able to use them anyway.

Open source. Related to above, where we must be able to inspect the software to know that it functions as advertised and hasn't been compromised, e.g. by uploading private data to a third party.

Some of these are graded and a bit subtle, i.e.:

  • Censorship resistance would ideally be able to absorb Internet shutdowns. This would require an extensive MANET/meshnet infrastructure, which while desirable, requires a lot of challenges to be overcome to be feasible.
  • Privacy would ideally make all actions (optionally) totally anonymous, though this may incur undue costs on bandwidth and latency, which impacts user experience.
  • Decentralization, certain topologies, such as DHTs, are efficient and quite decentralized but still have some centralized aspects, which makes it attackable in various ways. Ditto for blockchains compared with bearer instruments which requires some coordinating infrastructure, compared with naturally occuring assets such as precious metals.
  • "Discover people" and striving for "total anonymity" might initially seem incompatible. The idea is to provide for sane defaults, and then allow people to decide how much information they want to disclose. This is the essence of privacy.
  • Users often want some form of moderation to get a good user experience, which can be seen as a form of censorship. The idea is to raise the bar on the basics, the fundamental infrastructure. If individuals or specific communities want certain moderation mechanisms, that is still a compatible requirement.

Counterpoint 1

We could refute the above by saying that the design goals are undesirable. We want a system where people can censor others, and where everyone is tied to their real identity. Or we could say something like, freedom of speech is a general concept, and it doesn't apply to Internet companies, even if they provide a vital service. You can survive without it and you should've read the terms of service. This roughly characterizes the mainstream view.

An additional factor here is the idea that a group of people know more about what's good for you than you do, so they are protecting you.

Counterpoint 2

We could agree with all these design goals, but think they are too extreme in terms of their requirements. For example, we could operate as a non profit, take donations and volunteers, and then host the whole infrastructure ourselves. We could say we are in a friendly legislation, so we won't be a single point of failure. Since we are working on this and maybe even our designs are open, you can trust us and we'll provide service and infrastructure that gives you what you want without having to pay for it or solve all these complex decentralized computation and so on problems. If you don't trust us for some reason, you shouldn't use us regardless. Also, this is better than status quo. And we are more likely to survive by doing this, either by taking shortcuts or by being less ambitious in terms of scope.

Principal components

There are many ways to skin a cat, but this is one way of breaking down the problem. We have a general direction with the properties listed above, together with some understanding of how WeChat works for the everyday user. Now the question is, what infrastructure do we need to support this? How do we achieve the above properties, or at least get closer to them? We want to figure out the necessary building blocks, and one way of doing this is to map out likely necessary components.

Background: Ethereum and Web3 stack

It is worth noting that a lot of the required infrastructure has been developed, at least as concepts, in the original Ethereum / Web3 vision. In it there is Ethereum for consensus/compute/transact, storage through Swarm, and communication through Whisper. That said, the main focus has been on the Ethereum blockchain itself, and a lot of things have happened in the last 5y+ with respect to technology around privacy and scalabilty. It is worth revisiting things from a fresh point of view, with the WeChat alternative in mind as a clear use case.

Account - self-sovereign identity and the perils of phone numbers

Starting from the most basic: what is an account and how do you get one? With most internet services today, WeChat and almost all popular messaging apps included, you need to signup with some centralized authority. Usually you also have to verify this with some data that ties this account to you as an individual. E.g. by requiring a phone number, which in most jurisdictions 1 means giving out your real ID. This also means you can be banned from using the service by a somewhat arbitrary process, with no due process.

Now, we could argue these app providers can do what they want. And they are right, in a very narrow sense. As apps like WeChat (and Google) become general-purpose platforms, they become more and more ingrained in our everyday lives. They start to provide utilities that we absolutely require to work to go about our day, such as paying for food or transportation. This means we need higher standard than this.

Justifications for requiring phone numbers are usually centered around three claims:

  1. Avoiding spam
  2. Tying your account to your real name, for various reasons
  3. Using as a commonly shared identifier as a social network discovery mechanism

Of course, many services require more than phone numbers. E.g. email, other forms of personal data such as voice recording, linking a bank account, and so on.

In contrast, a self-sovereign system would allow you to "create an account" completely on your own. This can easily be done with public key cryptography, and it also paves the way for end-to-end encryption to make your messages private.

The main issue with this is that you need to get more creative about avoiding spam (e.g. through white washing attacks), and ideally there is some other form of social discovery mechanism.

Just having a public key as an account isn't enough though. If it goes through a central server, then nothing is stopping that server from arbitrarly blocking requests related to that public key. Of course, this also depends on how transparent such requests are. Fundamentally, lest we rely completely on goodwill, there needs to be multiple actors by which you can use the service. This naturally points to decentralization as a requirement. See counterpoint.

Even so, if the system is closed source we don't know what it is doing. Perhaps the app communicating is also uploading data to another place, or somehow making it possible to see who is who and act accordingly.

You might notice that just one simple property, self-sovereign identity, leads to a slew of other requirements and properties. You might also notice that WeChat is far from alone in this, even if their identity requirements might be a bit more stringent than, say, Telegram. Their control aspects are also a bit more extreme, at least for someone with western sensibilities 2.

Most user facing applications have similar issues, Google Apps/FB/Twitter etc. For popular tools that have this built in, we can look at git - which is truly decentralized and have keypair at the bottom. It is for a very specific technical domain, and even then people rely on Github. Key management is fairly difficult even for technical people, and for normal people even more so. Banks are generally far behind on this tech, relying on arcane procedures and special purpose hardware for 2FA. That's another big issue.

Let's shift gears a bit and talk about some other functional requirements.

Routing - packets from A to B

In order to get a lot of the features WeChat provides, we need the ability to do three things: communicate, store data, and transact with people. We need a bit more than that, but let's focus on this for now.

To communicate with people, in the base case, we need to go from one phone to another phone that is separated by a large distance. This requires some form of routing. The most natural platform to build this on is the existing Internet, though not the only one. Most phones are resource restricted, and are only "on" for brief periods of time. This is needed to preserve battery and bandwidth. Additionally, the Internet uses IPs as endpoints, which change as phones move through space. NAT punching etc isn't always perfect either. This means we need a way to get a message from one public key to another, and through some intermediate nodes. We can think of these nodes as a form of service network. Similar to how a power grid works, or phone lines, or collection of ISPs.

One important property here is to ensure we don't end up in a situation like the centralized capture scenario above, something we've seen with centralized ISPs 3 4 where they can choose which traffic is good and which is bad. We want to allow the use of different service nodes, just like if a restaurant gives you food poisoning you can go to the one next door and then the first one goes out of business after a while. And the circle of life continues.

We shouldn't be naive though, and think that this is something nodes are likely to do for free. They need to be adequately compensated for their services, in some form of incentivization scheme. That can either be monetary, or as in the case of Bittorrent, more of a barter situation where you use game theory to coordinate with strangers 5, and some form of reputation attached to it (for private trackers).

There are many ways of doing routing, and we won't go into too much technical detail here. Suffice to say is that you likely want both a structured and unstructured alternative, and that these come with several trade-offs when it comes to efficiency, metadata protection, ability to incentivize, compatibility with existing topologies, and suitability for mobile phones (mostly offline, bandwidth restricted, not directly connectable). Expect more on this in a future article.

Some of these considerations naturally leads us into the storage and transaction components.

Storage - available and persistent for later

If mobile phones are mostly offline, we need some way to store these messages so they can be retrieved when online again. The same goes for various kinds of attachments as well, and for when people are switching devices. A user might control their timeline, but in the WeChat case that timeline is stored on Tencent's servers, and queried from there as well. This naturally needs to happen by some other service nodes. In the WeChat case, and for most IMs, the way these servers are paid for is through some indirect ad mechanism. The entity controlling these ads and so on is the same one as the one operating the servers for storage. A more direct model with different entities would see these services being compensated for their work.

We also need storage for attachments, mini-apps, as well as a way of understanding the current state of consensus when it comes to the compute/transact module. In the WeChat case, this state is completely handled by the bank institution or one of their partners, such as Alibaba. When it comes to bearer instruments like cash, no state needs to be kept as that's a direct exchange in the physical world. This isn't directly compatible with transfering value over a distance.

All of this state requires availability and persistence. It should be done in a trust minimized fashion and decentralized, which requires some form of incentivization for keeping data around. If it isn't, you are relying on social cohesion which breaks down at very large scales.

Since data will be spread out across multiple nodes, you need a way to sync data and transfer it in the network. As well as being able to add and query data from it. All of this requires a routing component.

To make it more censorship resistant it might be better to keep it as a general-purpose store, i.e. individuals don't need to know what they are storing. Otherwise, you naturally end up in a situation where individual nodes can be pressured to not store certain content.

Messaging - from me to you to all of us (not them)

This builds on top of routing, but it has a slightly different focus. The goal is to allow for individuals and groups to communicate in a private, secure and censorship-resistant manner.

It also needs to provide a decent interface to the end user, in terms of dealing seamlessly with offline messages, providing reliable and timely messaging.

In order to get closer to the ideal of total anonymity, it is useful to be able to hide metadata of who is talking to whom. This applies to both normal communication as well as for transactions. Ideally, no one but the parties involved can see who is taking part in a conversation. This can be achieved through various techniques such as mixnets, anonymous credentials, private information retrieval, and so on. Many of these techniques have a fundamental trade-off with latency and bandwidth, something that is a big concern for mobile phones. Being able to do some form of tuning, in an adaptive node manner, depending on your threat model and current capabilities is useful here.

The baseline here is pseudonymity, and having tools to allow individuals to "cut off" ties to their real world identity and transactions. People act different in different circles in the real world, and this should be mimicked online as well. Your company, family or government shouldn't be able to know what exactly you use your paycheck for, and who you are talking to.

Compute - transact, contract and settle

The most immediate need here is transaction from A to B. Direct exchange. There is also a more indirect need for private lawmaking and contracting.

We talked about routing and storage and how they likely need to be incentivized to work properly. How are they going to be compensated? While this could in theory work via existing banking system and so on, this would be rather heavy. It'd also very likely require tying your identifier to your legal name, something that goes against what we want to achieve. What we want is something that acts more as right-to-access, similar to the way cash functions in a society 6. I pay for a fruit with something that is valuable to you and then I'm on my way.

While there might be other candidates, such as pre-paid debit cards and so on, this transaction mode pretty much requires a cryptocurrency component. The alternative is to do it on a reputation basis, which might work for small communities, due to social cohesion, but quickly deteriorates for large ones 7. Ad hoc models like private Bittorrent trackers are centralized and easy to censor.

Now, none of the existing cryptocurrency models are ideal. They also all suffer from lack of widespread use, and it is difficult to get onboarded to them in the first place. Transactions in Bitcoin are slow. Ethereum is faster and has more capabilities, but it still suffers from linking payments over time, which makes the privacy part of this more difficult. Zcash, Monero and similar are interesting, but also require more use. For Zcash, shielded transactions appear to only account for less than 2% of all transactions in 2019 8 9.

Another dimension is what sets general purpose cryptocurrencies like Ethereum apart. Aside from just paying from A to B, you can encode rules about when something should be paid out and not. This is very useful for doing a form of private lawmaking, contracting, for setting up service agreements with these nodes. If there's no trivial recourse as in the meatspace world, where you know someone's name and you can sue them, you need a different kind of model.

What makes something like Zcash interesting is that it works more like digital cash. Instead of leaving a public trail for everyone, where someone can see where you got the initial money from and then trace you across various usage, for Zcash every hop is privacy preserving.

To fulfill the general goals of being censorship resistant and secure, it is also vital that the system being used stays online and can't be easily disrupted. That points to disintermediation, as opposed to using gateways and exchanges. This is a case where something like cash, or gold, is more direct, since no one can censor this transaction without being physically present where this direct exchange is taking place. However, like before, this doesn't work over distance.

Secure chat - just our business

Similar to the messaging module above. The distinction here is that we assume the network part has already taken place. Here we are interested in keeping the contents of messages private, so that means confidentiality/end-to-end encryption, integrity, authentication, as well as forward secrecy and plausible deniability. This means that even if there's some actor that gets some private key material, or confiscated your phone, there is some level of...ephemerality to your conversations. Another issue here in terms of scalable private group chat.

Extensible mini apps

This relates to the compute and storage module above. Essentially we want to provide mini apps as in WeChat, but to do so in a way that is compatible with what we want to achieve more generally. This allows individuals and small businesses to create small tools for various purposes, and coordinate with strangers. E.g. booking a cab or getting an insurance, and so on.

This has a higher dependency on the contracting/general computation aspect. I.e. often it isn't only a transaction, but you might want to encode some specific rules here that strangers can abide by without having too high trust requirements. As a simple example: escrows.

This also needs an open API that anyone can use. It should be properly secured, so using one doesn't compromise the rest of the system it is operating in. To be censorship resistant it requires the routing and storage component to work properly.

Where are we now?

Let's look back at some of desirable properties we set out in the beginning and see how close we are to building out the necessary components. Is it realistic at all or just a pipe dream? We'll see that there are many building blocks in place, and there's reason for hope.

Self-sovereign identity. Public key crypto and web of trust like constructs makes this possible.

Pseudonymity, and ideally total anonymity. Pseudonymity can largely be achieved with public key crypto and open systems that allow for permissionless participation. For transactions, pseudonymity exists in most cryptocurrencies. The challenge is linkage across time, especially when interfacing with other "legacy" system. There are stronger constructs that are actively being worked on and are promising here, such as mixnets (Nym), mixers (Wasabi Wallet, Tornado.Cash) and zero knowledge proofs (Zcash, Ethereum, Starkware). This area of applied research has exploded over the last few years.

Private and secure communication. Signal has pioneered a lot of this, following OTR. Double Ratchet, X3DH. E2EE is minimum these days, and properties like PFS and PD are getting better. For metadata protection, you have Tor, with its faults, and more active research on mixnets and private information retrieval, etc.

Censorship-resistance. This covers a lot of ground across the spectrum. Technologies like Bittorrent, Bitcoin/Ethereum, Tor obfuscated transports, E2EE by default, partial mesh networks in production, and the ability to move/replicate host machines more quickly have all made this more of a reality than it used to be. Of course, the use of techniques such as deep packet inspection and internet shutdowns has increased as well.

Decentralization. Cryptocurrencies, projects like libp2p and IPFS. Need to be mindful here of many projects that claim decentralization but are still vulnerable to single points of failures, such as relying on gateways.

Built for mass adoption. This one is more subjective. There's definitely a lot of work to be done here, both when it comes to fundamental performance, key management and things like social discoverability. Directionally these things are improving and becoming easier for the average person but there is a lot to be done here.

Scalability. With projects like Ethereum 2.0 and IPFS more and more resources are being put into this, both at the consensus/compute layer as well as networking (gossip, scalable Kademlia) layer. Also various layer 2 solutions for transactions.

Fundamentals in place to support great user experience. Similar to built for mass adoption. As scalability becomes more important, more applied research is being done in the p2p area to improve things like latency, bandwidth.

Works for resource restricted devices, including smartphones. Work in progress and not enough focus here, generally an afterthought. Also have stateless clients etc.

Adaptive nodes. See above. With subprotocols and capabilities in Ethereum and libp2p, this is getting easier.

Sustainable. Token economics is a thing. While a lot of it won't stay around, there are many more projects working on making themselves dispensable. Being open source, having an engaged community and enabling users to run their own infrastructure. Users as stakeholders.

Spam resistant. Tricky problem if you want to be pseudonymous, but some signs of hope with incentivization mechanisms, zero knowledge based signaling, etc. Together with various forms of rate limiting and better controlling of topology and network amplification. And just generally being battle-tested by real world attacks, such as historical Ethereum DDoS attacks.

Trust minimized. Bitcoin. Zero knowledge provable computation. Open source. Reproducible builds. Signed binaries. Incentive compatible structures. Independent audits. Still a lot of work, but getting better.

Open source. Big and only getting bigger. Including mainstream companies.

What's next?

We've looked at what WeChat provides and what we'd like an alternative to look like. We've also seen a few principal modules that are necessary to achieve those goals. To achieve all of this is a daunting task, and one might call it overly ambitious. We've also seen how far we've come with some of the goals, and how a lot of the pieces are there, in one form or another. Then it is a question of putting them all together in the right mix.

The good news is that a lot of people are working all these building blocks and thinking about these problems. Compared to a few years ago we've come quite far when it comes to p2p infrastructure, privacy, security, scalability, and general developer mass and mindshare. If you want to join us in building some of these building blocks, and assembling them, check out our forum.

PS. We are hiring protocol engineers. DS

Acknowledgements

Corey, Dean, Jacek.

References

]]>
+ + Oskar + +
+ + <![CDATA[From Kademlia to Discv5]]> + https://vac.dev/rlog/kademlia-to-discv5 + + 2020-04-09T16:00:00.000Z + + A quick history of discovery in peer-to-peer networks, along with a look into discv4 and discv5, detailing what they are, how they work and where they differ.

If you've been working on Ethereum or adjacent technologies you've probably heard of discv4 or discv5. But what are they actually? How do they work and what makes them different? To answer these questions, we need to start at the beginning, so this post will assume that there is little knowledge on the subject so the post should be accessible for anyone.

The Beginning

Let's start right at the beginning: the problem of discovery and organization of nodes in peer-to-peer networks.

Early P2P file sharing technologies, such as Napster, would share information about who holds what file using a single server. A node would connect to the central server and give it a list of the files it owns. Another node would then connect to that central server, find a node that has the file it is looking for and contact that node. This however was a flawed system -- it was vulnerable to attacks and left a single party open to lawsuits.

It became clear that another solution was needed, and after years of research and experimentation, we were given the distributed hash table or DHT.

Distributed Hash Tables

In 2001 4 new protocols for such DHTs were conceived, Tapestry, Chord, CAN and Pastry, all of which made various trade-offs and changes in their core functionality, giving them unique characteristics.

But as said, they're all DHTs. So what is a DHT?

A distributed hash table (DHT) is essentially a distributed key-value list. Nodes participating in the DHT can easily retrieve the value for a key.

If we have a network with 9 key-value pairs and 3 nodes, ideally each node would store 3 (optimally 6 for redundancy) of those key-value pairs, meaning that if a key-value pair were to be updated, only part of the network would be responsible for ensuring that it is. The idea is that any node in the network would know where to find the specific key-value pair it is looking for based on how things are distributed amongst the nodes.

Kademlia

So now that we know what DHTs are, let's get to Kademlia, the predecessor of discv4. Kademlia was created by Petar Maymounkov and David Mazières in 2002. I will naively say that this is probably one of the most popular and most used DHT protocols. It's quite simple in how it works, so let's look at it.

In Kademlia, nodes and values are arranged by distance (in a very mathematical definition). This distance is not a geographical one, but rather based on identifiers: how far apart two identifiers are is calculated using some distance function.

Kademlia uses an XOR as its distance function. An XOR is a function that outputs true only when inputs differ. Here is an example with some binary identifiers:

XOR 10011001
00110010
--------
10101011

In decimal numbers, this means that the distance between 153 and 50 is 171.

There are several reasons why XOR was taken:

  1. The distance from one ID to itself will be 0.
  2. Distance is symmetric, A to B is the same as B to A.
  3. It follows the triangle inequality: if A, B and C are points, then the distance from A to B is less than or equal to the distance from A to C plus the distance from C to B.

In summary, this distance function allows a node to decide what is "close" to it and make decisions based on that "closeness".

Kademlia nodes store a routing table. This table contains multiple lists. Each subsequent list contains nodes which are a little further distanced than the ones included in the previous list. Nodes maintain detailed knowledge about nodes closest to them, and the further away a node is, the less knowledge the node maintains about it.

So let's say I want to find a specific node. What I would do is go to any node which I already know and ask them for all their neighbours closest to my target. I repeat this process for the returned neighbours until I find my target.

The same thing happens for values. Values have a certain distance from nodes and their IDs are structured the same way so we can calculate this distance. If I want to find a value, I simply look for the neighbours closest to that value's key until I find the one storing said value.

For Kademlia nodes to support these functions, there are several messages with which the protocol communicates.

  • PING - Used to check whether a node is still running.
  • STORE - Stores a value with a given key on a node.
  • FINDNODE - Returns the closest nodes requested to a given ID.
  • FINDVALUE - The same as FINDNODE, except if a node stores the specific value it will return it directly.

This is a very simplified explanation of Kademlia and skips various important details. For the full description, make sure to check out the paper or a more in-depth design specification.

Discv4

Now after that history lesson, we finally get to discv4 (which stands for discovery v4), Ethereum's current node discovery protocol. The protocol itself is essentially based off of Kademlia, however it does away with certain aspects of it. For example, it does away with any usage of the value part of the DHT.

Kademlia is mainly used for the organisation of the network, so we only use the routing table to locate other nodes. Due to the fact that discv4 doesn't use the value portion of the DHT at all, we can throw away the FINDVALUE and STORE commands described by Kademlia.

The lookup method previously described by Kademlia describes how a node gets its peers. A node contacts some node and asks it for the nodes closest to itself. It does so until it can no longer find any new nodes.

Additionally, discv4 adds mutual endpoint verification. This is meant to ensure that a peer calling FINDNODE also participates in the discovery protocol.

Finally, all discv4 nodes are expected to maintain up-to-date ENR records. These contain information about a node. They can be requested from any node using a discv4-specific packet called ENRRequest.

If you want some more details on ENRs, check out one of my posts "Network Addresses in Ethereum"

Discv4 comes with its own range of problems however. Let's look at a few of them.

Firstly, the way discv4 works right now, there is no way to differentiate between node sub-protocols. This means for example that an Ethereum node could add an Ethereum Classic Node, Swarm or Whisper node to its DHT without realizing that it is invalid until more communication has happened. This inability to differentiate sub-protocols makes it harder to find specific nodes, such as Ethereum nodes with light-client support.

Next, in order to prevent replay attacks, discv4 uses timestamps. This however can lead to various issues when a host's clock is wrong. For more details, see the "Known Issues" section of the discv4 specification.

Finally, we have an issue with the way mutual endpoint verification works. Messages can get dropped and there is no way to tell if both peers have verified each other. This means that we could consider our peer verified while it does not consider us verified, making it drop the FINDNODE packet.

Discv5

Finally, let's look at discv5. The next iteration of discv4 and the discovery protocol which will be used by Eth 2.0. It aims at fixing various issues present in discv4.

The first change is the way FINDNODE works. In traditional Kademlia as well as in discv5, we pass an identifier. However, in discv5 we instead pass the logarithmic distance, meaning that a FINDNODE request gets a response containing all nodes at the specified logarithmic distance from the called node.

Logarithmic distance means we first calculate the distance and then run it through our log base 2 function. See:

log2(A xor B)

And the second, more important change, is that discv5 aims at solving one of the biggest issues of discv4: the differentiation of sub-protocols. It does this by adding topic tables. Topic tables are first in first out lists that contain nodes which have advertised that they provide a specific service. Nodes get themselves added to this list by registering ads on their peers.

As of writing, there is still an issue with this proposal. There is currently no efficient way for a node to place ads on multiple peers, since it would require separate requests for every peer which is inefficient in a large-scale network.

Additionally, it is unclear how many peers a node should place these ads on and exactly which peers to place them on. For more details, check out the issue devp2p#136.

There are a bunch more smaller changes to the protocol, but they are less important hence they were omitted from this summary.

Nevertheless, discv5 still does not resolve a couple issues present in discv4, such as unreliable endpoint verification. As of writing this post, there is currently no new method in discv5 to improve the endpoint verification process.

As you can see discv5 is still a work in progress and has a few large challenges to overcome. However if it does, it will most likely be a large improvement over more naive Kademlia implementations.


Hopefully this article helped explain what these discovery protocols are and how they work. If you're interested in their full specifications you can find them on github.

]]>
+ + Dean + +
+ + <![CDATA[Waku Update]]> + https://vac.dev/rlog/waku-update + + 2020-02-14T12:00:00.000Z + + A research log. What's the current state of Waku? How many users does it support? What are the bottlenecks? What's next?

Waku is our fork of Whisper where we address the shortcomings of Whisper in an iterative manner. We've seen in a previous post that Whisper doesn't scale, and why. In this post we'll talk about what the current state of Waku is, how many users it can support, and future plans.

Current state

Specs:

We released Waku spec v0.3 this week! You can see the full changelog here.

The main change from 0.2 is making the handshake more flexible. This enables us to communicate topic interest immediately without ambiguity. We also did the following:

  • added recommendation for DNS based discovery
  • added an upgradability and compatibility policy
  • cut the spec up into several components

We cut the spec up in several components to make Vac as modular as possible. The components right now are:

We can probably factor these out further as the main spec is getting quite big, but this is good enough for now.

Clients:

There are currently two clients that implement Waku v0.3, these are Nimbus (Update: now nim-waku) in Nim and status-go in Go.

For more details on what each client supports and doesn't, you can follow the work in progress checklist.

Work is currently in progress to integrate it into the Status core app. Waku is expected to be part of their upcoming 1.1 release (see Status app roadmap (link deprecated)).

Simulation:

We have a simulation that verifies - or rather, fails to falsify - our scalability model. More on the simulation and what it shows below.

How many users does Waku support?

This is our current understanding of how many users a network running Waku can support. Specifically in the context of the Status chat app, since that's the most immediate consumer of Waku. It should generalize fairly well to most deployments.

tl;dr (for Status app):

  • beta: 100 DAU
  • v1: 1k DAU
  • v1.1 (waku only): 10k DAU (up to x10 with deployment hotfixes)
  • v1.2 (waku+dns): 100k DAU (can optionally be folded into v1.1)

Assuming 10 concurrent users = 100 DAU. Estimate uncertainty increases for each order of magnitude until real-world data is observed.

As far as we know right now, these are the bottlenecks we have:

  • Immediate bottleneck - Receive bandwidth for end user clients (aka ‘Fixing Whisper with Waku’)
  • Very likely bottleneck - Nodes and cluster capacity (aka ‘DNS based node discovery’)
  • Conjecture, but not unlikely to appear - Full node traffic (aka ‘the routing / partition problem’)

We've already seen the first bottleneck being discussed in the initial post. Dean wrote a post on DNS based discovery which explains how we will address the likely second bottleneck. More on the third one in future posts.

For more details on these bottlenecks, see Scalability estimate: How many users can Waku and the Status app support?.

Simulation

The ultimate test is real-world usage. Until then, we have a simulation thanks to Kim De Mey from the Nimbus team!

We have two network topologies, Star and full mesh. Both networks have 6 full nodes, one traditional light node with bloom filter, and one Waku light node.

One of the full nodes sends 1 envelope over 1 of the 100 topics that the two light nodes subscribe to. After that, it sends 10000 envelopes over random topics.

For the light node, the bloom filter is set to almost 10% false positive rate (bloom filter: n=100, k=3, m=512). The tables below show the number of valid and invalid envelopes received by the different nodes.

Star network:

| Description     | Peers | Valid | Invalid |
|-----------------|-------|-------|---------|
| Master node     | 7     | 10001 | 0       |
| Full node 1     | 3     | 10001 | 0       |
| Full node 2     | 1     | 10001 | 0       |
| Full node 3     | 1     | 10001 | 0       |
| Full node 4     | 1     | 10001 | 0       |
| Full node 5     | 1     | 10001 | 0       |
| Light node      | 2     | 815   | 0       |
| Waku light node | 2     | 1     | 0       |

Full mesh:

| Description     | Peers | Valid | Invalid |
|-----------------|-------|-------|---------|
| Full node 0     | 7     | 10001 | 20676   |
| Full node 1     | 7     | 10001 | 9554    |
| Full node 2     | 5     | 10001 | 23304   |
| Full node 3     | 5     | 10001 | 11983   |
| Full node 4     | 5     | 10001 | 24425   |
| Full node 5     | 5     | 10001 | 23472   |
| Light node      | 2     | 803   | 803     |
| Waku light node | 2     | 1     | 1       |

Things to note:

  • Whisper light node with ~10% false positive gets ~10% of total traffic
  • Waku light node gets ~1000x less envelopes than Whisper light node
  • Full mesh results in a lot more duplicate messages, except for the Waku light node

Run the simulation yourself here. The parameters are configurable, and it is integrated with Prometheus and Grafana.

Difference between Waku and Whisper

Summary of main differences between Waku v0 spec and Whisper v6, as described in EIP-627:

  • Handshake/Status message not compatible with shh/6 nodes; specifying options as association list
  • Include topic-interest in Status handshake
  • Upgradability policy
  • topic-interest packet code
  • RLPx subprotocol is changed from shh/6 to waku/0.
  • Light node capability is added.
  • Optional rate limiting is added.
  • Status packet has following additional parameters: light-node, confirmations-enabled and rate-limits
  • Mail Server and Mail Client functionality is now part of the specification.
  • P2P Message packet contains a list of envelopes instead of a single envelope.

Next steps and future plans

Several challenges remain to make Waku a robust and suitable base communication protocol. Here we outline a few challenges that we are addressing and will continue to work on:

  • scalability of the network
  • incentivized infrastructure and spam-resistance
  • build with resource restricted devices in mind, including nodes being mostly offline

For the third bottleneck, a likely candidate for fixing this is Kademlia routing. This is similar to what is done in Swarm's PSS. We are in the early stages of experimenting with this over libp2p in nim-libp2p. More on this in a future post!

Acknowledgements

Image from "caged sky" by mh.xbhd.org is licensed under CC BY 2.0 (https://ccsearch.creativecommons.org/photos/a9168311-78de-4cb7-a6ad-f92be8361d0e)

]]>
+ + Oskar + +
+ + <![CDATA[DNS Based Discovery]]> + https://vac.dev/rlog/dns-based-discovery + + 2020-02-07T12:00:00.000Z + + A look at EIP-1459 and the benefits of DNS based discovery.

Discovery in p2p networks is the process of how nodes find each other and specific resources they are looking for. Popular discovery protocols, such as Kademlia which utilizes a distributed hash table or DHT, are highly inefficient for resource restricted devices. These methods use short connection windows, and it is quite battery intensive to keep establishing connections. Additionally, we cannot expect a mobile phone for example to synchronize an entire DHT using cellular data.

Another issue is how we do the initial bootstrapping. In other words, how does a client find its first node to then discover the rest of the network? In most applications, including Status right now, this is done with a static list of nodes that a client can connect to.

In summary, we have a static list that provides us with nodes we can connect to which then allows us to discover the rest of the network using something like Kademlia. But what we need is something that can easily be mutated, guarantees a certain amount of security, and is efficient for resource restricted devices. Ideally our solution would also be robust and scalable.

How do we do this?

The answer is EIP-1459: Node Discovery via DNS, which is one of the strategies we are using for discovering Waku nodes. EIP-1459 is a DNS-based discovery protocol that stores merkle trees in DNS records which contain connection information for nodes.

Waku is our fork of Whisper. Oskar recently wrote an entire post explaining it. In short, Waku is our method of fixing the shortcomings of Whisper in a more iterative fashion. You can find the specification here.

DNS-based methods for bootstrapping p2p networks are quite popular. Even Bitcoin uses it, but it uses a concept called DNS seeds, which are just DNS servers that are configured to return a list of randomly selected nodes from the network upon being queried. This means that although these seeds are hardcoded in the client, the IP addresses of actual nodes do not have to be.

> dig dnsseed.bluematt.me +short
129.226.73.12
107.180.78.111
169.255.56.123
91.216.149.28
85.209.240.91
66.232.124.232
207.55.53.96
86.149.241.168
193.219.38.57
190.198.210.139
74.213.232.234
158.181.226.33
176.99.2.207
202.55.87.45
37.205.10.3
90.133.4.73
176.191.182.3
109.207.166.232
45.5.117.59
178.211.170.2
160.16.0.30

The above displays the result of querying one of these DNS seeds. All the nodes are stored as A records for the given domain name. This is quite a simple solution which Bitcoin almost solely relies on since removing the IRC bootstrapping method in v0.8.2.

What makes this DNS based discovery useful? It allows us to have a mutable list of bootstrap nodes without needing to ship a new version of the client every time a list is mutated. It also allows for a more lightweight method of discovering nodes, something very important for resource restricted devices.

Additionally, DNS provides us with a robust and scalable infrastructure. This is due to its hierarchical architecture. This hierarchical architecture also already makes it distributed such that the failure of one DNS server does not result in us no longer being able to resolve our name.

As with every solution though, there is a trade-off. Because the list is stored under a DNS name, an adversary would simply need to censor the DNS records for that specific name. This would prevent any new client trying to join the network from being able to do so.

One thing you notice when looking at EIP-1459 is that it is a lot more technically complex than Bitcoin's way of doing this. So if Bitcoin uses this simple method and has proven that it works, why did we need a new method?

There are multiple reasons, but the main one is security. In the Bitcoin example, an attacker could create a new list and no one querying would be able to tell. This is however mitigated in EIP-1459 where we can verify the integrity of the entire returned list by storing an entire merkle tree in the DNS records.

Let's dive into this. Firstly, a client that is using these DNS records for discovery must know the public key corresponding to the private key controlled by the entity creating the list. This is because the entire list is signed using a secp256k1 private key, giving the client the ability to authenticate the list and know that it has not been tampered with by some external party.

So that already makes this a lot safer than the method Bitcoin uses. But how are these lists even stored? As previously stated they are stored using merkle trees as follows:

  • The root of the tree is stored in a TXT record, this record contains the tree's root hash, a sequence number which is incremented every time the tree is updated and a signature as stated above.

    Additionally, there is also a root hash to a second tree called a link tree, it contains the information to different lists. This link tree allows us to delegate trust and build a graph of multiple merkle trees stored across multiple DNS names.

    The sequence number ensures that an attacker cannot replace a tree with an older version because when a client reads the tree, they should ensure that the sequence number is greater than the last synchronized version.

  • Using the root hash for the tree, we can find the merkle tree's first branch, the branch is also stored in a TXT record. The branch record contains all the hashes of the branch's leafs.

  • Once a client starts reading all the leaves, they can find one of two things: either a new branch record leading them further down the tree or an Ethereum Node Record (ENR), which means they now have the address of a node to connect to! To learn more about Ethereum Node Records you can have a look at EIP-778, or read a short blog post I wrote explaining them here.

Below is the zone file taken from the EIP-1459, displaying how this looks in practice.

; name                        ttl     class type  content
@ 60 IN TXT enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA
C7HRFPF3BLGF3YR4DY5KX3SMBE 86900 IN TXT enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org
JWXYDBPXYWG6FX3GMDIBFA6CJ4 86900 IN TXT enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24
2XS2367YHAXJFGLZHVAWLQD4ZY 86900 IN TXT enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA
H4FHT4B454P6UXFD7JCYQ5PWDY 86900 IN TXT enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI
MHTDO6TMUBRIA2XWG5LUDACK24 86900 IN TXT enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o

All of this has already been introduced into go-ethereum with the pull request #20094, created by Felix Lange. There's a lot of tooling around it that already exists too which is really cool. So if your project is written in Golang and wants to use this, it's relatively simple! Additionally, here's a proof of concept that shows what this might look like with libp2p on github.

I hope this was a helpful explainer into DNS based discovery, and shows EIP-1459's benefits over more traditional DNS-based discovery schemes.

]]>
+ + Dean + +
+ + <![CDATA[Fixing Whisper with Waku]]> + https://vac.dev/rlog/fixing-whisper-with-waku + + 2019-12-03T12:00:00.000Z + + A research log. Why Whisper doesn't scale and how to fix it.

This post will introduce Waku. Waku is a fork of Whisper that attempts to address some of Whisper's shortcomings in an iterative fashion. We will also introduce a theoretical scaling model for Whisper that shows why it doesn't scale, and what can be done about it.

Introduction

Whisper is a gossip-based communication protocol or an ephemeral key-value store, depending on which way you look at it. Historically speaking, it is the messaging pillar of Web3, together with Ethereum for consensus and Swarm for storage.

Whisper, being a somewhat esoteric protocol and with some fundamental issues, hasn't seen a lot of usage. However, applications such as Status are using it, and have been making minor ad hoc modifications to it to make it run on mobile devices.

What are these fundamental issues? In short:

  1. scalability, most immediately when it comes to bandwidth usage
  2. spam-resistance, proof of work is a poor mechanism for heterogeneous nodes
  3. no incentivized infrastructure, leading to centralized choke points
  4. lack of formal and unambiguous specification makes it hard to analyze and implement
  5. running over devp2p, which limits where it can run and how

In this post, we'll focus on the first problem, which is scalability through bandwidth usage.

Whisper theoretical scalability model

(Feel free to skip this section if you want to get right to the results).

There's widespread implicit knowledge that Whisper "doesn't scale", but it is less understood exactly why. This theoretical model attempts to encode some characteristics of it. Specifically for a use case such as the one by Status (see Status Whisper usage spec).

Caveats

First, some caveats: this model likely contains bugs, has wrong assumptions, or completely misses certain dimensions. However, it acts as a form of existence proof for unscalability, with clear reasons.

If certain assumptions are wrong, then we can challenge them and reason about them in isolation. It doesn’t mean things will definitely work as the model predicts, and that there aren’t unknown unknowns.

The model also only deals with receiving bandwidth for end nodes, uses mostly static assumptions of averages, and doesn’t deal with spam resistance, privacy guarantees, accounting, intermediate node or network wide failures.

Goals

  1. Ensure network scales by being user or usage bound, as opposed to bandwidth growing in proportion to network size.
  2. Staying within a reasonable bandwidth limit for limited data plans.
  3. Do the above without materially impacting existing nodes.

It proceeds through various cases with clear assumptions behind them, starting from the most naive assumptions. It shows results for 100 users, 10k users and 1m users.

Model

Case 1. Only receiving messages meant for you [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A4. Only receiving messages meant for you.

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1000.0KB/day
For 1m users, receiving bandwidth is 1000.0KB/day

------------------------------------------------------------

Case 2. Receiving messages for everyone [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A5. Received messages for everyone.

For 100 users, receiving bandwidth is 97.7MB/day
For 10k users, receiving bandwidth is 9.5GB/day
For 1m users, receiving bandwidth is 953.7GB/day

------------------------------------------------------------

Case 3. All private messages go over one discovery topic [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A8. All private messages are received by everyone (same topic) (static).

For 100 users, receiving bandwidth is 49.3MB/day
For 10k users, receiving bandwidth is 4.8GB/day
For 1m users, receiving bandwidth is 476.8GB/day

------------------------------------------------------------

Case 4. All private messages are partitioned into shards [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1.5MB/day
For 1m users, receiving bandwidth is 98.1MB/day

------------------------------------------------------------

Case 5. 4 + Bloom filter with false positive rate

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1

For 100 users, receiving bandwidth is 10.7MB/day
For 10k users, receiving bandwidth is 978.0MB/day
For 1m users, receiving bandwidth is 95.5GB/day

NOTE: Traffic extremely sensitive to bloom false positives
This completely dominates network traffic at scale.
With p=1% we get 10k users ~100MB/day and 1m users ~10gb/day)

------------------------------------------------------------

Case 6. Case 5 + Benign duplicate receives

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1
- A15. Benign duplicate receives factor (static): 2
- A16. No bad envelopes, bad PoW, expired, etc (static).

For 100 users, receiving bandwidth is 21.5MB/day
For 10k users, receiving bandwidth is 1.9GB/day
For 1m users, receiving bandwidth is 190.9GB/day

------------------------------------------------------------

Case 7. 6 + Mailserver under good conditions; small bloom fp; mostly offline

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1
- A15. Benign duplicate receives factor (static): 2
- A16. No bad envelopes, bad PoW, expired, etc (static).
- A17. User is offline p% of the time (static) p=0.9
- A18. No bad request, dup messages for mailservers; overlap perfect (static).
- A19. Mailserver requests can change false positive rate to be p=0.01

For 100 users, receiving bandwidth is 3.9MB/day
For 10k users, receiving bandwidth is 284.8MB/day
For 1m users, receiving bandwidth is 27.8GB/day

------------------------------------------------------------

Case 8. No metadata protection w bloom filter; 1 node connected; static shard

Aka waku mode.

Next step up is to either only use contact code, or shard more aggressively.
Note that this requires change of other nodes behavior, not just local node.

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1.5MB/day
For 1m users, receiving bandwidth is 98.1MB/day

------------------------------------------------------------

See source for more detail on the model and its assumptions.

Takeaways

  1. Whisper as it currently works doesn’t scale, and we quickly run into unacceptable bandwidth usage.
  2. There are a few factors of this, but largely it boils down to noisy topics usage and use of bloom filters. Duplicate (e.g. see Whisper vs PSS) and bad envelopes are also factors, but this depends a bit more on specific deployment configurations.
  3. Waku mode (case 8) is an additional capability that doesn’t require other nodes to change, for nodes that put a premium on performance.
  4. The next bottleneck after this is the partitioned topics (app/network specific), which either need to gracefully (and potentially quickly) grow, or an alternative way of consuming those messages needs to be devised.

The results are summarized in the graph above. Notice the log-log scale. The colored backgrounds correspond to the following bandwidth usage:

  • Blue: <10mb/d (<~300mb/month)
  • Green: <30mb/d (<~1gb/month)
  • Yellow: <100mb/d (<~3gb/month)
  • Red: >100mb/d (>3gb/month)

These ranges are somewhat arbitrary, but are based on user requirements for users on a limited data plan, with comparable usage for other messaging apps.

Introducing Waku

Motivation for a new protocol

Apps such as Status will likely use something like Whisper for the foreseeable future, and we want to enable them to use it with more users on mobile devices, without bandwidth exploding, with minimal changes.

Additionally, there's not a clear cut alternative that maps cleanly to the desired use cases (p2p, multicast, privacy-preserving, open, etc).

We are actively researching, developing and collaborating with more greenfield approaches. It is likely that Waku will either converge to those, or Waku will lay the groundwork (clear specs, common issues/components) necessary to make switching to another protocol easier. In this project we want to emphasize iterative work with results on the order of weeks.

Briefly on Waku mode

  • Doesn’t impact existing clients, it’s just a separate node and capability.
  • Other nodes can still use Whisper as is, like a full node.
  • Sacrifices metadata protection and incurs higher connectivity/availability requirements for scalability

Requirements:

  • Exposes API to get messages from a set of list of topics (no bloom filter)
  • Way of being identified as a Waku node (e.g. through version string)
  • Option to statically encode this node in app, e.g. similar to custom bootnodes/mailserver
  • Only node that needs to be connected to, possibly as Whisper relay / mailserver hybrid

Provides:

  • likely provides scalability of up to 10k users and beyond
  • with some enhancements to partition topic logic, can possibly scale up to 1m users (app/network specific)

Caveats:

  • hasn’t been tested in a large-scale simulation
  • other network and intermediate node bottlenecks might become apparent (e.g. full bloom filter and private cluster capacity; can likely be dealt with in isolation using known techniques, e.g. load balancing) (deployment specific)

Progress so far

In short, we have a Waku version 0 spec up as well as a PoC for backwards compatibility. In the coming weeks, we are going to solidify the specs, get a more fully featured PoC for Waku mode. See rough roadmap, project board [link deprecated] and progress thread on the Vac forum.

The spec has been rewritten for clarity, with ABNF grammar and less ambiguous language. The spec also incorporates several previously ad hoc implemented features, such as light nodes and mailserver/client support. This has already caught a few incompatibilities between the geth (Go), status/whisper (Go) and nim-eth (Nim) versions, specifically around light node usage and the handshake.

If you are interested in this effort, please check out our forum for questions, comments and proposals. We already have some discussion for better spam protection (see previous post for a more complex but privacy-preserving proposal), something that is likely going to be addressed in future versions of Waku, along with many other fixes and enhancements.

]]>
+ + Oskar + +
+ + <![CDATA[Feasibility Study: Semaphore rate limiting through zkSNARKs]]> + https://vac.dev/rlog/feasibility-semaphore-rate-limiting-zksnarks + + 2019-11-08T12:00:00.000Z + + A research log. Zero knowledge signaling as a rate limiting mechanism to prevent spam in p2p networks.

tldr: Moon math promising for solving spam in Whisper, but to get there we need to invest more in performance work and technical upskilling.

Motivating problem

In open p2p networks for messaging, one big problem is spam-resistance. Existing solutions, such as Whisper's proof of work, are insufficient, especially for heterogeneous nodes. Other reputation-based approaches might not be desirable, due to issues around arbitrary exclusion and privacy.

One possible solution is to use a right-to-access staking-based method, where a node is only able to send a message, signal, at a certain rate, and otherwise they can be slashed. One problem with this is in terms of privacy-preservation, where we specifically don't want a user to be tied to a specific payment or unique fingerprint.

In addition to above, there are a lot of related problems that share similarities in terms of their structure and proposed solution.

  • Private transactions (Zcash, AZTEC)
  • Private voting (Semaphore)
  • Private group membership (Semaphore)
  • Layer 2 scaling, poss layer 1 (ZK Rollup; StarkWare/Eth2-3)

Overview

Basic terminology

A zero-knowledge proof allows a prover to show a verifier that they know something, without revealing what that something is. This means you can do trust-minimized computation that is also privacy preserving. As a basic example, instead of showing your ID when going to a bar you simply give them a proof that you are over 18, without showing the doorman your id.

zkSNARKs is a form of zero-knowledge proofs. There are many types of zero-knowledge proofs, and the field is evolving rapidly. They come with various trade-offs in terms of things such as: trusted setup, cryptographic assumptions, proof/verification key size, proof/verification time, proof size, etc. See section below for more.

Semaphore is a framework/library/construct on top of zkSNARks. It allows for zero-knowledge signaling, specifically on top of Ethereum. This means an approved user can broadcast some arbitrary string without revealing their identity, given some specific constraints. An approved user is someone who has been added to a certain merkle tree. See current Github home for more.

Circom is a DSL for writing arithmetic circuits that can be used in zkSNARKs, similar to how you might write a NAND gate. See Github for more.

Basic flow

We start with a private voting example, and then extend it to the slashable rate limiting example.

  1. A user registers an identity (arbitrary keypair), along with a small fee, to a smart contract. This adds them to a merkle tree and allows them to prove that they are member of that group, without revealing who they are.

  2. When a user wants to send a message, they compute a zero-knowledge proof. This ensures certain invariants, have some public outputs, and can be verified by anyone (including a smart contract).

  3. Any node can verify the proof, including smart contracts on chain (as of Byzantinum HF). Additionally, a node can have rules for the public output. In the case of voting, one such rule is that a specific output hash has to be equal to some predefined value, such as "2020-01-01 vote on Foo Bar for president".

  4. Because of how the proof is constructed, and the rules around output values, this ensures that: a user is part of the approved set of voters and that a user can only vote once.

  5. As a consequence of above, we have a system where registered users can only vote once, no one can see who voted for what, and this can all be proven and verified.

Rate limiting example

In the case of rate limiting, we do want nodes to send multiple messages. This changes step 3-5 above somewhat.

NOTE: It is a bit more involved than this, and if we precompute proofs the flow might look a bit different. But the general idea is the same.

  1. Instead of having a rule that you can only vote once, we have a rule that you can only send a message per epoch. Epoch here can be every second, as defined by UTC date time +-20s.

  2. Additionally, if a users sends more than one message per epoch, one of the public outputs is a random share of a private key. Using Shamir's Secret Sharing (similar to a multisig) and 2/3 key share as an example threshold: in the normal case only 1/3 private keys is revealed, which is insufficient to have access. In the case where two messages are sent in an epoch, probabilistically 2/3 shares is sufficient to have access to the key (unless you get the same random share of the key).

  3. This means any untrusted user who detects a spamming user, can use it to access their private key corresponding to funds in the contract, and thus slash them.

  4. As a consequence of above, we have a system where registered users can only messages X times per epoch, and no one can see who is sending what messages. Additionally, if a user is violating the above rate limit, they can be punished and any user can profit from it.

Briefly on scope of 'approved users'

In the case of an application like Status, this construct can either be a global StatusNetwork group, or one per chat, or network, etc. It can be applied both at the network and user level. There are no specific limitations on where or who deploys this, and it is thus more of a UX consideration.

Technical details

For a fairly self-contained set of examples above, see exploration in Vac research repo. Note that the Shamir secret sharing is not inside the SNARK, but out-of-band for now.

The current version of Semaphore is using NodeJS and Circom from Iden3 for Snarks.

For more on rate limiting idea, see ethresearch post.

Feasibility

The above repo was used to exercise the basic paths and to gain intuition of feasibility. Based on it and related reading we outline a few blockers and things that require further study.

Technical feasibility

Proof time

Proving time for Semaphore (https://github.com/kobigurk/semaphore) zkSNARKs using circom, groth and snarkjs is currently way too long. It takes on the order of ~10m to generate a proof. With Websnark, it is likely to take 30s, which might still be too long. We should experiment with native code on mobile here.

See details.

Proving key size

Prover key size is ~110mb for Semaphore. Assuming this is embedded on mobile device, it bloats the APK a lot. Current APK size is ~30mb and even that might be high for people with limited bandwidth.

See details.

Trusted setup

Using zkSNARKs a trusted setup is required to generate prover and verifier keys. As part of this setup, a toxic parameter lambda is generated. If a party gets access to this lambda, they can prove anything. This means people using zKSNARKs usually have an elaborate MPC ceremony to ensure this parameter doesn't get discovered.

See details.

Shamir logic in SNARK

For Semaphore RLN we need to embed the Shamir logic inside the SNARK in order to do slashing for spam. Currently the implementation is trusted and very hacky.

See details.

End to end integration

Currently is standalone and doesn't touch multiple users, deployed contract with merkle tree and verification, actual transactions, a mocked network, add/remove members, etc. There are bound to be edge cases and unknown unknowns here.

See details.

Licensing issues

Currently Circom uses a GPL license, which can get tricky when it comes to the App Store etc.

See details.

Alternative ZKPs?

Some of the isolated blockers for zKSNARKs (#7, #8, #9) might be mitigated by the use of other ZKP technology. However, they likely have their own issues.

See details.

Social feasibility

Technical skill

zkSNARKs and related technologies are quite new. To learn how they work and get an intuition for them requires individuals to dedicate a lot of time to studying them. This means we must prioritize gaining competence in these technologies if we wish to use them to our advantage.

Time and resources

In order for this and related projects (such as private transactions) to get anywhere, it must be made an explicit area of focus for an extended period of time.

General thoughts

Similar to Whisper, and in line with moving towards protocol and infrastructure, we need to upskill and invest resources into this. This doesn't mean developing all of the technologies ourselves, but gaining enough competence to leverage and extend existing solutions by the growing ZKP community.

For example, this might also include leveraging largely ready made solutions such as AZTEC for private transactions; more fundamental research into ZK rollup and similar; using Semaphore for private group membership and private voting; Nim based wrapper around Bellman, etc.

Acknowledgement

Thanks to Barry Whitehat for patient explanation and pointers. Thanks to WJ for helping with runtime issues.

Peacock header image from [Tonos](<https://en.wikipedia.org/wiki/File:Flickr-lo.tangelini-Tonos(1).jpg>).

]]>
+ + Oskar + +
+ + <![CDATA[P2P Data Sync with a Remote Log]]> + https://vac.dev/rlog/remote-log + + 2019-10-04T12:00:00.000Z + + A research log. Asynchronous P2P messaging? Remote logs to the rescue!

A big problem when doing end-to-end data sync between mobile nodes is that most devices are offline most of the time. With a naive approach, you quickly run into issues of 'ping-pong' behavior, where messages have to be constantly retransmitted. We saw some basic calculations of what this bandwidth multiplier looks like in a previous post.

While you could do some background processing, this is really battery-draining, and on iOS these capabilities are limited. A better approach instead is to loosen the constraint that two nodes need to be online at the same time. How do we do this? There are two main approaches, one is the store and forward model, and the other is a remote log.

In the store and forward model, we use an intermediate node that forward messages on behalf of the recipient. In the remote log model, you instead replicate the data onto some decentralized storage, and have a mutable reference to the latest state, similar to DNS. While both work, the latter is somewhat more elegant and "pure", as it has less strict requirements of an individual node's uptime. Both act as a highly-available cache to smoothen over non-overlapping connection windows between endpoints.

In this post we are going to describe how such a remote log schema could work. Specifically, how it enhances p2p data sync and takes care of the following requirements:

  1. MUST allow for mobile-friendly usage. By mobile-friendly we mean devices that are resource restricted, mostly-offline and often changing network.
  2. MAY use helper services in order to be more mobile-friendly. Examples of helper services are decentralized file storage solutions such as IPFS and Swarm. These help with availability and latency of data for mostly-offline devices.

Remote log

A remote log is a replication of a local log. This means a node can read data from a node that is offline.

The spec is in an early draft stage and can be found here. A very basic spike / proof-of-concept can be found here.

Definitions

TermDefinition
CASContent-addressed storage. Stores data that can be addressed by its hash.
NSName system. Associates mutable data to a name.
Remote logReplication of a local log at a different location.

Roles

There are four fundamental roles:

  1. Alice
  2. Bob
  3. Name system (NS)
  4. Content-addressed storage (CAS)

The remote log is the data format of what is stored in the name system.

"Bob" can represent anything from 0 to N participants. Unlike Alice, Bob only needs read-only access to NS and CAS.

Flow

Figure 1: Remote log data synchronization.

Data format

The remote log lets receiving nodes know what data they are missing. Depending on the specific requirements and capabilities of the nodes and name system, the information can be referred to differently. We distinguish between three rough modes:

  1. Fully replicated log
  2. Normal sized page with CAS mapping
  3. "Linked list" mode - minimally sized page with CAS mapping

A remote log is simply a mapping from message identifiers to their corresponding address in a CAS:

Message Identifier (H1)CAS Hash (H2)
H1_3H2_3
H1_2H2_2
H1_1H2_1
address to next page

The numbers here corresponds to messages. Optionally, the content itself can be included, just like it normally would be sent over the wire. This bypasses the need for a dedicated CAS and additional round-trips, with a trade-off in bandwidth usage.

Message Identifier (H1)Content
H1_3C3
H1_2C2
H1_1C1
address to next page

Both patterns can be used in parallel, e.g. by storing the last k messages directly and use CAS pointers for the rest. Together with the next_page page semantics, this gives users flexibility in terms of bandwidth and latency/indirection, all the way from a simple linked list to a fully replicated log. The latter is useful for things like backups on durable storage.

Interaction with MVDS

vac.mvds.Message payloads are the only payloads that MUST be uploaded. Other messages types MAY be uploaded, depending on the implementation.

Future work

The spec is still in an early draft stage, so it is expected to change. Same with the proof of concept. More work is needed on getting a fully featured proof of concept with specific CAS and NS instances. E.g. Swarm and Swarm Feeds, or IPFS and IPNS, or something else.

For data sync in general:

  • Make consistency guarantees more explicit for app developers with support for sequence numbers and DAGs, as well as the ability to send non-synced messages. E.g. ephemeral typing notifications, linear/sequential history and causal consistency/DAG history
  • Better semantics and scalability for multi-user sync contexts, e.g. CRDTs and joining multiple logs together
  • Better usability in terms of application layer usage (data sync clients) and supporting more transports

PS1. Thanks everyone who submitted great logo proposals for Vac!

PPS2. Next week on October 10th decanus and I will be presenting Vac at Devcon, come say hi :)

]]>
+ + Oskar + +
+ + <![CDATA[Vac - A Rough Overview]]> + https://vac.dev/rlog/vac-overview + + 2019-08-02T12:00:00.000Z + + Vac is a modular peer-to-peer messaging stack, with a focus on secure messaging. Overview of terms, stack and open problems.

Vac is a modular peer-to-peer messaging stack, with a focus on secure messaging. What does that mean? Let's unpack it a bit.

Basic terms

messaging stack. While the initial focus is on data sync, we are concerned with all layers in the stack. That means all the way from underlying transports, p2p overlays and routing, to initial trust establishment and semantics for things like group chat. The ultimate goal is to give application developers the tools they need to provide secure messaging for their users, so they can focus on their domain expertise.

modular. Unlike many other secure messaging applications, our goal is not to have a tightly coupled set of protocols, nor is it to reinvent the wheel. Instead, we aim to provide options at each layer in the stack, and build on the shoulders of giants, putting a premium on interoperability. It's similar in philosophy to projects such as libp2p or Substrate in that regard. Each choice comes with different trade-offs, and these look different for different applications.

peer-to-peer. The protocols we work on are pure p2p, and aim to minimize centralization. This too is in opposition to many initiatives in the secure messaging space.

messaging. By messaging we mean messaging in a generalized sense. This includes both human to human communication, as well as machine to machine communication. By messaging we also mean something more fundamental than text messages, we also include things like transactions (state channels, etc) under this moniker.

secure messaging. Outside of traditional notions of secure messaging, such as ensuring end to end encryption, forward secrecy, avoiding MITM-attacks, etc, we are also concerned with two other forms of secure messaging. We call these private messaging and censorship-resistance. Private messaging means viewing privacy as a security property, with all that entails. Censorship resistance ties into being p2p, but also in terms of allowing for transports and overlays that can't easily be censored by port blocking, traffic analysis, and similar.

Vāc. Is a Vedic goddess of speech. It also hints at being a vaccine.

Protocol stack

What does this stack look like? We take inspiration from core internet architecture, existing survey work and other efforts that have been done to decompose the problem into orthogonal pieces. Each layer provides their own set of properties and only interact with the layers it is adjacent to. Note that this is a rough sketch.

Layer / ProtocolPurposeExamples
Application layerEnd user semantics1:1 chat, group chat
Data SyncData consistencyMVDS, BSP
Secure TransportConfidentiality, PFS, etcDouble Ratchet, MLS
Transport PrivacyTransport and metadata protectionWhisper, Tor, Mixnet
P2P OverlayOverlay routing, NAT traversaldevp2p, libp2p
Trust EstablishmentEstablishing end-to-end trustTOFU, web of trust

As an example, end user semantics such as group chat or moderation capabilities can largely work regardless of specific choices further down the stack. Similarly, using a mesh network or Tor doesn't impact the use of Double Ratchet at the Secure Transport layer.

Data Sync plays a similar role to what TCP does at the transport layer in a traditional Internet architecture, and for some applications something more like UDP is likely to be desirable.

In terms of specific properties and trade-offs at each layer, we'll go deeper down into them as we study them. For now, this is best treated as a rough sketch or mental map.

Problems and rough priorities

With all the pieces involved, this is quite an undertaking. Luckily, a lot of pieces are already in place and can be either incorporated as-is or iterated on. In terms of medium and long term, here's a rough sketch of priorities and open problems.

  1. Better data sync. While the current MVDS works, it is lacking in a few areas:
  • Lack of remote log for mostly-offline devices
  • Better scalability for multi-user chat contexts
  • Better usability in terms of application layer usage and supporting more transports
  1. Better transport layer support. Currently MVDS runs primarily over Whisper, which has a few issues:
  • scalability, being able to run with many nodes
  • spam-resistance, proof of work is a poor mechanism for heterogeneous devices
  • no incentivized infrastructure, leading to centralized choke points

In addition to these most immediate concerns, there are other open problems. Some of these are overlapping with the above.

  1. Adaptive nodes. Better support for resource restricted devices and nodes of varying capabilities. Light connection strategy for resources and guarantees. Security games to outsource processing with guarantees.

  2. Incentivized and spam-resistant messaging. Reasons to run infrastructure and not relying on altruistic nodes. For spam resistance, in p2p multicast spam is a big attack vector due to amplification. There are a few interesting directions here, such as EigenTrust, proof of burn with micropayments, and leveraging zero-knowledge proofs.

  3. Strong privacy guarantees at transport privacy layer. More rigorous privacy guarantees and explicit trade-offs for metadata protection. Includes Mixnet.

  4. Censorship-resistant and robust P2P overlay. NAT traversal; running in the browser; mesh networks; pluggable transports for traffic obfuscation.

  5. Scalable and decentralized secure conversational security. Strong security guarantees such as forward secrecy, post compromise security, for large group chats. Includes projects such MLS and extending Double Ratchet.

  6. Better trust establishment and key handling. Avoiding MITM attacks while still enabling a good user experience. Protecting against ghost users in group chat and providing better ways to do key handling.

There is also a set of more general problems, that touch multiple layers:

  1. Ensuring modularity and interoperability. Providing interfaces that allow for existing and new protocols to be at each layer of the stack.

  2. Better specifications. Machine-readable and formally verified specifications. More rigorous analysis of exact guarantees and behaviors. Exposing work in such a way that it can be analyzed by academics.

  3. Better simulations. Providing infrastructure and tooling to be able to test protocols in adverse environments and at scale.

  4. Enabling excellent user experience. A big reason for the lack of widespread adoption of secure messaging is the fact that more centralized, insecure methods provide a better user experience. Given that incentives can align better for users interested in secure messaging, providing an even better user experience should be doable.


We got some work to do. Come help us if you want. See you in the next update!

]]>
+ + Oskar + +
+ + <![CDATA[P2P Data Sync for Mobile]]> + https://vac.dev/rlog/p2p-data-sync-for-mobile + + 2019-07-19T12:00:00.000Z + + A research log. Reliable and decentralized, pick two.

Together with decanus, I've been working on the problem of data sync lately.

In building p2p messaging systems, one problem you quickly come across is the problem of reliably transmitting data. If there's no central server with high availability guarantees, you can't meaningfully guarantee that data has been transmitted. One way of solving this problem is through a synchronization protocol.

There are many synchronization protocols out there and I won't go into detail of how they differ with our approach here. Some common examples are Git and Bittorrent, but there are also projects like IPFS, Swarm, Dispersy, Matrix, Briar, SSB, etc.

Problem motivation

Why do we want to do p2p sync for mobile phones in the first place? There are three components to that question. One is on the value of decentralization and peer-to-peer, the second is on why we'd want to reliably sync data at all, and finally why mobile phones and other resource restricted devices.

Why p2p?

For decentralization and p2p, there are both technical and social/philosophical reasons. Technically, having a user-run network means it can scale with the number of users. Data locality is also improved if you query data that's close to you, similar to distributed CDNs. The throughput is also improved if there are more places to get data from.

Socially and philosophically, there are several ways to think about it. Open and decentralized networks also relate to the idea of open standards, i.e. compare the longevity of AOL with IRC or Bittorrent. One is run by a company and is shut down as soon as it stops being profitable, the others live on. Additionally increasingly control of data and infrastructure is becoming a liability. By having a network with no one in control, everyone is. It's ultimately a form of democratization, more similar to organic social structures pre Big Internet companies. This leads to properties such as censorship resistance and coercion resistance, where we limit the impact a 3rd party might have a voluntary interaction between individuals or a group of people. Examples of this are plentiful in the world of Facebook, Youtube, Twitter and WeChat.

Why reliably sync data?

At risk of stating the obvious, reliably syncing data is a requirement for many problem domains. You don't get this by default in a p2p world, as it is unreliable, with nodes permissionlessly joining and leaving the network. In some cases you can get away with only ephemeral data, but usually you want some kind of guarantees. This is a must for a reliable group chat experience, for example, where messages are expected to arrive in a timely fashion and in some reasonable order. The same is true for messages that represent financial transactions, and so on.

Why mobile phones?

Most devices people use daily are mobile phones. It's important to provide the same or at least similar guarantees to more traditional p2p nodes that might run on a desktop computer or server. The alternative is to rely on gateways, which shares many of the drawbacks of centralized control and is prone to censorship, control and surveillance.

More generally, resource restricted devices can differ in their capabilities. One example is smartphones, but others are: desktop, routers, Raspberry PIs, POS systems, and so on. The number and diversity of devices are exploding, and it's useful to be able to leverage this for various types of infrastructure. The alternative is to centralize on big cloud providers, which also lends itself to lack of democratization and censorship, etc.

Minimal Requirements

For requirements or design goals for a solution, here's what we came up with.

  1. MUST sync data reliably between devices. By reliably we mean having the ability to deal with messages being out of order, dropped, duplicated, or delayed.

  2. MUST NOT rely on any centralized services for reliability. By centralized services we mean any single point of failure that isn’t one of the endpoint devices.

  3. MUST allow for mobile-friendly usage. By mobile-friendly we mean devices that are resource restricted, mostly-offline and often changing network.

  4. MAY use helper services in order to be more mobile-friendly. Examples of helper services are decentralized file storage solutions such as IPFS and Swarm. These help with availability and latency of data for mostly-offline devices.

  5. MUST have the ability to provide causal consistency. By causal consistency we mean the commonly accepted definition in distributed systems literature. This means messages that are causally related can achieve a partial ordering.

  6. MUST support ephemeral messages that don’t need replication. That is, allow for messages that don’t need to be reliably transmitted but still need to be transmitted between devices.

  7. MUST allow for privacy-preserving messages and extreme data loss. By privacy-preserving we mean things such as exploding messages (self-destructing messages). By extreme data loss we mean the ability for two trusted devices to recover from a, deliberate or accidental, removal of data.

  8. MUST be agnostic to whatever transport it is running on. It should not rely on specific semantics of the transport it is running on, nor be tightly coupled with it. This means a transport can be swapped out without loss of reliability between devices.

MVDS - a minimium viable version

The first minimum viable version is in an alpha stage, and it has a specification, implementation and we have deployed it in a console client for end to end functionality. It's heavily inspired by Bramble Sync Protocol.

The spec is fairly minimal. You have nodes that exchange records over some secure transport. These records are of different types, such as OFFER, MESSAGE, REQUEST, and ACK. A peer keeps track of the state of messages for each node it is interacting with. There's also logic for message retransmission with exponential delay. The positive ACK and retransmission model is quite similar to how TCP is designed.

There are two different modes of syncing, interactive and batch mode. See sequence diagrams below.

Interactive mode:

Interactive mode

Batch mode:

Batch mode

Which mode should you choose? It's a tradeoff of latency and bandwidth. If you want to minimize latency, batch mode is better. If you care about preserving bandwidth, interactive mode is better. The choice is up to each node.

Basic simulation

Initial ad hoc bandwidth and latency testing shows some issues with a naive approach. Running with the default simulation settings:

  • communicating nodes: 2
  • nodes using interactive mode: 2
  • interval between messages: 5s
  • time node is offline: 90%
  • nodes each node is sharing with: 2

we notice a huge overhead. More specifically, we see a ~5 minute latency overhead and a bandwidth multiplier of x100-1000, i.e. 2-3 orders of magnitude just for receiving a message with interactive mode, without acks.

Now, that seems terrible. A moment of reflection will reveal why that is. If each node is offline uniformly 90% of the time, that means that each record will be lost 90% of the time. Since interactive mode requires offer, request, payload (and then ack), that's three links just for Bob to receive the actual message.

Each failed attempt implies another retransmission. That means we have (1/0.1)^3 = 1000 expected overhead to receive a message in interactive mode. The latency follows naturally from that, with the retransmission logic.

Mostly-offline devices

The problem above hints at the requirements 3 and 4 above. While we did get reliable syncing (requirement 1), it came at a big cost.

There are a few ways of getting around this issue. One is having a store and forward model, where some intermediary node picks up (encrypted) messages and forwards them to the recipient. This is what we have in production right now at Status.

Another, arguably more pure and robust, way is having a remote log, where the actual data is spread over some decentralized storage layer, and you have a mutable reference to find the latest messages, similar to DNS.

What they both have in common is that they act as a sort of highly-available cache to smooth over the non-overlapping connection windows between two endpoints. Neither of them are required to get reliable data transmission.

Basic calculations for bandwidth multiplier

While we do want better simulations, and this is a work in progress, we can also look at the above scenarios using some basic calculations. This allows us to build a better intuition and reason about the problem without having to write code. Let's start with some assumptions:

  • two nodes exchanging a single message in batch mode
  • 10% uniformly random uptime for each node
  • in HA cache case, 100% uptime of a piece of infrastructure C
  • retransmission every epoch (with constant or exponential backoff)
  • only looking at average (p50) case

First case, no helper services

A sends a message to B, and B acks it.

A message -> B (10% chance of arrival)
A <- ack B (10% chance of arrival)

With a constant backoff, A will send messages at epoch 1, 2, 3, .... With exponential backoff and a multiplier of 2, this would be 1, 2, 4, 8, .... Let's assume constant backoff for now, as this is what will influence the success rate and thus the bandwidth multiplier.

There's a difference between time to receive and time to stop sending. Assuming each send attempt is independent, it takes on average 10 epochs for A's message to arrive with B. Furthermore:

  1. A will send messages until it receives an ACK.
  2. B will send ACK if it receives a message.

To get an average of one ack through, A needs to send 100 messages, and B sends on average 10 acks. That's a multiplier of roughly 100. That's roughly what we saw with the simulation above for receiving a message in interactive mode.

Second case, high-availability caching layer

Let's introduce a helper node or piece of infrastructure, C. Whenever A or B sends a message, it also sends it to C. Whenever A or B comes online, it queries for messages with C.

A message    -> B (10% chance of arrival)
A message -> C (100% chance of arrival)
B <- req/res -> C (100% chance of arrival)
A <- ack B (10% chance of arrival)
C <- ack B (100% chance of arrival)
A <- req/res -> C (100% chance of arrival)

What's the probability that A's messages will arrive at B? Directly, it's still 10%. But we can assume it's 100% that C picks up the message. (Giving C a 90% chance success rate doesn't materially change the numbers).

B will pick up A's message from C after an average of 10 epochs. Then B will send ack to A, which will also be picked up by C 100% of the time. Once A comes online again, it'll query C and receive B's ack.

Assuming we use exponential backoff with a multiplier of 2, A will send a message directly to B at epoch 1, 2, 4, 8 (assuming it is online). At this point, epoch 10, B will be online in the average case. These direct sends will likely fail, but B will pick the message up from C and send one ack, both directly to A and to be picked up by C. Once A comes online, it'll query C and receive the ack from B, which means it won't do any more retransmits.

How many messages have been sent? Not counting interactions with C, A sends 4 (at most) and B 1. Depending on if the interaction with C is direct or indirect (i.e. multicast), the factor for interaction with C will be ~2. This means the total bandwidth multiplier is likely to be <10, which is a lot more acceptable.

Since the syncing semantics are end-to-end, this is without relying on the reliability of C.

Caveat

Note that both of these are probabilistic arguments. They are also based on heuristics. More formal analysis would be desirable, as well as better simulations to experimentally verify them. In fact, the calculations could very well be wrong!

Future work

There are many enhancements that can be made and are desirable. Let's outline a few.

  1. Data sync clients. Examples of actual usage of data sync, with more interesting domain semantics. This also includes usage of sequence numbers and DAGs to know what content is missing and ought to be synced.

  2. Remote log. As alluded to above, this is necessary. It needs a more clear specification and solid proof of concepts.

  3. More efficient ways of syncing with large number of nodes. When the number of nodes goes up, the algorithmic complexity doesn't look great. This also touches on things such as ambient content discovery.

  4. More robust simulations and real-world deployments. Existing simulation is ad hoc, and there are many improvements that can be made to gain more confidence and identify issues. Additionally, better formal analysis.

  5. Example usage over multiple transports. Including things like sneakernet and meshnets. The described protocol is designed to work over unstructured, structured and private p2p networks. In some cases it can leverage differences in topology, such as multicast, or direct connections.

]]>
+ + Oskar + +
+
\ No newline at end of file diff --git a/rlog/building-privacy-protecting-infrastructure/index.html b/rlog/building-privacy-protecting-infrastructure/index.html new file mode 100644 index 00000000..5aec0d71 --- /dev/null +++ b/rlog/building-privacy-protecting-infrastructure/index.html @@ -0,0 +1,267 @@ + + + + + +Building Privacy-Protecting Infrastructure | Vac Research + + + + + + + + + + +
+

Building Privacy-Protecting Infrastructure

by
19 min read

What is privacy-protecting infrastructure? Why do we need it and how can we build it? We'll look at Waku, the communication layer for Web3. We'll see how it uses ZKPs to incentivize and protect the Waku network. We'll also look at Zerokit, a library that makes it easier to use ZKPs in different environments. After reading this, I hope you'll better understand the importance of privacy-protecting infrastructure and how we can build it.

This write-up is based on a talk given at DevCon 6 in Bogota, a video can be found here

Intro

In this write-up, we are going to talk about building privacy-protecting infrastructure. What is it, why do we need it and how can we build it?

We'll look at Waku, the communication layer for Web3. We'll look at how we are +using Zero Knowledge (ZK) technology to incentivize and protect the Waku +network. We'll also look at Zerokit, a library we are writing to make ZKP easier +to use in different environments.

At the end of this write-up, I hope you'll come away with an understanding of +the importance of privacy-protecting infrastructure and how we can build it.

About

First, briefly about Vac. We build public good protocols for the decentralized +web, with a focus on privacy and communication. We do applied research based on +which we build protocols, libraries and publications. We are also the custodians +of protocols that reflect a set of principles.

Principles

It has its origins in the Status app and trying to improve +the underlying protocols and infrastructure. We build Waku, +among other things.

Why build privacy-protecting infrastructure?

Privacy is the power to selectively reveal yourself. It is a requirement for +freedom and self-determination.

Just like you need decentralization in order to get censorship-resistance, you +need privacy to enable freedom of expression.

To build applications that are decentralized and privacy-protecting, you need +the base layer, the infrastructure itself, to have those properties.

We see this a lot. It is easier to make trade-offs at the application layer than +doing them at the base layer. You can build custodial solutions on top of a +decentralized and non-custodial network where participants control their own +keys, but you can't do the opposite.

If you think about it, buildings can be seen as a form of privacy-protecting +infrastructure. It is completely normal and obvious in many ways, but when it +comes to the digital realm our mental models and way of speaking about it hasn't +caught up yet for most people.

I'm not going too much more into the need for privacy or what happens when you +don't have it, but suffice to say it is an important property for any open +society.

When we have conversations, true peer-to-peer offline conversations, we can talk +privately. If we use cash to buy things we can do commerce privately.

On the Internet, great as it is, there are a lot of forces that makes this +natural state of things not the default. Big Tech has turned users into a +commodity, a product, and monetized user's attention for advertising. To +optimize for your attention they need to surveil your habits and activities, and +hence breach your privacy. As opposed to more old-fashioned models, where +someone is buying a useful service from a company and the incentives are more +aligned.

We need to build credibly neutral infrastructure that protects your privacy at +the base layer, in order to truly enable applications that are +censorship-resistant and encourage meaningful freedom of expression.

Web3 infrastructure

Infrastructure is what lies underneath. Many ways of looking at this but I'll +keep it simple as per the original Web3 vision. You had Ethereum for +compute/consensus, Swarm for storage, and Whisper for messaging. Waku has taken +over the mantle from Whisper and is a lot more +usable today than Whisper ever was, +for many reasons.

Web3 Infrastructure

On the privacy-front, we see how Ethereum is struggling. It is a big UX problem, +especially when you try to add privacy back "on top". It takes a lot of effort +and it is easier to censor. We see this with recent action around Tornado Cash. +Compare this with something like Zcash or Monero, where privacy is there by +default.

There are also problems when it comes to the p2p networking side of things, for +example with Ethereum validator privacy and hostile actors and jurisdictions. If +someone can easily find out where a certain validator is physically located, +that's a problem in many parts of the world. Being able to have stronger +privacy-protection guarantees would be very useful for high-value targets.

This doesn't begin to touch on the so called "dapps" that make a lot of +sacrifices in how they function, from the way domains work, to how websites are +hosted and the reliance on centralized services for communication. We see this +time and time again, where centralized, single points of failure systems work +for a while, but then eventually fail.

In many cases an individual user might not care enough though, and for platforms +the lure to take shortcuts is strong. That is why it is important to be +principled, but also pragmatic in terms of the trade-offs that you allow on top. +We'll touch more on this in the design goals around modularity that Waku has.

ZK for privacy-protecting infrastructure

ZKPs are a wonderful new tool. Just like smart contracts enables programmable +money, ZKPs allow us to express fundamentally new things. In line with the great +tradition of trust-minimization, we can prove statement while revealing the +absolute minimum information necessary. This fits the definition of privacy, the +power to selectively reveal yourself, perfectly. I'm sure I don't need to tell +anyone reading this but this is truly revolutionary. The technology is advancing +extremely fast and often it is our imagination that is the limit.

Zero knowledge

Waku

What is Waku? It is a set of modular protocols for p2p communication. It has a focus on privacy, security and being able to run anywhere. It is the spiritual successor to Whisper.

By modular we mean that you can pick and choose protocols and how you use them +depending on constraints and trade-offs. For example, bandwidth usage vs +privacy.

It is designed to work in resource restricted environments, such as mobile +phones and in web browsers. It is important that infrastructure meets users +where they are and supports their real-world use cases. Just like you don't need +your own army and a castle to have your own private bathroom, you shouldn't need +to have a powerful always-on node to get reasonable privacy and +censorship-resistance. We might call this self-sovereignty.

Waku - adaptive nodes

One way of looking at Waku is as an open service network. There are nodes with +varying degrees of capabilities and requirements. For example when it comes to +bandwidth usage, storage, uptime, privacy requirements, latency requirements, +and connectivity restrictions.

We have a concept of adaptive nodes that can run a variety of protocols. A node +operator can choose which protocols they want to run. Naturally, there'll be +some nodes that do more consumption and other nodes that do more provisioning. +This gives rise to the idea of a service network, where services are provided +for and consumed.

Adaptive Nodes

Waku - protocol interactions

There are many protocols that interact. Waku Relay protocol is based on libp2p +GossipSub for p2p messaging. We have filter for bandwidth-restricted nodes to +only receive subset of messages. Lightpush for nodes with short connection +windows to push messages into network. Store for nodes that want to retrieve +historical messages.

On the payload layer, we provide support for Noise handshakes/key-exchanges. +This means that as a developers, you can get end-to-end encryption and expected +guarantees out of the box. We have support for setting up a secure channel from +scratch, and all of this paves the way for providing Signal's Double Ratchet at +the protocol level much easier. We also have experimental support for +multi-device usage. Similar features have existed in for example the Status app +for a while, but with this we make it easier for any platform using Waku to use +it.

There are other protocols too, related to peer discovery, topic usage, etc. See +specs for more details.

Protocol Interactions

Waku - Network

For the Waku network, there are a few problems. For example, when it comes to +network spam and incentivizing service nodes. We want to address these while +keeping privacy-guarantees of the base layer. I'm going to go into both of +these.

The spam problem arises on the gossip layer when anyone can overwhelm the +network with messages. The service incentivization is a problem when nodes don't +directly benefit from the provisioning of a certain service. This can happen if +they are not using the protocol directly themselves as part of normal operation, +or if they aren't socially inclined to provide a certain service. This depends a +lot on how an individual platform decides to use the network.

Waku Network

Dealing with network spam and RLN Relay

Since the p2p relay network is open to anyone, there is a problem with spam. If +we look at existing solutions for dealing with spam in traditional messaging +systems, a lot of entities like Google, Facebook, Twitter, Telegram, Discord use +phone number verification. While this is largely sybil-resistant, it is +centralized and not private at all.

Historically, Whisper used PoW, which isn't good for heterogeneous networks. Peer scoring is open to sybil attacks and doesn't directly address spam protection in an anonymous p2p network.

The key idea here is to use RLN for private economic spam protection using +zkSNARKs.

I'm not going to go into too much detail of RLN here. If you are interested, I +gave a talk in Amsterdam at +Devconnect about this. We have some write-ups on RLN +here by Sanaz who has been pushing a lot of this +from our side. There's also another talk at Devcon by Tyler going into RLN in +more detail. Finally, here's the RLN spec.

I'll briefly go over what it is, the interface and circuit and then talk about +how it is used in Waku.

RLN - Overview and Flow

RLN stands for Rate Limiting Nullifier. It is an anonymous rate limiting mechanism based on zkSNARKs. By rate limiting we mean you can only send N messages in a given period. By anonymity we mean that you can't link a message to a publisher. We can think of it as a voting booth, where you are only allowed to vote once every election.

Voting Booth

It can be used for spam protection in p2p messaging systems, and also rate +limiting in general, such as for a decentralized captcha.

There are three parts to it. You register somewhere, then you can signal and +finally there's a verification/slashing phase. You put some capital at risk, +either economic or social, and if you double signal you get slashed.

RLN - Circuit

Here's what the private and public inputs to the circuit look like. The identity secret is generated locally, and we create an identity commitment that is inserted into a Merkle tree. We then use Merkle proofs to prove membership. A registered member can only signal once for a given epoch or external nullifier, for example every ten seconds in Unix time. The RLN identifier is for a specific RLN app.

We also see what the circuit output looks like. This is calculated locally. y +is a share of the secret equation, and the (internal) nullifier acts as a unique +fingerprint for a given app/user/epoch combination. How do we calculate y and +the internal nullifier?

// Private input
signal input identity_secret;
signal input path_elements[n_levels][1];
signal input identity_path_index[n_levels];

// Public input
signal input x; // signal_hash
signal input epoch; // external_nullifier
signal input rln_identifier;

// Circuit output
signal output y;
signal output root;
signal output nullifier;

RLN - Shamir's secret sharing

This is done using Shamir's secret sharing. Shamir's secret sharing is based on the idea of splitting a secret into shares. This is how we enable slashing of funds.

In this case, we have two shares. If a given identity a0 signals twice in an epoch/external nullifier, a1 is the same. For a given RLN app, internal_nullifier then stays the same. x is the signal hash, which is different, and y is public, so we can reconstruct identity_secret. With the identity secret revealed, this gives access to e.g. financial stake.

a_0 = identity_secret // secret S
a_1 = poseidonHash([a0, external_nullifier])

y = a_0 + x * a_1

internal_nullifier = poseidonHash([a_1, rln_identifier])

Shamir&#39;s secret sharing

RLN Relay

This is how RLN is used with Relay/GossipSub protocol. A node registers and +locks up funds, and after that it can send messages. It publishes a message +containing the Zero Knowledge proof and some other details.

Each relayer node listens to the membership contract for new members, and it +also keeps track of relevant metadata and merkle tree. Metadata is needed to be +able to detect double signaling and perform slashing.

Before forwarding a message, it does some verification checks to ensure there are no duplicate messages, the ZKP is valid and no double signaling has occurred. It is worth noting that this can be combined with peer scoring, for example for duplicate messages or invalid ZK proofs.

In line of Waku's goals of modularity, RLN Relay is applied on a specific subset +of pubsub and content topics. You can think of it as an extra secure channel.

RLN Relay

RLN Relay cross-client testnet

Where are we with RLN Relay deployment? We've recently launched our second +testnet. This is using RLN Relay with a smart contract on Goerli. It integrates +with our example p2p chat application, and it does so through three different +clients, nwaku, go-waku and js-waku for browsers. This is our first p2p +cross-client testnet for RLN Relay.

Here's a video that shows a user registering in a browser, signaling through JS-Waku. It then gets relayed to a nwaku node that verifies the proof. The second video shows what happens in the spam case. When more than one message is sent in a given epoch, it detects it as spam and discards it. Slashing hasn't been implemented fully yet in the client and is a work in progress.

If you are curious and want to participate, you can join the effort on our Vac +Discord. We also have +tutorials +setup for all clients so you can play around with it.

As part of this, and to make it work in multiple different environments, we've +also been developing a new library called Zerokit. I'll talk about this a bit +later.

Private settlement / Service credentials

Going back to the service network idea, let's talk about service credentials. +The idea behind service credentials and private settlement is to enable two +actors to pay for and provide services without compromising their privacy. We do +not want the payment to create a direct public link between the service provider +and requester.

Recall the Waku service network illustration with adaptive nodes that choose +which protocols they want to run. Many of these protocols aren't very heavy and +just work by default. For example the relay protocol is enabled by default. +Other protocols are much heavier to provide, such as storing historical +messages.

It is desirable to have additional incentives for this, especially for platforms +that aren't community-based where some level of altruism can be assumed (e.g. +Status Communities, or WalletConnect cloud infrastructure).

You have a node Alice that is often offline and wants to consume historical +messages on some specific content topics. You have another node Bob that runs a +server at home where they store historical messages for the last several weeks. +Bob is happy to provide this service for free because he's excited about running +privacy-preserving infrastructure and he's using it himself, but his node is +getting overwhelmed by freeloaders and he feels like he should be paid something +for continuing to provide this service.

Alice deposits some funds in a smart contract which registers it in a tree, +similar to certain other private settlement mechanisms. A fee is taken or +burned. In exchange, she gets a set of tokens or service credentials. When she +wants to do a query with some criteria, she sends this to Bob. Bob responds with +size of response, cost, and receiver address. Alice then sends a proof of +delegation of a service token as a payment. Bob verifies the proof and resolves +the query.

The end result is that Alice has consumed some service from Bob, and Bob has +received payment for this. There's no direct transaction link between Alice and +Bob, and gas fees can be minimized by extending the period before settling on +chain.

This can be complemented with altruistic service provisioning, for example by +splitting the peer pool into two slots, or only providing a few cheap queries +for free.

The service provisioning is general, and can be generalized for any kind of request/response service provisioning that we want to keep private.

This isn't a perfect solution, but it is an incremental improvement on top of +the status quo. It can be augmented with more advanced techniques such as better +non-repudiable node reputation, proof of correct service provisioning, etc.

We are currently in the raw spec / proof of concept stage of this. We expect to +launch a testnet of this later this year or early next year.

Service credentials flow

Zerokit

Zerokit is a set of Zero Knowledge modules, +written in Rust and designed to be used in many different environments. The +initial goal is to get the best of both worlds with Circom/Solidity/JS and +Rust/ZK ecosystem. This enables people to leverage Circom-based constructs from +non-JS environments.

For the RLN module, it is using Circom circuits via ark-circom and Rust for +scaffolding. It exposes a C FFI API that can be used through other system +programming environments, like Nim and Go. It also exposes an experimental WASM +API that can be used through web browsers.

Waku is p2p infrastructure running in many different environments, such as Nim/JS/Go/Rust, so this is a requirement for us.

Circom and JS's strengths are access to Dapp developers, tooling, generating verification code, circuits, etc. Rust's strength is that it is systems-based and easy to interface with other language runtimes such as Nim, Go, Rust and C. It also gives access to other Rust ZK ecosystems such as arkworks. This opens the door for using other constructs, such as Halo2. This becomes especially relevant for constructs where you don't want to do a trusted setup or where circuits are more complex/custom and performance requirements are higher.

In general with Zerokit, we want to make it easy to build and use ZKP in a +multitude of environments, such as mobile phones and web browsers. Currently it +is too complex to write privacy-protecting infrastructure with ZKPs considering +all the languages and tools you have to learn, from JS, Solidity and Circom to +Rust, WASM and FFI. And that isn't even touching on things like secure key +storage or mobile dev. Luckily more and more projects are working on this, +including writing DSLs etc. It'd also be exciting if we can make a useful +toolstack for JS-less ZK dev to reduce cognitive overhead, similar to what we +have with something like Foundry.

Other research

I also want to mention a few other things we are doing. One thing is protocol specifications. We think this is very important for p2p infra, and we see a lot of other projects that claim to do p2p infrastructure but aren't clear about guarantees or how stable something is. That makes it hard to have multiple implementations, to collaborate across different projects, and to analyze things objectively.

Related to that is publishing papers. We've put +out three so far, related to Waku and RLN-Relay. This makes it easier to +interface with academia. There's a lot of good researchers out there and we want +to build a better bridge between academia and industry.

Another thing is network +privacy. Waku is modular with +respect to privacy guarantees, and there are a lot of knobs to turn here +depending on specific deployments. For example, if you are running the full +relay protocol you currently have much stronger receiver anonymity than if you +are running filter protocol from a bandwidth or connectivity-restricted node.

We aim to make this pluggable depending on user needs. E.g. mixnets such as Nym +come with some trade-offs but are a useful tool in the arsenal. A good mental +model to keep in mind is the anonymity trilemma, where you can only pick 2/3 out +of low latency, low bandwidth usage and strong anonymity.

We are currently exploring Dandelion-like +additions to the relay/gossip +protocol, which would provide for stronger sender anonymity, especially in a +multi-node/botnet attacker model. As part of this we are looking into different +parameters choices and general possibilities for lower latency usage. This could +make it more amenable for latency sensitive environments, such as validator +privacy, for specific threat models. The general theme here is we want to be +rigorous with the guarantees we provide, under what conditions and for what +threat models.

Another thing mentioned earlier is Noise payload +encryption, and specifically things like allowing +for pairing different devices with e.g. QR codes. This makes it easier for +developers to provide secure messaging in many realistic scenarios in a +multi-device world.

Other research

Summary

We've gone over what privacy-protecting infrastructure is, why we want it and +how we can build it. We've seen how ZK is a fundamental building block for this. +We've looked at Waku, the communication layer for Web3, and how it uses Zero +Knowledge proofs to stay private and function better. We've also looked at +Zerokit and how we can make it easier to do ZKP in different environments.

Finally we also looked at some other research we've been doing. All of the +things mentioned in this article, and more, is available as +write-ups, specs, or +discussions on our forum or Github.

If you find any of this exciting to work on, feel free to reach out on our +Discord. We are also hiring, and we have started +expanding into other privacy infrastructure tech like private and provable +computation with ZK-WASM.

+ + + + \ No newline at end of file diff --git a/rlog/device-pairing-in-js-waku-and-go-waku/index.html b/rlog/device-pairing-in-js-waku-and-go-waku/index.html new file mode 100644 index 00000000..9206444f --- /dev/null +++ b/rlog/device-pairing-in-js-waku-and-go-waku/index.html @@ -0,0 +1,26 @@ + + + + + +Device Pairing in Js-waku and Go-waku | Vac Research + + + + + + + + + + +
+

Device Pairing in Js-waku and Go-waku

by
5 min read

Device pairing and secure message exchange using Waku and noise protocol.

As the world becomes increasingly connected through the internet, the need for secure and reliable communication becomes paramount. This article describes how the Noise protocol can be used as a key-exchange mechanism for Waku.

Recently, this feature was introduced in js-waku and go-waku, providing a simple API for developers to implement secure communication protocols using the Noise Protocol framework. These open-source libraries provide a solid foundation for building secure and decentralized applications that prioritize data privacy and security.

This functionality is designed to be simple and easy to use, even for developers who are not experts in cryptography. The library offers a clear and concise API that abstracts away the complexity of the Noise Protocol framework and provides a straightforward interface for developers to use. Using this, developers can effortlessly implement secure communication protocols on top of their JavaScript and Go applications, without having to worry about the low-level details of cryptography.

One of the key benefits of using Noise is that it provides end-to-end encryption, which means that the communication between two parties is encrypted from start to finish. This is essential for ensuring the security and privacy of sensitive information.

Device Pairing

In today's digital world, device pairing has become an integral part of our lives. Whether it's connecting our smartphones with other computers or web applications, the need for secure device pairing has become more crucial than ever. With the increasing threat of cyber-attacks and data breaches, it's essential to implement secure protocols for device pairing to ensure data privacy and prevent unauthorized access.

To demonstrate how device pairing can be achieved using Waku and Noise, we have examples available at https://examples.waku.org/noise-js/. You can try pairing different devices, such as mobile and desktop, via a web application. This can be done by scanning a QR code or opening a URL that contains the necessary data for a secure handshake.

The process works as follows:

Actors:

  • Alice the initiator
  • Bob the responder
  1. The first step in achieving secure device pairing using Noise and Waku is for Bob to generate the pairing information, which could be transmitted out-of-band. For this, Bob opens https://examples.waku.org/noise-js/ and a QR code is generated, containing the data required to do the handshake. This pairing QR code is timeboxed, meaning that after 2 minutes, it will become invalid and a new QR code must be generated.
  2. Alice scans the QR code using a mobile phone. This will open the app with the QR code parameters initiating the handshake process which is described in 43/WAKU2-DEVICE-PAIRING. These messages are exchanged between two devices over Waku to establish a secure connection. The handshake messages consist of three main parts: the initiator's message, the responder's message, and the final message, which are exchanged to establish a secure connection. While using js-noise, the developer is abstracted of this process, since the messaging happens automatically depending on the actions performed by the actors in the pairing process.
  3. Both Alice and Bob will be asked to verify each other's identity. This is done by confirming that an 8-digit authorization code matches on both devices. If both actors confirm that the authorization code is valid, the handshake concludes successfully.
  4. Alice and Bob receive a set of shared keys that can be used to start exchanging encrypted messages. The shared secret keys generated during the handshake process are used to encrypt and decrypt messages sent between the devices. This ensures that the messages exchanged between the devices are secure and cannot be intercepted or modified by an attacker.

The above example demonstrates device pairing using js-waku. Additionally, you can also try building and experimenting with other noise implementations like nwaku, or go-waku, with an example available at https://github.com/waku-org/go-waku/tree/master/examples/noise in which the same flow described before is done with Bob (the receiver) using go-waku instead of js-waku.

Conclusion

With its easy to use API built on top of the Noise Protocol framework and the LibP2P networking stack, if you are a developer looking to implement secure messaging in your applications that are both decentralized and censorship resistant, Waku is definitely an excellent choice worth checking out!

Waku is also open source under the MIT and APACHEv2 licenses, which means that developers are encouraged to contribute code, report bugs, and suggest improvements to make it even better.

Don't hesitate to try the live example at https://examples.waku.org/noise-js and build your own webapp using https://github.com/waku-org/js-noise, https://github.com/waku-org/js-waku and https://github.com/waku-org/go-waku. This will give you a hands-on experience of implementing secure communication protocols using the Noise Protocol framework in a practical setting. Happy coding!

References

+ + + + \ No newline at end of file diff --git a/rlog/dns-based-discovery/index.html b/rlog/dns-based-discovery/index.html new file mode 100644 index 00000000..18dacc2c --- /dev/null +++ b/rlog/dns-based-discovery/index.html @@ -0,0 +1,26 @@ + + + + + +DNS Based Discovery | Vac Research + + + + + + + + + + +
+

DNS Based Discovery

by
6 min read

A look at EIP-1459 and the benefits of DNS based discovery.

Discovery in p2p networks is the process of how nodes find each other and specific resources they are looking for. Popular discovery protocols, such as Kademlia which utilizes a distributed hash table or DHT, are highly inefficient for resource restricted devices. These methods use short connection windows, and it is quite battery intensive to keep establishing connections. Additionally, we cannot expect a mobile phone for example to synchronize an entire DHT using cellular data.

Another issue is how we do the initial bootstrapping. In other words, how does a client find its first node to then discover the rest of the network? In most applications, including Status right now, this is done with a static list of nodes that a client can connect to.

In summary, we have a static list that provides us with nodes we can connect to which then allows us to discover the rest of the network using something like Kademlia. But what we need is something that can easily be mutated, guarantees a certain amount of security, and is efficient for resource restricted devices. Ideally our solution would also be robust and scalable.

How do we do this?

EIP 1459: Node Discovery via DNS, which is one of the strategies we are using for discovering waku nodes. EIP-1459 is a DNS-based discovery protocol that stores merkle trees in DNS records which contain connection information for nodes.

Waku is our fork of Whisper. Oskar recently wrote an entire post explaining it. In short, Waku is our method of fixing the shortcomings of Whisper in a more iterative fashion. You can find the specification here

DNS-based methods for bootstrapping p2p networks are quite popular. Even Bitcoin uses it, but it uses a concept called DNS seeds, which are just DNS servers that are configured to return a list of randomly selected nodes from the network upon being queried. This means that although these seeds are hardcoded in the client, the IP addresses of actual nodes do not have to be.

> dig dnsseed.bluematt.me +short
129.226.73.12
107.180.78.111
169.255.56.123
91.216.149.28
85.209.240.91
66.232.124.232
207.55.53.96
86.149.241.168
193.219.38.57
190.198.210.139
74.213.232.234
158.181.226.33
176.99.2.207
202.55.87.45
37.205.10.3
90.133.4.73
176.191.182.3
109.207.166.232
45.5.117.59
178.211.170.2
160.16.0.30

The above displays the result of querying one of these DNS seeds. All the nodes are stored as A records for the given domain name. This is quite a simple solution which Bitcoin almost solely relies on since removing the IRC bootstrapping method in v0.8.2.

What makes this DNS based discovery useful? It allows us to have a mutable list of bootstrap nodes without needing to ship a new version of the client every time a list is mutated. It also allows for a more lightweight method of discovering nodes, something very important for resource restricted devices.

Additionally, DNS provides us with a robust and scalable infrastructure. This is due to its hierarchical architecture. This hierarchical architecture also already makes it distributed such that the failure of one DNS server does not result in us no longer being able to resolve our name.

As with every solution though, there is a trade-off. By storing the list under a DNS name, an adversary would simply need to censor the DNS records for a specific name. This would prevent any new client trying to join the network from being able to do so.

One thing you notice when looking at EIP-1459 is that it is a lot more technically complex than Bitcoin's way of doing this. So if Bitcoin uses this simple method and has proven that it works, why did we need a new method?

There are multiple reasons, but the main one is security. In the Bitcoin example, an attacker could create a new list and no one querying would be able to tell. This is however mitigated in EIP-1459 where we can verify the integrity of the entire returned list by storing an entire merkle tree in the DNS records.

Let's dive into this. Firstly, a client that is using these DNS records for discovery must know the public key corresponding to the private key controlled by the entity creating the list. This is because the entire list is signed using a secp256k1 private key, giving the client the ability to authenticate the list and know that it has not been tampered with by some external party.

So that already makes this a lot safer than the method Bitcoin uses. But how are these lists even stored? As previously stated they are stored using merkle trees as follows:

  • The root of the tree is stored in a TXT record, this record contains the tree's root hash, a sequence number which is incremented every time the tree is updated and a signature as stated above.

    Additionally, there is also a root hash to a second tree called a link tree, it contains the information to different lists. This link tree allows us to delegate trust and build a graph of multiple merkle trees stored across multiple DNS names.

    The sequence number ensures that an attacker cannot replace a tree with an older version because when a client reads the tree, they should ensure that the sequence number is greater than the last synchronized version.

  • Using the root hash for the tree, we can find the merkle tree's first branch, the branch is also stored in a TXT record. The branch record contains all the hashes of the branch's leafs.

  • Once a client starts reading all the leafs, they can find one of two things: either a new branch record leading them further down the tree or an Ethereum Name Records (ENR) which means they now have the address of a node to connect to! To learn more about ethereum node records you can have a look at EIP-778, or read a short blog post I wrote explaining them here.

Below is the zone file taken from the EIP-1459, displaying how this looks in practice.

; name                        ttl     class type  content
@ 60 IN TXT enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA
C7HRFPF3BLGF3YR4DY5KX3SMBE 86900 IN TXT enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org
JWXYDBPXYWG6FX3GMDIBFA6CJ4 86900 IN TXT enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24
2XS2367YHAXJFGLZHVAWLQD4ZY 86900 IN TXT enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA
H4FHT4B454P6UXFD7JCYQ5PWDY 86900 IN TXT enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI
MHTDO6TMUBRIA2XWG5LUDACK24 86900 IN TXT enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o

All of this has already been introduced into go-ethereum with the pull request #20094, created by Felix Lange. There's a lot of tooling around it that already exists too which is really cool. So if your project is written in Golang and wants to use this, it's relatively simple! Additionally, here's a proof of concept that shows what this might look like with libp2p on github.

I hope this was a helpful explainer into DNS based discovery, and shows EIP-1459's benefits over more traditional DNS-based discovery schemes.

+ + + + \ No newline at end of file diff --git a/rlog/ethics-surveillance-tech/index.html b/rlog/ethics-surveillance-tech/index.html new file mode 100644 index 00000000..f32b7869 --- /dev/null +++ b/rlog/ethics-surveillance-tech/index.html @@ -0,0 +1,170 @@ + + + + + +Opinion: Pseudo-ethics in the Surveillance Tech Industry | Vac Research + + + + + + + + + + +
+

Opinion: Pseudo-ethics in the Surveillance Tech Industry

by
12 min read

A look at typical ethical shortfalls in the global surveillance tech industry.

This is an opinion piece by pseudonymous contributor, circe.

Preface

The Vac team aims to provide a public good in the form of freely available, open source tools and protocols for decentralized communication. +As such, we value our independence and the usefulness of our protocols for a wide range of applications. +At the same time, we realize that all technical development, including ours, has a moral component. +As a diverse team we are guided by a shared devotion to the principles of human rights and liberty. +This explains why we place such a high premium on security, censorship-resistance and privacy - +a stance we share with the wider Status Network. +The post below takes a different approach from our usual more technical analyses, +by starting to peel back the curtain on the ethical shortfalls of the global surveillance tech industry.

Spotlight on an industry

Apple's announcement of their lawsuit against Israel's NSO Group +marks the latest in a series of recent setbacks for the surveillance tech company. +In early November, the United States blacklisted the firm, +citing concerns about the use of their spyware by foreign governments targeting civilians such as "journalists, businesspeople, activists" and more. +The company is already embroiled in a lawsuit with Whatsapp +over their exploit of the chat app's video calling service to install malware on target devices. +NSO Group's most infamous product, Pegasus, operates as a hidden exploit installed on victims' mobile phones, +sometimes without even requiring as much as an unguarded click on a malicious link. +It has the potential to lay bare, and report to its owners, everything within the reach of the infected device. +For most people this amounts to a significant portion of their private lives and thoughts. +Pegasus can read your private messages (even encrypted), collect your passwords, record calls, track your location and access your device's microphone and camera. +No activity or application on an infected phone would be hidden.

The latest controversies are perhaps less because of the novelty of the revelations - +the existence of Pegasus has been known to civil activists since at least 2016. +Rather, the public was reminded again of the potential scope of surveillance tech +in the indiscriminate use of Pegasus on private citizens. +This has far-reaching implications for human freedoms worldwide. +Earlier this year, a leaked list of over 50,000 targets, or possible targets, of Pegasus included +the phone numbers of human rights advocates, independent journalists, lawyers and political activists. +This should have come as no surprise. +The type of autocratically inclined agents, and governments, who would venture to buy and use such invasive cyber-arms often target those they find politically inconvenient. +Pegasus, and similar technologies, simply extend the reach and capacity of such individuals and governments - +no border or distance, no political rank or social advantage, no sanctity of profession or regard for dignity, +provide any indemnity from becoming a victim. +Your best hope is to remain uninteresting enough to escape consideration.

The NSO Group has, of course, denied allegations of culpability and questions the authenticity of the list. +At this stage, the latter is almost beside the point: +Amnesty International's cybersecurity team, Security Lab, did find forensic evidence of Pegasus on the phones of several volunteers whose numbers appeared on the original list, +including those of journalists and human rights activists. +(Security Lab has since opened up their infection finding tool to the public.) +French intelligence has similarly inspected and confirmed infection of at least three devices belonging to journalists. +The phones of several people who were close to the Saudi-American journalist, Jamal Khashoggi, were confirmed hacked +both before and after Khashoggi's brutal murder at the Saudi embassy in Istanbul in 2018. +More reports of confirmed Pegasus hacks are still published with some regularity. +It is now an open secret that many authoritarian governments have bought Pegasus. +It's not difficult to extrapolate from existing reports and such clients' track records +what the potential injuries to human freedoms are that they can inflict with access to such a powerful cyberweapon.

A typical response

NSO's response to the allegations follows a textbook approach +of avoiding earnest ethical introspection on the manufacturing, and selling, of cyber-arms. +Firstly, shift ethical responsibility to a predetermined process, a list of checkboxes of your own making. +The Group, for example, claims to sell only to "vetted governments", following a classification process +of which they have now published some procedural details but no tangible criteria. +The next step is to reaffirm continuously, and repetitively, your dedication to the legal combat against crime, +"legitimate law enforcement agencies" (note the almost tautological phrasing), +adherence to international arms trade laws, +compliance clauses in customer contracts, etc. +Thirdly, having been absolved of any moral suspicions that might exist about product and process, +from conception to engineering to trade, +distance yourself from the consequences of its use in the world. +"NSO does not operate its technology, does not collect, nor possesses, nor has any access to any kind of data of its customers." +It is interesting that directly after this statement they claim with contradictory confidence that +their "technology was not associated in any way with the heinous murder of Jamal Khashoggi". +The unapologetic tone seems hardly appropriate when the same document confirms that the Group had to +shut down customers' systems due to "confirmed misuse" and have had to do so "multiple times" in the past. +Given all this, the response manages to evade any serious interrogation of the "vetting" process itself, +which forced the company to reject "approximately 15% of potential new opportunities for Pegasus" in one year. +Courageous.

We have heard this all before. +There exists a multi-billion dollar industry of private companies and engineering firms thriving on proceeds from +selling surveillance tools and cyber-arms to dubious agencies and foreign governments. +In turn, the most power-hungry and oppressive regimes often rely on such technological innovations - +for which they lack the in-country engineering expertise - +to maintain control, suppress uprisings, intimidate opposing journalists, and track their citizens. +It's a lucrative business opportunity, and resourceful companies have sprung up everywhere to supply this demand, +often in countries where citizens, including employees of the company, would be horrified if they were similarly subject to the oppressions of their own products. +When, in 2014, Italy's HackingTeam were pulsed by the United Nations about their (then alleged) selling of spyware to Sudan, +which would have been a contravention of the UN's weapon export ban, +they simply replied that their product was not controlled as a weapon and therefore not subject to such scrutiny. +They remained within their legal bounds, technically. +Furthermore, they similarly shifted ethical responsibility to external standards of legitimacy, +claiming their "software is not sold to governments that are blacklisted by the EU, the US, NATO, and similar international organizations". +When the company themselves were hacked in 2015, +revelations (confirmations, that is) of widespread misuse by repressive governments were damaging enough to force them to disappear and rebrand as Memento Labs. +Their website boasts an impressive list of statutes, regulations, procedures, export controls and legal frameworks, +all of which the rebranded hackers proudly comply with. +Surely no further ethical scrutiny is necessary?

Ethics != the law

The law is trailing behind

Such recourse to the legality of your action as ethical justification is moot for several reasons. +The first is glaringly obvious - +our laws are ill-equipped to address the implications of modern technology. +Legal systems are a cumbersome inheritance built over generations. +This is especially true of the statutes and regulations governing international trade, behind which these companies so often hide. +Our best legal systems are trailing miles behind the technology for which we seek guidelines. +Legislators are still struggling to make sense of technologies like face recognition, +the repercussions of smart devices acting "on their own" and biases in algorithms. +To claim you are performing ethical due diligence by resorting to an outdated and incomplete system of legal codes is disingenuous.

The law depends on ethics

The second reason is more central to my argument, +and an important flaw in these sleight of hand justifications appearing from time to time in the media. +Ethics can in no way be confused as synonymous with legality or legitimacy. +These are incommensurable concepts. +In an ideal world, of course, the law is meant to track the minimum standards of ethical conduct in a society. +Laws are often drafted exactly from some ethical, and practical, impulse to minimize harmful conduct +and provide for corrective and punitive measures where transgressions do occur. +The law, however, has a much narrower scope than ethics. +It can be just or unjust. +In fact, it is in need of ethics to constantly reform. +Ethics and values are born out of collective self-reflection. +It develops in our conversation with ourselves and others about the type of society we strive for. +As such, an ethical worldview summarizes our deepest intuitions about how we should live and measure our impact on the world. +For this reason, ethics is primarily enforced by social and internal pressures, not legal boundaries - +our desire to do what ought to be done, however we define that. +Ethics is therefore a much grander scheme than global legal systems +and the diplomatic frameworks that grants legitimacy to governments. +These are but one limited outflow of the human aspiration to form societies in accordance with our ideologies and ethics.

International law is vague and exploitable

Of course, the cyber-arms trade has a favorite recourse, international law, which is even more limited. +Since such products are seldomly sold to governments and agencies within the country of production, +it enables a further distancing from consequences. +Many private surveillance companies are based in fairly liberal societies with (seemingly) strict emphases on human rights in their domestic laws. +International laws are much more complicated - for opportunists a synonym for "more grey areas in which to hide". +Company conduct can now be governed, and excused, by a system that follows +the whims of autocrats with exploitative intent and vastly different ethical conceptions from the company's purported aims. +International law, and the ways it is most often enforced by way of, say, UN-backed sanctions, +have long been shaped by the compromises of international diplomacy. +To be blunt: these laws are weak and subject to exactly the sort of narrow interests behind which mercenaries have always hidden. +The surveillance tech industry is no exception.

Conclusion

My point is simple: +selling cyber-arms with the potential to become vast tools of oppression to governments and bodies with blatant histories of human rights violations, +and all but the publicly announced intention to continue operating in this way, +is categorically unconscionable. +This seems obvious no matter what ethics system you argue from, +provided it harbors any consideration for human dignity and freedom. +It is a sign of poor moral discourse that such recourses to law and legitimacy are often considered synonymous with ethical justification. +"I have acted within the bounds of law", "We supply only to legitimate law enforcement agencies", etc. are no substitutes. +Ethical conduct requires an honest evaluation of an action against some conception of "the good", +however you define that. +Too often the surveillance tech industry precisely sidesteps this question, +both in internal processes and external rationalisations to a concerned public.

John Locke, he of the life-liberty-and-property, articulated the idea that government exists solely through the consent of the governed. +Towards the end of the 17th century, he wrote in his Second Treatise on Civil Government, +"[w]henever legislators endeavor to take away, +and destroy the property of the people, or to reduce them to slavery under arbitrary power, +they put themselves in a state of war with the people, who are thereupon absolved from any further obedience". +The inference is straightforward and humanist in essence: +legitimacy is not something that is conferred by governments and institutions. +Rather, they derive their legitimacy from us, their citizens, holding them to standards of ethics and societal ideals. +This legitimacy only remains intact as long as this mandate is honored and continuously extended by a well-informed public. +This is the principle of informed consent on which all reciprocal ethics is based.

The surveillance tech industry may well have nothing more or less noble in mind than profit-making within legal bounds +when developing and selling their products. +However, when such companies are revealed again and again to have supplied tools of gross human rights violations to known human rights violators, +they will do well to remember that ethics always precedes requirements of legality and legitimacy. +It is a fallacy to take normative guidance from the concept of "legitimacy" +if the concept itself depends on such normative guidelines for definition. +Without examining the ethical standards by which institutions, governments, and laws, were created, +no value-judgements about their legitimacy can be made. +Hiding behind legal compliance as substitute for moral justification is not enough. +Targets of increasingly invasive governmental snooping are too often chosen precisely to suppress the mechanisms from which the legitimacy of such governments flow - +the consent of ordinary civilians. +Free and fair elections, free speech, free media, freedom of thought are all at risk.

References

+ + + + \ No newline at end of file diff --git a/rlog/feasibility-discv5/index.html b/rlog/feasibility-discv5/index.html new file mode 100644 index 00000000..f2140ed4 --- /dev/null +++ b/rlog/feasibility-discv5/index.html @@ -0,0 +1,26 @@ + + + + + +Feasibility Study: Discv5 | Vac Research + + + + + + + + + + +
+

Feasibility Study: Discv5

by
6 min read

Looking at discv5 and the theoretical numbers behind finding peers.

Disclaimer: some of the numbers found in this write-up could be inaccurate. They are based on the current understanding of theoretical parts of the protocol itself by the author and are meant to provide a rough overview rather than bindable numbers.

This post serves as a more authoritative overview of the discv5 study, for a discussionary post providing more context make sure to check out the corresponding discuss post. Additionally, if you are unfamiliar with discv5, check out my previous write-up: "From Kademlia to Discv5".

Motivating Problem

The discovery method currently used by Status, is made up of various components and grew over time to solve a mix of problems. We want to simplify this while maintaining some of the properties we currently have.

Namely, we want to ensure censorship resistance to state-level adversaries. One of the issues Status had which caused them to add to their discovery method was the fact that addresses from providers like AWS and GCP were blocked both in Russia and China. Additionally, one of the main factors required is the ability to function on resource restricted devices.

Considering we are talking about resource restricted devices, let's look at the implications and what we need to consider:

  • Battery consumption - constant connections like websockets consume a lot of battery life.
  • CPU usage - certain discovery methods may be CPU intensive, slowing an app down and making it unusable.
  • Bandwidth consumption - a lot of users will be using data plans, the discovery method needs to be efficient in order to accommodate those users without using up significant portions of their data plans.
  • Short connection windows - the discovery algorithm needs to be low latency, that means it needs to return results fast. This is because many users will only have the app open for a short amount of time.
  • Not publicly connectable - There is a good chance that most resource restricted devices are not publicly connectable.

For a node to be able to participate as both a provider and a consumer in the discovery method — meaning a node both reads from other nodes' stored DHTs and hosts the DHT for other nodes to read from — it needs to be publicly connectable. This means another node must be able to connect to some public IP of the given node.

With devices that are behind a NAT, this is easier said than done. This is especially true for mobile devices, which, when connected to 4G LTE networks, are often stuck behind a symmetric NAT, drastically reducing the success rate of NAT traversal. Keeping this in mind, it becomes obvious that most resource restricted devices will be consumers rather than providers due to this technical limitation.

In order to answer our questions, we formulated the problem with a simple method for testing. The "needle in a haystack" problem was formulated to figure out how easily a specific node can be found within a given network. This issue was fully formulated in vacp2p/research#15.

Overview

The main things we wanted to investigate was the overhead on finding a peer. This means we wanted to look at both the bandwidth, latency and effectiveness of this. There are 2 methods which we can use to find a peer:

  • We can find a peer with a specific ID, using normal lookup methods as documented by Kademlia.
  • We can find a peer that advertises a capability, this is possible using either capabilities advertised in the ENR or through topic tables.

Feasibility

To be able to investigate the feasibility of discv5, we used various methods including rough calculations which can be found in the notebook, and a simulation isolated in vacp2p/research#19.

CPU & Memory Usage

The experimental discv5 has already been used within Status, however what was noticed was that the CPU and memory usage was rather high. It therefore should be investigated if this is still the case, and if it is, it should be isolated where this stems from. Additionally it is worth looking at whether or not this is the case with both the go and nim implementation.

See details: vacp2p/research#31

NAT on Cellular Data

If a peer is not publicly connectable it cannot participate in the DHT both ways. A lot of mobile phones are behind symmetric NATs which make UDP hole-punching close to impossible. It should be investigated whether or not mobile phones will be able to participate both ways and if there are good methods for doing hole-punching.

See details: vacp2p/research#29

Topic Tables

Topic Tables allow us the ability to efficiently find nodes given a specific topic. However, they are not implemented in the status-im/nim-eth implementation nor are they fully finalized in the spec. These are important if the network grows past a size where the concentration of specific nodes is relatively low making them hard to find.

See details: vacp2p/research#26

Finding a node

It is important to note, that given a network is relatively small sized, eg 100-500 nodes, then finding a node given a specific address is relatively manageable. Additionally, if the concentration of a specific capability in a network is reasonable, then finding a node advertising its capabilities using an ENR rather than the topic table is also manageable. A reasonable concentration for example would be 10%, which would give us an 80% chance of getting a node with that capability in the first lookup request. This can be explored more using our discv5 notebook.

Results

Research has shown that finding a node in the DHT has a relatively low effect on bandwidth, both inbound and outbound. For example when trying to find a node in a network of 100 nodes, it would take roughly 5668 bytes total. Additionally if we assume 100ms latency per request it would range at ≈ 300ms latency, translating to 3 requests to find a specific node.

General Thoughts

One of the main blockers right now is figuring out what the CPU and memory usage of discv5 is on mobile phones, this is a large blocker as it affects one of the core problems for us. We need to consider whether discv5 is an upgrade as it allows us to simplify our current discovery process or if it is too much of an overhead for resource restricted devices. The topic table feature could largely enhance discovery however it is not yet implemented. Given that CPU and memory isn't too high, discv5 could probably be used as the other issues are more "features" than large scale issues. Implementing it would already reduce the ability for state level adversaries to censor our nodes.

Acknowledgements

  • Oskar Thoren
  • Dmitry Shmatko
  • Kim De Mey
  • Corey Petty
+ + + + \ No newline at end of file diff --git a/rlog/feasibility-semaphore-rate-limiting-zksnarks/index.html b/rlog/feasibility-semaphore-rate-limiting-zksnarks/index.html new file mode 100644 index 00000000..f2ed019b --- /dev/null +++ b/rlog/feasibility-semaphore-rate-limiting-zksnarks/index.html @@ -0,0 +1,26 @@ + + + + + +Feasibility Study: Semaphore rate limiting through zkSNARKs | Vac Research + + + + + + + + + + +
+

Feasibility Study: Semaphore rate limiting through zkSNARKs

by
8 min read

A research log. Zero knowledge signaling as a rate limiting mechanism to prevent spam in p2p networks.

tldr: Moon math promising for solving spam in Whisper, but to get there we need to invest more in performance work and technical upskilling.

Motivating problem

In open p2p networks for messaging, one big problem is spam-resistance. Existing solutions, such as Whisper's proof of work, are insufficient, especially for heterogeneous nodes. Other reputation-based approaches might not be desirable, due to issues around arbitrary exclusion and privacy.

One possible solution is to use a right-to-access staking-based method, where a node is only able to send a message, signal, at a certain rate, and otherwise they can be slashed. One problem with this is in terms of privacy-preservation, where we specifically don't want a user to be tied to a specific payment or unique fingerprint.

In addition to above, there are a lot of related problems that share similarities in terms of their structure and proposed solution.

  • Private transactions (Zcash, AZTEC)
  • Private voting (Semaphore)
  • Private group membership (Semaphore)
  • Layer 2 scaling, poss layer 1 (ZK Rollup; StarkWare/Eth2-3)

Overview

Basic terminology

A zero-knowledge proof allows a prover to show a verifier that they know something, without revealing what that something is. This means you can do trust-minimized computation that is also privacy preserving. As a basic example, instead of showing your ID when going to a bar you simply give them a proof that you are over 18, without showing the doorman your id.

zkSNARKs is a form of zero-knowledge proofs. There are many types of zero-knowledge proofs, and the field is evolving rapidly. They come with various trade-offs in terms of things such as: trusted setup, cryptographic assumptions, proof/verification key size, proof/verification time, proof size, etc. See section below for more.

Semaphore is a framework/library/construct on top of zkSNARKs. It allows for zero-knowledge signaling, specifically on top of Ethereum. This means an approved user can broadcast some arbitrary string without revealing their identity, given some specific constraints. An approved user is someone who has been added to a certain merkle tree. See current Github home for more.

Circom is a DSL for writing arithmetic circuits that can be used in zkSNARKs, similar to how you might write a NAND gate. See Github for more.

Basic flow

We start with a private voting example, and then extend it to the slashable rate limiting example.

  1. A user registers an identity (arbitrary keypair), along with a small fee, to a smart contract. This adds them to a merkle tree and allows them to prove that they are a member of that group, without revealing who they are.

  2. When a user wants to send a message, they compute a zero-knowledge proof. This ensures certain invariants, has some public outputs, and can be verified by anyone (including a smart contract).

  3. Any node can verify the proof, including smart contracts on chain (as of Byzantinum HF). Additionally, a node can have rules for the public output. In the case of voting, one such rule is that a specific output hash has to be equal to some predefined value, such as "2020-01-01 vote on Foo Bar for president".

  4. Because of how the proof is constructed, and the rules around output values, this ensures that: a user is part of the approved set of voters and that a user can only vote once.

  5. As a consequence of above, we have a system where registered users can only vote once, no one can see who voted for what, and this can all be proven and verified.

Rate limiting example

In the case of rate limiting, we do want nodes to send multiple messages. This changes step 3-5 above somewhat.

NOTE: It is a bit more involved than this, and if we precompute proofs the flow might look a bit different. But the general idea is the same.

  1. Instead of having a rule that you can only vote once, we have a rule that you can only send a message per epoch. Epoch here can be every second, as defined by UTC date time +-20s.

  2. Additionally, if a user sends more than one message per epoch, one of the public outputs is a random share of a private key. Using Shamir's Secret Sharing (similar to a multisig) and 2/3 key share as an example threshold: in the normal case only 1/3 private keys is revealed, which is insufficient to have access. In the case where two messages are sent in an epoch, probabilistically 2/3 shares is sufficient to have access to the key (unless you get the same random share of the key).

  3. This means any untrusted user who detects a spamming user, can use it to access their private key corresponding to funds in the contract, and thus slash them.

  4. As a consequence of above, we have a system where registered users can only send messages X times per epoch, and no one can see who is sending what messages. Additionally, if a user is violating the above rate limit, they can be punished and any user can profit from it.

Briefly on scope of 'approved users'

In the case of an application like Status, this construct can either be a global StatusNetwork group, or one per chat, or network, etc. It can be applied both at the network and user level. There are no specific limitations on where or who deploys this, and it is thus more of a UX consideration.

Technical details

For a fairly self-contained set of examples above, see exploration in Vac research repo. Note that the Shamir secret sharing is not inside the SNARK, but out-of-band for now.

The current version of Semaphore is using NodeJS and Circom from Iden3 for Snarks.

For more on rate limiting idea, see ethresearch post.

Feasibility

The above repo was used to exercise the basic paths and to gain an intuition of feasibility. Based on it and related reading we outline a few blockers and things that require further study.

Technical feasibility

Proof time

Proof time for Semaphore (https://github.com/kobigurk/semaphore) zkSNARKs using circom, groth and snarkjs is currently way too long. It takes on the order of ~10m to generate a proof. With Websnark, it is likely to take 30s, which might still be too long. We should experiment with native code on mobile here.

See details.

Proving key size

Prover key size is ~110mb for Semaphore. Assuming this is embedded on mobile device, it bloats the APK a lot. Current APK size is ~30mb and even that might be high for people with limited bandwidth.

See details.

Trusted setup

Using zkSNARKs, a trusted setup is required to generate prover and verifier keys. As part of this setup, a toxic parameter lambda is generated. If a party gets access to this lambda, they can prove anything. This means people using zkSNARKs usually have an elaborate MPC ceremony to ensure this parameter doesn't get discovered.

See details.

Shamir logic in SNARK

For Semaphore RLN we need to embed the Shamir logic inside the SNARK in order to do slashing for spam. Currently the implementation is trusted and very hacky.

See details.

End to end integration

Currently is standalone and doesn't touch multiple users, deployed contract with merkle tree and verification, actual transactions, a mocked network, add/remove members, etc. There are bound to be edge cases and unknown unknowns here.

See details.

Licensing issues

Currently Circom uses a GPL license, which can get tricky when it comes to the App Store etc.

See details.

Alternative ZKPs?

Some of the isolated blockers for zkSNARKs (#7, #8, #9) might be mitigated by the use of other ZKP technology. However, they likely have their own issues.

See details.

Social feasibility

Technical skill

zkSNARKs and related technologies are quite new. To learn how they work and get an intuition for them requires individuals to dedicate a lot of time to studying them. This means we must make gaining competence in these technologies a priority if we wish to use them to our advantage.

Time and resources

In order for this and related projects (such as private transactions) to get anywhere, it must be made an explicit area of focus for an extended period of time.

General thoughts

Similar to Whisper, and in line with moving towards protocol and infrastructure, we need to upskill and invest resources into this. This doesn't mean developing all of the technologies ourselves, but gaining enough competence to leverage and extend existing solutions by the growing ZKP community.

For example, this might also include leveraging largely ready made solutions such as AZTEC for private transactions; more fundamental research into ZK rollup and similar; using Semaphore for private group membership and private voting; Nim based wrapper around Bellman, etc.

Acknowledgement

Thanks to Barry Whitehat for patient explanation and pointers. Thanks to WJ for helping with runtime issues.

Peacock header image from [Tonos](https://en.wikipedia.org/wiki/File:Flickr-lo.tangelini-Tonos(1).jpg).

+ + + + \ No newline at end of file diff --git a/rlog/fixing-whisper-with-waku/index.html b/rlog/fixing-whisper-with-waku/index.html new file mode 100644 index 00000000..f0f6063b --- /dev/null +++ b/rlog/fixing-whisper-with-waku/index.html @@ -0,0 +1,47 @@ + + + + + +Fixing Whisper with Waku | Vac Research + + + + + + + + + + +
+

Fixing Whisper with Waku

by
10 min read

A research log. Why Whisper doesn't scale and how to fix it.

This post will introduce Waku. Waku is a fork of Whisper that attempts to address some of Whisper's shortcomings in an iterative fashion. We will also introduce a theoretical scaling model for Whisper that shows why it doesn't scale, and what can be done about it.

Introduction

Whisper is a gossip-based communication protocol or an ephemeral key-value store depending on which way you look at it. Historically speaking, it is the messaging pillar of Web3, together with Ethereum for consensus and Swarm for storage.

Whisper, being a somewhat esoteric protocol and with some fundamental issues, +hasn't seen a lot of usage. However, applications such as Status are using it, +and have been making minor ad hoc modifications to it to make it run on mobile +devices.

What are these fundamental issues? In short:

  1. scalability, most immediately when it comes to bandwidth usage
  2. spam-resistance, proof of work is a poor mechanism for heterogeneous nodes
  3. no incentivized infrastructure, leading to centralized choke points
  4. lack of formal and unambiguous specification makes it hard to analyze and implement
  5. running over devp2p, which limits where it can run and how

In this post, we'll focus on the first problem, which is scalability through bandwidth usage.

Whisper theoretical scalability model

(Feel free to skip this section if you want to get right to the results).

There's widespread implicit knowledge that Whisper "doesn't scale", but it is less understood exactly why. This theoretical model attempts to encode some characteristics of it. Specifically for use case such as one by Status (see Status Whisper usage +spec).

Caveats

First, some caveats: this model likely contains bugs, has wrong assumptions, or completely misses certain dimensions. However, it acts as a form of existence proof for unscalability, with clear reasons.

If certain assumptions are wrong, then we can challenge them and reason about them in isolation. It doesn’t mean things will definitely work as the model predicts, and that there aren’t unknown unknowns.

The model also only deals with receiving bandwidth for end nodes, uses mostly static assumptions of averages, and doesn’t deal with spam resistance, privacy guarantees, accounting, intermediate node or network wide failures.

Goals

  1. Ensure network scales by being user or usage bound, as opposed to bandwidth growing in proportion to network size.
  2. Staying within a reasonable bandwidth limit for limited data plans.
  3. Do the above without materially impacting existing nodes.

It proceeds through various cases with clear assumptions behind them, starting from the most naive assumptions. It shows results for 100 users, 10k users and 1m users.

Model

Case 1. Only receiving messages meant for you [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A4. Only receiving messages meant for you.

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1000.0KB/day
For 1m users, receiving bandwidth is 1000.0KB/day

------------------------------------------------------------

Case 2. Receiving messages for everyone [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A5. Received messages for everyone.

For 100 users, receiving bandwidth is 97.7MB/day
For 10k users, receiving bandwidth is 9.5GB/day
For 1m users, receiving bandwidth is 953.7GB/day

------------------------------------------------------------

Case 3. All private messages go over one discovery topic [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A8. All private messages are received by everyone (same topic) (static).

For 100 users, receiving bandwidth is 49.3MB/day
For 10k users, receiving bandwidth is 4.8GB/day
For 1m users, receiving bandwidth is 476.8GB/day

------------------------------------------------------------

Case 4. All private messages are partitioned into shards [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1.5MB/day
For 1m users, receiving bandwidth is 98.1MB/day

------------------------------------------------------------

Case 5. 4 + Bloom filter with false positive rate

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1

For 100 users, receiving bandwidth is 10.7MB/day
For 10k users, receiving bandwidth is 978.0MB/day
For 1m users, receiving bandwidth is 95.5GB/day

NOTE: Traffic extremely sensitive to bloom false positives
This completely dominates network traffic at scale.
With p=1% we get 10k users ~100MB/day and 1m users ~10gb/day)

------------------------------------------------------------

Case 6. Case 5 + Benign duplicate receives

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1
- A15. Benign duplicate receives factor (static): 2
- A16. No bad envelopes, bad PoW, expired, etc (static).

For 100 users, receiving bandwidth is 21.5MB/day
For 10k users, receiving bandwidth is 1.9GB/day
For 1m users, receiving bandwidth is 190.9GB/day

------------------------------------------------------------

Case 7. 6 + Mailserver under good conditions; small bloom fp; mostly offline

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1
- A15. Benign duplicate receives factor (static): 2
- A16. No bad envelopes, bad PoW, expired, etc (static).
- A17. User is offline p% of the time (static) p=0.9
- A18. No bad request, dup messages for mailservers; overlap perfect (static).
- A19. Mailserver requests can change false positive rate to be p=0.01

For 100 users, receiving bandwidth is 3.9MB/day
For 10k users, receiving bandwidth is 284.8MB/day
For 1m users, receiving bandwidth is 27.8GB/day

------------------------------------------------------------

Case 8. No metadata protection w bloom filter; 1 node connected; static shard

Aka waku mode.

Next step up is to either only use contact code, or shard more aggressively.
Note that this requires change of other nodes behavior, not just local node.

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1.5MB/day
For 1m users, receiving bandwidth is 98.1MB/day

------------------------------------------------------------

See source +for more detail on the model and its assumptions.

Takeaways

  1. Whisper as it currently works doesn’t scale, and we quickly run into unacceptable bandwidth usage.
  2. There are a few factors of this, but largely it boils down to noisy topics usage and use of bloom filters. Duplicate (e.g. see Whisper vs PSS) and bad envelopes are also factors, but this depends a bit more on specific deployment configurations.
  3. Waku mode (case 8) is an additional capability that doesn’t require other nodes to change, for nodes that put a premium on performance.
  4. The next bottleneck after this is the partitioned topics (app/network specific), which either needs to gracefully (and potentially quickly) grow, or an alternative way of consuming those messages needs to be devised.

The results are summarized in the graph above. Notice the log-log scale. The +colored backgrounds correspond to the following bandwidth usage:

  • Blue: <10mb/d (<~300mb/month)
  • Green: <30mb/d (<~1gb/month)
  • Yellow: <100mb/d (<~3gb/month)
  • Red: >100mb/d (>3gb/month)

These ranges are somewhat arbitrary, but are based on user +requirements for users +on a limited data plan, with comparable usage for other messaging apps.

Introducing Waku

Motivation for a new protocol

Apps such as Status will likely use something like Whisper for the foreseeable future, and we want to enable them to use it with more users on mobile devices, without bandwidth exploding, with minimal changes.

Additionally, there's not a clear cut alternative that maps cleanly to the +desired use cases (p2p, multicast, privacy-preserving, open, etc).

We are actively researching, developing and collaborating with more greenfield +approaches. It is likely that Waku will either converge to those, or Waku will +lay the groundwork (clear specs, common issues/components) necessary to make +switching to another protocol easier. In this project we want to emphasize +iterative work with results on the order of weeks.

Briefly on Waku mode

  • Doesn’t impact existing clients, it’s just a separate node and capability.
  • Other nodes can still use Whisper as is, like a full node.
  • Sacrifices metadata protection and incurs higher connectivity/availability requirements for scalability

Requirements:

  • Exposes API to get messages from a set of list of topics (no bloom filter)
  • Way of being identified as a Waku node (e.g. through version string)
  • Option to statically encode this node in app, e.g. similar to custom bootnodes/mailserver
  • Only node that needs to be connected to, possibly as Whisper relay / mailserver hybrid

Provides:

  • likely provides scalability of up to 10k users and beyond
  • with some enhancements to partition topic logic, can possibly scale up to 1m users (app/network specific)

Caveats:

  • hasn’t been tested in a large-scale simulation
  • other network and intermediate node bottlenecks might become apparent (e.g. full bloom filter and private cluster capacity; can likely be dealt with in isolation using known techniques, e.g. load balancing) (deployment specific)

Progress so far

In short, we have a Waku version 0 spec up as well as a PoC for backwards compatibility. In the coming weeks, we are going to solidify the specs, get a more fully featured PoC for Waku mode. See rough roadmap, project board [link deprecated] and progress thread on the Vac forum.

The spec has been rewritten for clarity, with ABNF grammar and less ambiguous language. The spec also incorporates several previously ad hoc implemented features, such as light nodes and mailserver/client support. This has already caught a few incompatibilities between the geth (Go), status/whisper (Go) and nim-eth (Nim) versions, specifically around light node usage and the handshake.

If you are interested in this effort, please check out our forum for questions, comments and proposals. We already have some discussion for better spam protection (see previous post for a more complex but privacy-preserving proposal), something that is likely going to be addressed in future versions of Waku, along with many other fixes and enhancements.

+ + + + \ No newline at end of file diff --git a/rlog/future-of-waku-network/index.html b/rlog/future-of-waku-network/index.html new file mode 100644 index 00000000..f7f5e2b8 --- /dev/null +++ b/rlog/future-of-waku-network/index.html @@ -0,0 +1,63 @@ + + + + + +The Future of Waku Network: Scaling, Incentivization, and Heterogeneity | Vac Research + + + + + + + + + + +
+

The Future of Waku Network: Scaling, Incentivization, and Heterogeneity

by
6 min read

Learn how the Waku Network is evolving through scaling, incentivization, and diverse ecosystem development and what the future might look like.

Waku is preparing for production with a focus on the Status Communities use case. In this blog post, we will provide an overview of recent discussions and research outputs, aiming to give you a better understanding of what the Waku network may look like in terms of scaling and incentivization.

DOS Mitigation for Status Communities

Waku is actively exploring DOS mitigation mechanisms suitable for Status Communities. While RLN +(Rate Limiting Nullifiers) remains the go-to DOS protection solution due to its privacy-preserving and +censorship-resistant properties, there is still more work to be done. We are excited to collaborate with PSE +(Privacy & Scaling Explorations) in this endeavor. Learn more about their latest progress in this tweet.

A Heterogeneous Waku Network

As we noted in a previous forum post, Waku's protocol +incentivization model needs to be flexible to accommodate various business models. Flexibility ensures that projects +can choose how they want to use Waku based on their specific needs.

Reversing the Incentivization Question

Traditionally, the question of incentivization revolves around how to incentivize operators to run nodes. We'd like to +reframe the question and instead ask, "How do we pay for the infrastructure?"

Waku does not intend to offer a free lunch. +Ethereum's infrastructure is supported by transaction fees and inflation, with validators receiving rewards from both sources. +However, this model does not suit a communication network like Waku. +Users and platforms would not want to pay for every single message they send. Additionally, Waku aims to support instant +ephemeral messages that do not require consensus or long-term storage.

Projects that use Waku to enable user interactions, whether for chat messages, gaming, private DeFi, notifications, or +inter-wallet communication, may have different value extraction models. Some users might provide services for the +project and expect to receive value by running nodes, while others may pay for the product or run infrastructure to +contribute back. Waku aims to support each of these use cases, which means there will be various ways to "pay for the +infrastructure."

In his talk, Oskar addressed two strategies: RLN and service credentials.

RLN and Service Credentials

RLN enables DOS protection across the network in a privacy-preserving and permission-less manner: stake in a contract, +and you can send messages.

Service credentials establish a customer-provider relationship. Users might pay to have messages they are interested in +stored and served by a provider. Alternatively, a community owner could pay a service provider to host their community.

Providers could offer trial or limited free services to Waku users, similar to Slack or Discord. Once a trial is expired or outgrown, +a community owner could pay for more storage or bandwidth, similar to Slack's model. +Alternatively, individual users could contribute financially, akin to Discord's Server Boost, or by sharing their own +resources with their community.

We anticipate witnessing various scenarios across the spectrum: from users sharing resources to users paying for access to the network and everything in between.

Waku Network: Ethereum or Cosmos?

Another perspective is to consider whether the Waku network will resemble Ethereum or Cosmos.

For those not familiar with the difference between both, in a very concise manner:

  • Ethereum is a set of protocols and software that are designed to operate on one common network and infrastructure
  • Cosmos is a set of protocols and software (SDKs) designed to be deployed in separate yet interoperable networks and infrastructures by third parties

We want Waku to be decentralized to provide censorship resistance and privacy-preserving communication. +If each application has to deploy its own network, we will not achieve this goal. +Therefore, we aim Waku to be not only an open source set of protocols, but also a shared infrastructure that anyone can leverage to build applications on top, with some guarantees in terms of decentralization and anonymity. +This approach is closer in spirit to Ethereum than Cosmos. +Do note that, similarly to Ethereum, anyone is free to take Waku software and protocols and deploy their own network.

Yet, because of the difference in the fee model, the Waku Network is unlikely to be as unified as Ethereum's. +We currently assume that there will be separate gossipsub networks with different funding models. +Since there is no consensus on Waku, each individual operator can decide which network to support, enabling Waku to maintain its permission-less property.

Most likely, the Waku network will be heterogeneous, and node operators will choose the incentivization model they prefer.

Scalability and Discovery Protocols

To enable scalability, the flow of messages in the Waku network will be divided in shards, +so that not every node has to forward every message of the whole network. +Discovery protocols will facilitate users connecting to the right nodes to receive the messages they are interested in.

Different shards could be subject to a variety of rate limiting techniques (globally, targeted to that shard or something in-between).

Marketplace protocols may also be developed to help operators understand how they can best support the network and where +their resources are most needed. However, we are still far from establishing or even assert that such a marketplace will be needed.

Open Problems

Splitting traffic between shards reduces bandwidth consumption for every Waku Relay node. +This improvement increases the likelihood that users with home connections can participate and contribute to the gossipsub network without encountering issues.

However, it does not cap traffic. +There are still open problems regarding how to guarantee that someone can use Waku with lower Internet bandwidth or run critical services, such as a validation node, on the same connection.

We have several ongoing initiatives:

  • Analyzing the Status Community protocol to confirm efficient usage of Waku [4]
  • Simulating the Waku Network to measure actual bandwidth usage [5]
  • Segregating chat messages from control and media messages [6]

The final solution will likely be a combination of protocols that reduce bandwidth usage or mitigate the risk of DOS attacks, providing flexibility for users and platforms to enable the best experience.

The Evolving Waku Network

The definition of the "Waku Network" will likely change over time. In the near future, it will transition from a single +gossipsub network to a sharded set of networks unified by a common discovery layer. This change will promote scalability +and allow various payment models to coexist within the Waku ecosystem.

In conclusion, the future of Waku Network entails growth, incentivization, and heterogeneity while steadfastly +maintaining its core principles. As Waku continues to evolve, we expect it to accommodate a diverse range of use cases +and business models, all while preserving privacy, resisting censorship, avoiding surveillance, and remaining accessible +to devices with limited resources.

References

  1. 51/WAKU2-RELAY-SHARDING
  2. 57/STATUS-Simple-Scaling
  3. 58/RLN-V2
  4. Scaling Status Communities: Potential Problems
  5. Waku Network Testing
  6. 51/WAKU2-RELAY-SHARDING: Control Message Shards
+ + + + \ No newline at end of file diff --git a/rlog/index.html b/rlog/index.html new file mode 100644 index 00000000..6eff0839 --- /dev/null +++ b/rlog/index.html @@ -0,0 +1,30 @@ + + + + + +Research Blog | Vac Research + + + + + + + + + + +
+

Research Blog

Blog

by
7 min read

Waku is an open communication protocol and network. Decentralized apps and infrastructure can use Waku for their communication needs. It is designed to enable dApps and decentralized infrastructure projects to have secure, private, scalable communication. Waku is available in several languages and platforms, from Web to mobile to desktop to cloud. Initially, we pushed Waku adoption to the Web ecosystem; we learned that Waku is usable in a variety of complex applications and infrastructure projects. We have prioritized our effort to make Waku usable on various platforms and environments.

by
19 min read

What is privacy-protecting infrastructure? Why do we need it and how we can build it? We'll look at Waku, the communication layer for Web3. We'll see how it uses ZKPs to incentivize and protect the Waku network. We'll also look at Zerokit, a library that makes it easier to use ZKPs in different environments. After reading this, I hope you'll better understand the importance of privacy-protecting infrastructure and how we can build it.

+ + + + \ No newline at end of file diff --git a/rlog/introducing-nwaku/index.html b/rlog/introducing-nwaku/index.html new file mode 100644 index 00000000..623de047 --- /dev/null +++ b/rlog/introducing-nwaku/index.html @@ -0,0 +1,172 @@ + + + + + +Introducing nwaku | Vac Research + + + + + + + + + + +
+

Introducing nwaku

by
11 min read

Introducing nwaku, a Nim-based Waku v2 client, including a summary of recent developments and preview of current and future focus areas.

Background

If you've been following our research log, +you'll know that many things have happened in the world of Waku v2 since our last general update. +In line with our long term goals, +we've introduced new protocols, +tweaked our existing protocols +and expanded our team. +We've also shown in a series of practical experiments that Waku v2 does indeed deliver on some of the theoretical advantages it was designed to have over its predecessor, Waku v1. +A sustainability and business workshop led to the formulation of a clearer vision for Vac as a team.

From the beginning, our protocol development has been complemented by various client implementations of these protocols, +first in Nim, +but later also in JavaScript +and Go. +A follow-up post will clarify the purposes, similarities and differences between these three clients. +The Nim client, is our reference implementation, +developed by the research team in parallel with the specs +and building on a home-grown implementation of libp2p. +The Nim client is suitable to run as a standalone adaptive node, +managed by individual operators +or as an encapsulated service node in other applications. +This post looks at some recent developments within the Nim client.

1. nim-waku is now known as nwaku

Pronounced NWHA-koo. +You may already have seen us refer to "nwaku" on Vac communication channels, +but it is now official: +The nim-waku Waku v2 client has been named nwaku. +Why? Well, we needed a recognizable name for our client that could easily be referred to in everyday conversations +and nim-waku just didn't roll off the tongue. +We've followed the example of the closely related nimbus project to find a punchier name +that explicitly links the client to both the Waku set of protocols and the Nim language.

2. Improvements in stability and performance

The initial implementation of Waku v2 demonstrated how the suite of protocols can be applied +to form a generalized, peer-to-peer messaging network, +while addressing a wide range of adaptive requirements. +This allowed us to lift several protocol specifications from raw to draft status, +indicating that a reference implementation exists for each. +However, as internal dogfooding increased and more external applications started using nwaku, +we stepped up our focus on the client's stability and performance. +This is especially true where we want nwaku to run unsupervised in a production environment +without any degradation in the services it provides.

Some of the more significant productionization efforts over the last couple of months included:

  1. Reworking the store implementation to maintain stable memory usage +while storing historical messages +and serving multiple clients querying history simultaneously. +Previously, a store node would see gradual service degradation +due to inefficient memory usage when responding to history queries. +Queries that often took longer than 8 mins now complete in under 100 ms.

  2. Improved peer management. +For example, filter nodes will now remove unreachable clients after a number of connection failures, +whereas they would previously keep accumulating dead peers.

  3. Improved disk usage. +nwaku nodes that persist historical messages on disk now manage their own storage size based on the --store-capacity. +This can significantly improve node start-up times.

More stability issues may be addressed in future as nwaku matures, +but we've noticed a marked improvement in the reliability of running nwaku nodes. +These include environments where nwaku nodes are expected to run with a long uptime. +Vac currently operates two long-running fleets of nwaku nodes, wakuv2.prod and wakuv2.test, +for internal dogfooding and +to serve as experimental bootstrapping nodes. +Status has also recently deployed similar fleets for production and testing based on nwaku. +Our goal is to have nwaku be stable, performant and flexible enough +to be an attractive option for operators to run and maintain their own Waku v2 nodes. +See also the future work section below for more on our general goal of nwaku for operators.

3. Improvements in interoperability

We've implemented several features that improve nwaku's usability in different environments +and its interoperability with other Waku v2 clients. +One major step forward here was adding support for both secure and unsecured WebSocket connections as libp2p transports. +This allows direct connectivity with js-waku +and paves the way for native browser usage. +We've also added support for parsing and resolving DNS-type multiaddrs, +i.e. multiaddress protocol schemes dns, dns4, dns6 and dnsaddr. +A nwaku node can now also be configured with its own IPv4 DNS domain name +allowing dynamic IP address allocation without impacting a node's reachability by its peers.

4. Peer discovery

Peer discovery is the method by which nodes become aware of each other’s existence. +The question of peer discovery in a Waku v2 network has been a focus area since the protocol was first conceptualized. +Since then several different approaches to discovery have been proposed and investigated. +We've implemented three discovery mechanisms in nwaku so far:

DNS-based discovery

nwaku nodes can retrieve an authenticated, updateable list of peers via DNS to bootstrap connection to a Waku v2 network. +Our implementation is based on EIP-1459.

GossipSub peer exchange

GossipSub Peer Exchange (PX) is a GossipSub v1.1 mechanism +whereby a pruning peer may provide a pruned peer with a set of alternative peers +where it can connect to reform its mesh. +This is a very suitable mechanism to gradually discover more peers +from an initial connection to a small set of bootstrap peers. +It is enabled in a nwaku node by default.

Waku Node Discovery Protocol v5

This is a DHT-based discovery mechanism adapted to store and relay node records. +Our implementation is based on Ethereum's Discovery v5 protocol +with some minor modifications to isolate our discovery network from that of Ethereum. +The decision to separate the Waku Discovery v5 network from Ethereum's was made on considerations of lookup efficiency. +This comes at a possible tradeoff in network resilience. +We are considering merging with the Ethereum Discovery v5 network in future, +or even implement a hybrid solution. +This post explains the decision and future steps.

5. Spam protection using RLN

An early addition to our suite of protocols was an extension of 11/WAKU-RELAY +that provided spam protection using Rate Limiting Nullifiers (RLN). +The nwaku client now contains a working demonstration and integration of RLN relay. +Check out this tutorial to see the protocol in action using a toy chat application built on nwaku. +We'd love for people to join us in dogfooding RLN spam protection as part of our operator incentive testnet. +Feel free to join our Vac Discord server +and head to the #rln channel for more information.

Future work

As we continue working towards our goal of a fully decentralized, generalized and censorship-resistant messaging protocol, +these are some of the current and future focus areas for nwaku:

Reaching out to operators:

We are starting to push for operators to run and maintain their own Waku v2 nodes, +preferably contributing to the default Waku v2 network as described by the default pubsub topic (/waku/2/default-waku/proto). +Amongst other things, a large fleet of stable operator-run Waku v2 nodes will help secure the network, +provide valuable services to a variety of applications +and ensure the future sustainability of both Vac as a research organization and the Waku suite of protocols.

We are targeting nwaku as the main option for operator-run nodes.
+Specifically, we aim to provide through nwaku:

  1. a lightweight and robust Waku v2 client. +This client must be first in line to support innovative and new Waku v2 protocols, +but configurable enough to serve the adaptive needs of various operators.
  2. an easy-to-follow guide for operators to configure, +set up and maintain their own nodes
  3. a set of operator-focused tools to monitor and maintain a running node

Better conversational security layer guarantees

Conversational security guarantees in Waku v2 are currently designed around the Status application. +Developers building their own applications on top of Waku would therefore +either have to reimplement a set of tools similar to Status +or build their own security solutions on the application layer above Waku. +We are working on a set of features built into Waku +that will provide the general security properties Waku users may desire +and do so in a modern and simple way. +This is useful for applications outside of Status that want similar security guarantees. +As a first step, we've already made good progress toward integrating noise handshakes as a key exchange mechanism in Waku v2.

Protocol incentivization

We want to design incentivization around our protocols to encourage desired behaviors in the Waku network, +rewarding nodes providing costly services +and punishing adversarial actions. +This will increase the overall security of the network +and encourage operators to run their own Waku nodes. +In turn, the sustainability of Vac as an organization will be better guaranteed. +As such, protocol incentivization was a major focus in our recent Vac Sustainability and Business Workshop. +Our first step here is to finish integrating RLN relay into Waku +with blockchain interaction to manage members, +punish spammers +and reward spam detectors. +After this, we want to design monetary incentivization for providers of store, lightpush and filter services. +This may also tie into a reputation mechanism for service nodes based on a network-wide consensus on service quality. +A big challenge for protocol incentivization is doing it in a private fashion, +so we can keep similar metadata protection guarantees as the Waku base layer. +This ties into our focus on Zero Knowledge tech.

Improved store capacity

The nwaku store currently serves as an efficient in-memory store for historical messages, +dimensioned by the maximum number of messages the store node is willing to keep. +This makes the nwaku store appropriate for keeping history over a short term +without any time-based guarantees, +but with the advantage of providing fast responses to history queries. +Some applications, such as Status, require longer-term historical message storage +with time-based dimensioning +to guarantee that messages will be stored for a specified minimum period. +Because of the relatively high cost of memory compared to disk space, +a higher capacity store, with time guarantees, should operate as a disk-only database of historical messages. +This is an ongoing effort.

Multipurpose discovery

In addition to the three discovery methods already implemented in nwaku, +we are working on improving discovery on at least three fronts:

Capability discovery:

Waku v2 nodes may be interested in peers with specific capabilities, for example:

  1. peers within a specific pubsub topic mesh,
  2. peers with store capability,
  3. store peers with x days of history for a specific content topic, etc.

Capability discovery entails mechanisms by which such capabilities can be advertised and discovered/negotiated. +One major hurdle to overcome is the increased complexity of finding a node with specific capabilities within the larger network (a needle in a haystack). +See the original problem statement for more.

Improvements in Discovery v5

Of the implemented discovery methods, +Discovery v5 best addresses our need for a decentralized and scalable discovery mechanism. +With the basic implementation done, +there are some improvements planned for Discovery v5, +including methods to increase security such as merging with the Ethereum Discovery v5 network, +introducing explicit NAT traversal +and utilizing topic advertisement. +The Waku v2 Discovery v5 Roadmap contains more details.

Generalized peer exchange

nwaku already implements GossipSub peer exchange. +We now need a general request-response mechanism outside of GossipSub +by which a node may learn about other Waku v2 nodes +by requesting and receiving a list of peers from a neighbor. +This could, for example, be a suitable way for resource-restricted devices to request a stronger peer +to perform a random Discovery v5 lookup on their behalf +or simply to be informed of a subset of the peers known to that neighbor. +See this issue for more.


This concludes a general outline of some of the main recent developments in the nwaku client +and a summary of the current and future focus areas. +Much more is happening behind the scenes, of course, +so for more information, or to join the conversation, +feel free to join our Vac Discord server +or to check out the nwaku repo on Github. +You can also view the changelog for past releases here.

References

+ + + + \ No newline at end of file diff --git a/rlog/kademlia-to-discv5/index.html b/rlog/kademlia-to-discv5/index.html new file mode 100644 index 00000000..18e2134c --- /dev/null +++ b/rlog/kademlia-to-discv5/index.html @@ -0,0 +1,26 @@ + + + + + +From Kademlia to Discv5 | Vac Research + + + + + + + + + + +
+

From Kademlia to Discv5

by
9 min read

A quick history of discovery in peer-to-peer networks, along with a look into discv4 and discv5, detailing what they are, how they work and where they differ.

If you've been working on Ethereum or adjacent technologies you've probably heard of discv4 or discv5. But what are they actually? How do they work and what makes them different? To answer these questions, we need to start at the beginning, so this post will assume that there is little knowledge on the subject so the post should be accessible for anyone.

The Beginning

Let's start right at the beginning: the problem of discovery and organization of nodes in peer-to-peer networks.

Early P2P file sharing technologies, such as Napster, would share information about who holds what file using a single server. A node would connect to the central server and give it a list of the files it owns. Another node would then connect to that central server, find a node that has the file it is looking for and contact that node. This however was a flawed system -- it was vulnerable to attacks and left a single party open to lawsuits.

It became clear that another solution was needed, and after years of research and experimentation, we were given the distributed hash table or DHT.

Distributed Hash Tables

In 2001 4 new protocols for such DHTs were conceived, Tapestry, Chord, CAN and Pastry, all of which made various trade-offs and changes in their core functionality, giving them unique characteristics.

But as said, they're all DHTs. So what is a DHT?

A distributed hash table (DHT) is essentially a distributed key-value list. Nodes participating in the DHT can easily retrieve the value for a key.

If we have a network with 9 key-value pairs and 3 nodes, ideally each node would store 3 (optimally 6 for redundancy) of those key-value pairs, meaning that if a key-value pair were to be updated, only part of the network would be responsible for ensuring that it is. The idea is that any node in the network would know where to find the specific key-value pair it is looking for based on how things are distributed amongst the nodes.

Kademlia

So now that we know what DHTs are, let's get to Kademlia, the predecessor of discv4. Kademlia was created by Petar Maymounkov and David Mazières in 2002. I will naively say that this is probably one of the most popular and most used DHT protocols. It's quite simple in how it works, so let's look at it.

In Kademlia, nodes and values are arranged by distance (in a very mathematical definition). This distance is not a geographical one, but rather based on identifiers. The distance between two identifiers is calculated using some distance function.

Kademlia uses an XOR as its distance function. An XOR is a function that outputs true only when inputs differ. Here is an example with some binary identifiers:

XOR 10011001
00110010
--------
10101011

The top in decimal numbers means that the distance between 153 and 50 is 171.

There are several reasons why XOR was taken:

  1. The distance from one ID to itself will be 0.
  2. Distance is symmetric, A to B is the same as B to A.
  3. Follows triangle inequality, if A, B and C are points on a triangle then the distance A to B is closer or equal to that of A to C plus the one from B to C.

In summary, this distance function allows a node to decide what is "close" to it and make decisions based on that "closeness".

Kademlia nodes store a routing table. This table contains multiple lists. Each subsequent list contains nodes which are a little further distanced than the ones included in the previous list. Nodes maintain detailed knowledge about nodes closest to them, and the further away a node is, the less knowledge the node maintains about it.

So let's say I want to find a specific node. What I would do is go to any node which I already know and ask them for all their neighbours closest to my target. I repeat this process for the returned neighbours until I find my target.

The same thing happens for values. Values have a certain distance from nodes and their IDs are structured the same way so we can calculate this distance. If I want to find a value, I simply look for the neighbours closest to that value's key until I find the one storing said value.

For Kademlia nodes to support these functions, there are several messages with which the protocol communicates.

  • PING - Used to check whether a node is still running.
  • STORE - Stores a value with a given key on a node.
  • FINDNODE - Returns the closest nodes requested to a given ID.
  • FINDVALUE - The same as FINDNODE, except if a node stores the specific value it will return it directly.

This is a very simplified explanation of Kademlia and skips various important details. For the full description, make sure to check out the paper or a more in-depth design specification

Discv4

Now after that history lesson, we finally get to discv4 (which stands for discovery v4), Ethereum's current node discovery protocol. The protocol itself is essentially based off of Kademlia, however it does away with certain aspects of it. For example, it does away with any usage of the value part of the DHT.

Kademlia is mainly used for the organisation of the network, so we only use the routing table to locate other nodes. Due to the fact that discv4 doesn't use the value portion of the DHT at all, we can throw away the FINDVALUE and STORE commands described by Kademlia.

The lookup method previously described by Kademlia describes how a node gets its peers. A node contacts some node and asks it for the nodes closest to itself. It does so until it can no longer find any new nodes.

Additionally, discv4 adds mutual endpoint verification. This is meant to ensure that a peer calling FINDNODE also participates in the discovery protocol.

Finally, all discv4 nodes are expected to maintain up-to-date ENR records. These contain information about a node. They can be requested from any node using a discv4-specific packet called ENRRequest.

If you want some more details on ENRs, check out one of my posts "Network Addresses in Ethereum"

Discv4 comes with its own range of problems however. Let's look at a few of them.

Firstly, the way discv4 works right now, there is no way to differentiate between node sub-protocols. This means for example that an Ethereum node could add an Ethereum Classic Node, Swarm or Whisper node to its DHT without realizing that it is invalid until more communication has happened. This inability to differentiate sub-protocols makes it harder to find specific nodes, such as Ethereum nodes with light-client support.

Next, in order to prevent replay attacks, discv4 uses timestamps. This however can lead to various issues when a host's clock is wrong. For more details, see the "Known Issues" section of the discv4 specification.

Finally, we have an issue with the way mutual endpoint verification works. Messages can get dropped and there is no way to tell if both peers have verified each other. This means that we could consider our peer verified while it does not consider us verified, causing it to drop the FINDNODE packet.

Discv5

Finally, let's look at discv5. The next iteration of discv4 and the discovery protocol which will be used by Eth 2.0. It aims at fixing various issues present in discv4.

The first change is the way FINDNODE works. In traditional Kademlia as well as in discv5, we pass an identifier. However, in discv5 we instead pass the logarithmic distance, meaning that a FINDNODE request gets a response containing all nodes at the specified logarithmic distance from the called node.

Logarithmic distance means we first calculate the distance and then run it through our log base 2 function. See:

log2(A xor B)

And the second, more important change, is that discv5 aims at solving one of the biggest issues of discv4: the differentiation of sub-protocols. It does this by adding topic tables. Topic tables are first in first out lists that contain nodes which have advertised that they provide a specific service. Nodes get themselves added to this list by registering ads on their peers.

As of writing, there is still an issue with this proposal. There is currently no efficient way for a node to place ads on multiple peers, since it would require separate requests for every peer which is inefficient in a large-scale network.

Additionally, it is unclear how many peers a node should place these ads on and exactly which peers to place them on. For more details, check out the issue devp2p#136.

There are a bunch more smaller changes to the protocol, but they are less important hence they were omitted from this summary.

Nevertheless, discv5 still does not resolve a couple issues present in discv4, such as unreliable endpoint verification. As of writing this post, there is currently no new method in discv5 to improve the endpoint verification process.

As you can see, discv5 is still a work in progress and has a few large challenges to overcome. However, if it does overcome them, it will most likely be a large improvement over more naive Kademlia implementations.


Hopefully this article helped explain what these discovery protocols are and how they work. If you're interested in their full specifications you can find them on github.

+ + + + \ No newline at end of file diff --git a/rlog/p2p-data-sync-for-mobile/index.html b/rlog/p2p-data-sync-for-mobile/index.html new file mode 100644 index 00000000..97303daa --- /dev/null +++ b/rlog/p2p-data-sync-for-mobile/index.html @@ -0,0 +1,28 @@ + + + + + +P2P Data Sync for Mobile | Vac Research + + + + + + + + + + +
+

P2P Data Sync for Mobile

by
12 min read

A research log. Reliable and decentralized, pick two.

Together with decanus, I've been working on the problem of data sync lately.

In building p2p messaging systems, one problem you quickly come across is the problem of reliably transmitting data. If there's no central server with high availability guarantees, you can't meaningfully guarantee that data has been transmitted. One way of solving this problem is through a synchronization protocol.

There are many synchronization protocols out there and I won't go into detail of how they differ with our approach here. Some common examples are Git and Bittorrent, but there are also projects like IPFS, Swarm, Dispersy, Matrix, Briar, SSB, etc.

Problem motivation

Why do we want to do p2p sync for mobile phones in the first place? There are three components to that question. One is on the value of decentralization and peer-to-peer, the second is on why we'd want to reliably sync data at all, and finally why mobile phones and other resource-restricted devices.

Why p2p?

For decentralization and p2p, there are both technical and social/philosophical reasons. Technically, having a user-run network means it can scale with the number of users. Data locality is also improved if you query data that's close to you, similar to distributed CDNs. The throughput is also improved if there are more places to get data from.

Socially and philosophically, there are several ways to think about it. Open and decentralized networks also relate to the idea of open standards, i.e. compare the longevity of AOL with IRC or Bittorrent. One is run by a company and is shut down as soon as it stops being profitable, the others live on. Additionally increasingly control of data and infrastructure is becoming a liability. By having a network with no one in control, everyone is. It's ultimately a form of democratization, more similar to organic social structures pre Big Internet companies. This leads to properties such as censorship resistance and coercion resistance, where we limit the impact a 3rd party might have a voluntary interaction between individuals or a group of people. Examples of this are plentiful in the world of Facebook, Youtube, Twitter and WeChat.

Why reliably sync data?

At risk of stating the obvious, reliably syncing data is a requirement for many problem domains. You don't get this by default in a p2p world, as it is unreliable, with nodes permissionlessly joining and leaving the network. In some cases you can get away with only ephemeral data, but usually you want some kind of guarantees. This is a must for a reliable group chat experience, for example, where messages are expected to arrive in a timely fashion and in some reasonable order. The same is true for messages that represent financial transactions, and so on.

Why mobile phones?

Most devices people use daily are mobile phones. It's important to provide the same or at least similar guarantees to more traditional p2p nodes that might run on a desktop computer or server. The alternative is to rely on gateways, which share many of the drawbacks of centralized control and are prone to censorship, control and surveillance.

More generally, resource restricted devices can differ in their capabilities. One example is smartphones, but others are: desktop, routers, Raspberry PIs, POS systems, and so on. The number and diversity of devices are exploding, and it's useful to be able to leverage this for various types of infrastructure. The alternative is to centralize on big cloud providers, which also lends itself to lack of democratization and censorship, etc.

Minimal Requirements

For requirements or design goals for a solution, here's what we came up with.

  1. MUST sync data reliably between devices. By reliably we mean having the ability to deal with messages being out of order, dropped, duplicated, or delayed.

  2. MUST NOT rely on any centralized services for reliability. By centralized services we mean any single point of failure that isn’t one of the endpoint devices.

  3. MUST allow for mobile-friendly usage. By mobile-friendly we mean devices that are resource restricted, mostly-offline and often changing network.

  4. MAY use helper services in order to be more mobile-friendly. Examples of helper services are decentralized file storage solutions such as IPFS and Swarm. These help with availability and latency of data for mostly-offline devices.

  5. MUST have the ability to provide causal consistency. By causal consistency we mean the commonly accepted definition in distributed systems literature. This means messages that are causally related can achieve a partial ordering.

  6. MUST support ephemeral messages that don’t need replication. That is, allow for messages that don’t need to be reliably transmitted but still need to be transmitted between devices.

  7. MUST allow for privacy-preserving messages and extreme data loss. By privacy-preserving we mean things such as exploding messages (self-destructing messages). By extreme data loss we mean the ability for two trusted devices to recover from a, deliberate or accidental, removal of data.

  8. MUST be agnostic to whatever transport it is running on. It should not rely on specific semantics of the transport it is running on, nor be tightly coupled with it. This means a transport can be swapped out without loss of reliability between devices.

MVDS - a minimum viable version

The first minimum viable version is in an alpha stage, and it has a specification, implementation and we have deployed it in a console client for end to end functionality. It's heavily inspired by Bramble Sync Protocol.

The spec is fairly minimal. You have nodes that exchange records over some secure transport. These records are of different types, such as OFFER, MESSAGE, REQUEST, and ACK. A peer keeps track of the state of messages for each node it is interacting with. There's also logic for message retransmission with exponential delay. The positive ACK and retransmission model is quite similar to how TCP is designed.

There are two different modes of syncing, interactive and batch mode. See sequence diagrams below.

Interactive mode: +

Interactive mode

Batch mode: +

Batch mode

Which mode should you choose? It's a tradeoff of latency and bandwidth. If you want to minimize latency, batch mode is better. If you care about preserving bandwidth interactive mode is better. The choice is up to each node.

Basic simulation

Initial ad hoc bandwidth and latency testing shows some issues with a naive approach. Running with the default simulation settings:

  • communicating nodes: 2
  • nodes using interactive mode: 2
  • interval between messages: 5s
  • time node is offline: 90%
  • nodes each node is sharing with: 2

we notice a huge overhead. More specifically, we see a ~5 minute latency overhead and a bandwidth multiplier of x100-1000, i.e. 2-3 orders of magnitude just for receiving a message with interactive mode, without acks.

Now, that seems terrible. A moment of reflection will reveal why that is. If each node is offline uniformly 90% of the time, that means that each record will be lost 90% of the time. Since interactive mode requires offer, request, payload (and then ack), that's three links just for Bob to receive the actual message.

Each failed attempt implies another retransmission. That means we have (1/0.1)^3 = 1000 expected overhead to receive a message in interactive mode. The latency follows naturally from that, with the retransmission logic.

Mostly-offline devices

The problem above hints at the requirements 3 and 4 above. While we did get reliable syncing (requirement 1), it came at a big cost.

There are a few ways of getting around this issue. One is having a store and forward model, where some intermediary node picks up (encrypted) messages and forwards them to the recipient. This is what we have in production right now at Status.

Another, arguably more pure and robust, way is having a remote log, where the actual data is spread over some decentralized storage layer, and you have a mutable reference to find the latest messages, similar to DNS.

What they both have in common is that they act as a sort of highly-available cache to smooth over the non-overlapping connection windows between two endpoints. Neither of them are required to get reliable data transmission.

Basic calculations for bandwidth multiplier

While we do want better simulations, and this is a work in progress, we can also look at the above scenarios using some basic calculations. This allows us to build a better intuition and reason about the problem without having to write code. Let's start with some assumptions:

  • two nodes exchanging a single message in batch mode
  • 10% uniformly random uptime for each node
  • in HA cache case, 100% uptime of a piece of infrastructure C
  • retransmission every epoch (with constant or exponential backoff)
  • only looking at average (p50) case

First case, no helper services

A sends a message to B, and B acks it.

A message -> B (10% chance of arrival)
A <- ack B (10% chance of arrival)

With a constant backoff, A will send messages at epoch 1, 2, 3, .... With exponential backoff and a multiplier of 2, this would be 1, 2, 4, 8, .... Let's assume constant backoff for now, as this is what will influence the success rate and thus the bandwidth multiplier.

There's a difference between time to receive and time to stop sending. Assuming each send attempt is independent, it takes on average 10 epochs for A's message to arrive with B. Furthermore:

  1. A will send messages until it receives an ACK.
  2. B will send ACK if it receives a message.

To get an average of one ack through, A needs to send 100 messages, and B send on average 10 acks. That's a multiplier of roughly a 100. That's roughly what we saw with the simulation above for receiving a message in interactive mode.

Second case, high-availability caching layer

Let's introduce a helper node or piece of infrastructure, C. Whenever A or B sends a message, it also sends it to C. Whenever A or B comes online, it queries for messages with C.

A message    -> B (10% chance of arrival)
A message -> C (100% chance of arrival)
B <- req/res -> C (100% chance of arrival)
A <- ack B (10% chance of arrival)
C <- ack B (100% chance of arrival)
A <- req/res -> C (100% chance of arrival)

What's the probability that A's messages will arrive at B? Directly, it's still 10%. But we can assume it's 100% that C picks up the message. (Giving C a 90% chance success rate doesn't materially change the numbers).

B will pick up A's message from C after an average of 10 epochs. Then B will send ack to A, which will also be picked up by C 100% of the time. Once A comes online again, it'll query C and receive B's ack.

Assuming we use exponential backoff with a multiplier of 2, A will send a message directly to B at epoch 1, 2, 4, 8 (assuming it is online). At this point, epoch 10, B will be online in the average case. These direct sends will likely fail, but B will pick the message up from C and send one ack, both directly to A and to be picked up by C. Once A comes online, it'll query C and receive the ack from B, which means it won't do any more retransmits.

How many messages have been sent? Not counting interactions with C, A sends 4 (at most) and B 1. Depending on if the interaction with C is direct or indirect (i.e. multicast), the factor for interaction with C will be ~2. This means the total bandwidth multiplier is likely to be <10, which is a lot more acceptable.

Since the syncing semantics are end-to-end, this is without relying on the reliability of C.

Caveat

Note that both of these are probabilistic arguments. They are also based on heuristics. More formal analysis would be desirable, as well as better simulations to experimentally verify them. In fact, the calculations could very well be wrong!

Future work

There are many enhancements that can be made and are desirable. Let's outline a few.

  1. Data sync clients. Examples of actual usage of data sync, with more interesting domain semantics. This also includes usage of sequence numbers and DAGs to know what content is missing and ought to be synced.

  2. Remote log. As alluded to above, this is necessary. It needs a more clear specification and solid proof of concepts.

  3. More efficient ways of syncing with large number of nodes. When the number of nodes goes up, the algorithmic complexity doesn't look great. This also touches on things such as ambient content discovery.

  4. More robust simulations and real-world deployments. Existing simulation is ad hoc, and there are many improvements that can be made to gain more confidence and identify issues. Additionally, better formal analysis.

  5. Example usage over multiple transports. Including things like sneakernet and meshnets. The described protocol is designed to work over unstructured, structured and private p2p networks. In some cases it can leverage differences in topology, such as multicast, or direct connections.

+ + + + \ No newline at end of file diff --git a/rlog/page/2/index.html b/rlog/page/2/index.html new file mode 100644 index 00000000..0109a197 --- /dev/null +++ b/rlog/page/2/index.html @@ -0,0 +1,26 @@ + + + + + +Research Blog | Vac Research + + + + + + + + + + +
+

Research Blog

Blog

by
11 min read

Introducing nwaku, a Nim-based Waku v2 client, including a summary of recent developments and preview of current and future focus areas.

by
21 min read

This post is going to give you an overview of how spam protection can be achieved in Waku Relay through rate-limiting nullifiers. We will cover a summary of spam-protection methods in centralized and p2p systems, and the solution overview and details of the economic spam-protection method. The open issues and future steps are discussed in the end.

by
8 min read

A research log. Read on to find out what is going on with Waku v2, a messaging protocol. What has been happening? What is coming up next?

+ + + + \ No newline at end of file diff --git a/rlog/page/3/index.html b/rlog/page/3/index.html new file mode 100644 index 00000000..2ff001a1 --- /dev/null +++ b/rlog/page/3/index.html @@ -0,0 +1,26 @@ + + + + + +Research Blog | Vac Research + + + + + + + + + + +
+

Research Blog

Blog

by
9 min read

A quick history of discovery in peer-to-peer networks, along with a look into discv4 and discv5, detailing what they are, how they work and where they differ.

by
6 min read

A research log. What's the current state of Waku? How many users does it support? What are the bottlenecks? What's next?

by
6 min read

Vac is a modular peer-to-peer messaging stack, with a focus on secure messaging. Overview of terms, stack and open problems.

+ + + + \ No newline at end of file diff --git a/rlog/page/4/index.html b/rlog/page/4/index.html new file mode 100644 index 00000000..9923800c --- /dev/null +++ b/rlog/page/4/index.html @@ -0,0 +1,26 @@ + + + + + +Research Blog | Vac Research + + + + + + + + + + +
+

Research Blog

Blog
+ + + + \ No newline at end of file diff --git a/rlog/presenting-js-waku/index.html b/rlog/presenting-js-waku/index.html new file mode 100644 index 00000000..ce30a32f --- /dev/null +++ b/rlog/presenting-js-waku/index.html @@ -0,0 +1,106 @@ + + + + + +Presenting JS-Waku: Waku v2 in the Browser | Vac Research + + + + + + + + + + +
+

Presenting JS-Waku: Waku v2 in the Browser

by
7 min read

JS-Waku is bringing Waku v2 to the browser. Learn what we achieved so far and what is next in our pipeline!

For the past 3 months, we have been working on bringing Waku v2 to the browser. Our aim is to empower dApps with Waku v2, and it led to the creation of a new library. We believe now is a good time to introduce it!

Waku v2

First, let's review what Waku v2 is and what problem it is trying to solve.

Waku v2 comes from a need to have a more scalable, better optimised solution for the Status app to achieve decentralised +communications on resource restricted devices (i.e., mobile phones).

The Status chat feature was initially built over Whisper. +However, Whisper has a number of caveats which makes it inefficient for mobile phones. +For example, with Whisper, all devices are receiving all messages which is not ideal for limited data plans.

To remediate this, a Waku mode (then Waku v1), based on devp2p, was introduced. +To further enable web and restricted resource environments, Waku v2 was created based on libp2p. +The migration of the Status chat feature to Waku v2 is currently in progress.

We see the need for such a solution in the broader Ethereum ecosystem, beyond Status. This is why we are building Waku v2 as a decentralised communication platform for all to use and build on. If you want to read more about Waku v2 and what it aims to achieve, check out What's the Plan for Waku v2?.

Since last year, we have been busy defining and implementing Waku v2 protocols in nim-waku, +from which you can build wakunode2. +Wakunode2 is an adaptive and modular Waku v2 node, +it allows users to run their own node and use the Waku v2 protocols they need. +The nim-waku project doubles as a library, that can be used to add Waku v2 support to native applications.

Waku v2 in the browser

We believe that dApps and wallets can benefit from the Waku network in several ways. +For some dApps, it makes sense to enable peer-to-peer communications. +For others, machine-to-machine communications would be a great asset. +For example, in the case of a DAO, +Waku could be used for gas-less voting. +Enabling the DAO to notify their users of a new vote, +and users to vote without interacting with the blockchain and spending gas.

Murmur was the first attempt to bring Whisper to the browser, acting as a bridge between devp2p and libp2p. Once Waku v2 was started and there was a native implementation on top of libp2p, a chat POC was created to demonstrate the potential of Waku v2 in a web environment. It showed how using js-libp2p with a few modifications enabled access to the Waku v2 network. There were still some unresolved challenges. For example, nim-waku only supports TCP connections, which are not supported by browser applications. Hence, to connect to other nodes, the POC was connecting to a NodeJS proxy application using websockets, which in turn could connect to wakunode2 via TCP.

However, to enable dApp and Wallet developers to easily integrate Waku in their product, +we need to give them a library that is easy to use and works out of the box: +introducing JS-Waku.

JS-Waku is a JavaScript library that allows your dApp, wallet or other web app to interact with the Waku v2 network. +It is available right now on npm:

npm install js-waku.

As it is written in TypeScript, types are included in the npm package to allow easy integration with TypeScript, ClojureScript and other typed languages that compile to JavaScript.

Key Waku v2 protocols are already available: +message, store, relay and light push, +enabling your dApp to:

  • Send and receive near-instant messages on the Waku network (relay),
  • Query nodes for messages that may have been missed, e.g. due to poor cellular network (store),
  • Send messages with confirmations (light push).

JS-Waku needs to operate in the same context from which Waku v2 was born: a restricted environment where connectivity or uptime are not guaranteed; JS-Waku brings Waku v2 to the browser.

Achievements so far

We focused the past month on developing a ReactJS Chat App. +The aim was to create enough building blocks in JS-Waku to enable this showcase web app that +we now use for dogfooding purposes.

Most of the effort was on getting familiar with the js-libp2p library +that we heavily rely on. +JS-Waku is the second implementation of Waku v2 protocol, +so a lot of effort on interoperability was needed. +For example, to ensure compatibility with the nim-waku reference implementation, +we run our tests against wakunode2 as part of the CI.

This interoperability effort helped solidify the current Waku v2 specifications: +By clarifying the usage of topics +(#327, #383), +fix discrepancies between specs and nim-waku +(#418, #419) +and fix small nim-waku & nim-libp2p bugs +(#411, #439).

To fully access the waku network, JS-Waku needs to enable web apps to connect to nim-waku nodes. +A standard way to do so is using secure websockets as it is not possible to connect directly to a TCP port from the browser. +Unfortunately websocket support is not yet available in nim-libp2p so +we ended up deploying websockify alongside wakunode2 instances.

As we built the web chat app, +we were able to fine tune the API to provide a simple and succinct interface. +You can start a node, connect to other nodes and send a message in less than ten lines of code:

// Minimal example: start a Waku node, connect to the Status fleet,
// and broadcast a message over Waku Relay.
// Fix: getStatusFleetNodes and WakuMessage are used below, so they
// must be imported too — the original snippet only imported Waku and
// would throw a ReferenceError at runtime.
import { getStatusFleetNodes, Waku, WakuMessage } from 'js-waku'

// Create and start a Waku node with default options.
const waku = await Waku.create({})

// Discover the Status fleet bootstrap nodes and dial each of them.
const nodes = await getStatusFleetNodes()
await Promise.all(nodes.map((addr) => waku.dial(addr)))

// Build a message on a versioned content topic and publish it via relay.
const msg = WakuMessage.fromUtf8String(
'Here is a message!',
'/my-cool-app/1/my-use-case/proto',
)
await waku.relay.send(msg)

We have also put a bounty at 0xHack for using JS-Waku +and running a workshop. +We were thrilled to have a couple of hackers create new software using our libraries. +One of the projects aimed to create a decentralised, end-to-end encrypted messenger app, +similar to what the ETH-DM protocol aims to achieve. +Another project was a decentralised Twitter platform. +Such projects allow us to prioritize the work on JS-Waku and understand how DevEx can be improved.

As more developers use JS-Waku, we will evolve the API to allow for more custom and fine-tune usage of the network +while preserving this out of the box experience.

What's next?

Next, we are directing our attention towards Developer Experience. +We already have documentation available but we want to provide more: +Tutorials, various examples +and showing how JS-Waku can be used with Web3.

By prioritizing DevEx we aim to enable JS-Waku integration in dApps and wallets. +We think JS-Waku builds a strong case for machine-to-machine (M2M) communications. +The first use cases we are looking into are dApp notifications: +Enabling dApp to notify their user directly in their wallets! +Leveraging Waku as a decentralised infrastructure and standard so that users do not have to open their dApp to be notified +of events such as DAO voting.

We already have some POC in the pipeline to enable voting and polling on the Waku network, +allowing users to save gas by not broadcasting each individual vote on the blockchain.

To facilitate said applications, we are looking at improving integration with Web3 providers by providing examples +of signing, validating, encrypting and decrypting messages using Web3. +Waku is privacy conscious, so we will also provide signature and encryption examples decoupled from users' Ethereum identity.

As you can read, we have grand plans for JS-Waku and Waku v2. +There is a lot to do, and we would love some help so feel free to +check out the new role in our team: +js-waku: Wallet & Dapp Integration Developer. +We also have a number of positions open to work on Waku protocol and nim-waku.

If you are as excited as us by JS-Waku, why not build a dApp with it? +You can find documentation on the npmjs page.

Developer or not, you can come chat with us using WakuJS Web Chat or chat2. You can get support in #dappconnect-support on Vac Discord or Telegram. If you have any ideas on how Waku could enable a specific dapp or use case, do share, we are always keen to hear it.

+ + + + \ No newline at end of file diff --git a/rlog/remote-log/index.html b/rlog/remote-log/index.html new file mode 100644 index 00000000..facb5833 --- /dev/null +++ b/rlog/remote-log/index.html @@ -0,0 +1,30 @@ + + + + + +P2P Data Sync with a Remote Log | Vac Research + + + + + + + + + + +
+

P2P Data Sync with a Remote Log

by
5 min read

A research log. Asynchronous P2P messaging? Remote logs to the rescue!

A big problem when doing end-to-end data sync between mobile nodes is that most devices are offline most of the time. With a naive approach, you quickly run into issues of 'ping-pong' behavior, where messages have to be constantly retransmitted. We saw some basic calculations of what this bandwidth multiplier looks like in a previous post.

While you could do some background processing, this is really battery-draining, and on iOS these capabilities are limited. A better approach instead is to loosen the constraint that two nodes need to be online at the same time. How do we do this? There are two main approaches, one is the store and forward model, and the other is a remote log.

In the store and forward model, we use an intermediate node that forwards messages on behalf of the recipient. In the remote log model, you instead replicate the data onto some decentralized storage, and have a mutable reference to the latest state, similar to DNS. While both work, the latter is somewhat more elegant and "pure", as it has less strict requirements of an individual node's uptime. Both act as a highly-available cache to smoothen over non-overlapping connection windows between endpoints.

In this post we are going to describe how such a remote log schema could work. Specifically, how it enhances p2p data sync and takes care of the following requirements:

  1. MUST allow for mobile-friendly usage. By mobile-friendly we mean devices that are resource restricted, mostly-offline and often changing network.
  2. MAY use helper services in order to be more mobile-friendly. Examples of helper services are decentralized file storage solutions such as IPFS and Swarm. These help with availability and latency of data for mostly-offline devices.

Remote log

A remote log is a replication of a local log. This means a node can read data from a node that is offline.

The spec is in an early draft stage and can be found here. A very basic spike / proof-of-concept can be found here.

Definitions

TermDefinition
CASContent-addressed storage. Stores data that can be addressed by its hash.
NSName system. Associates mutable data to a name.
Remote logReplication of a local log at a different location.

Roles

There are four fundamental roles:

  1. Alice
  2. Bob
  3. Name system (NS)
  4. Content-addressed storage (CAS)

The remote log is the data format of what is stored in the name system.

"Bob" can represent anything from 0 to N participants. Unlike Alice, Bob only needs read-only access to NS and CAS.

Flow

Figure 1: Remote log data synchronization.

Data format

The remote log lets receiving nodes know what data they are missing. Depending on the specific requirements and capabilities of the nodes and name system, the information can be referred to differently. We distinguish between three rough modes:

  1. Fully replicated log
  2. Normal sized page with CAS mapping
  3. "Linked list" mode - minimally sized page with CAS mapping

A remote log is simply a mapping from message identifiers to their corresponding address in a CAS:

Message Identifier (H1)CAS Hash (H2)
H1_3H2_3
H1_2H2_2
H1_1H2_1
address to next page

The numbers here correspond to messages. Optionally, the content itself can be included, just like it normally would be sent over the wire. This bypasses the need for a dedicated CAS and additional round-trips, with a trade-off in bandwidth usage.

Message Identifier (H1)Content
H1_3C3
H1_2C2
H1_1C1
address to next page

Both patterns can be used in parallel, e.g. by storing the last k messages directly and using CAS pointers for the rest. Together with the next_page semantics, this gives users flexibility in terms of bandwidth and latency/indirection, all the way from a simple linked list to a fully replicated log. The latter is useful for things like backups on durable storage.

Interaction with MVDS

vac.mvds.Message payloads are the only payloads that MUST be uploaded. Other messages types MAY be uploaded, depending on the implementation.

Future work

The spec is still in an early draft stage, so it is expected to change. Same with the proof of concept. More work is needed on getting a fully featured proof of concept with specific CAS and NS instances. E.g. Swarm and Swarm Feeds, or IPFS and IPNS, or something else.

For data sync in general:

  • Make consistency guarantees more explicit for app developers with support for sequence numbers and DAGs, as well as the ability to send non-synced messages. E.g. ephemeral typing notifications, linear/sequential history and causal consistency/DAG history
  • Better semantics and scalability for multi-user sync contexts, e.g. CRDTs and joining multiple logs together
  • Better usability in terms of application layer usage (data sync clients) and supporting more transports

PS1. Thanks everyone who submitted great logo proposals for Vac!

PPS2. Next week on October 10th decanus and I will be presenting Vac at Devcon, come say hi :)

+ + + + \ No newline at end of file diff --git a/rlog/rln-anonymous-dos-prevention/index.html b/rlog/rln-anonymous-dos-prevention/index.html new file mode 100644 index 00000000..d525ab25 --- /dev/null +++ b/rlog/rln-anonymous-dos-prevention/index.html @@ -0,0 +1,70 @@ + + + + + +Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku | Vac Research + + + + + + + + + + +
+

Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku

by
7 min read

Rate Limiting Nullifiers in practice, applied to an anonymous p2p network, like Waku.

Introduction

Rate Limiting Nullifier (RLN) is a zero-knowledge gadget that allows users to prove 2 pieces of information,

  1. They belong to a permissioned membership set
  2. Their rate of signaling abides by a fixed number that has been previously declared

The "membership set" introduced above, is in the form of a sparse, indexed merkle tree. +This membership set can be maintained on-chain, off-chain or as a hybrid depending on the network's storage costs. +Waku makes use of a hybrid membership set, +where insertions are tracked in a smart contract. +In addition, each Waku node maintains a local copy of the tree, +which is updated upon each insertion.

Users register themselves with a hash of a locally generated secret, +which is then inserted into the tree at the next available index. +After having registered, users can prove their membership by proving their knowledge of the pre-image of the respective leaf in the tree. +The leaf hashes are also referred to as commitments of the respective users. +The actual proof is done by a Merkle Inclusion Proof, which is a type of ZK proof.

The circuit ensures that the user's secret does indeed hash to a leaf in the tree, +and that the provided Merkle proof is valid.

After a User generates this Merkle proof, +they can transmit it to other users, +who can verify the proof. +Including a message's hash within the proof generation, +additionally guarantees integrity of that message.

A malicious user could generate multiple proofs per epoch. However, when multiple proofs are generated per epoch, the malicious user's secret is exposed, which strongly disincentivizes this attack. This mechanism is further described in the malicious user secret interpolation mechanism section below.

Note: This blog post describes rln-v1, which excludes the range check in favor of a global rate limit for all users, +which is once per time window. This version is currently in use in waku-rln-relay.

RLN Protocol parameters

Given below is the set of cryptographic primitives, +and constants that are used in the RLN protocol.

  1. Proving System: groth16
  2. Elliptic Curve: bn254 (aka bn128) (not to be confused with the 254 bit Weierstrass curve)
  3. Finite Field: Prime-order subgroup of the group of points on the bn254 curve
  4. Default Merkle Tree Height: 20
  5. Hashing algorithm: Poseidon
  6. Merkle Tree: Sparse Indexed Merkle Tree
  7. Messages per epoch: 1
  8. Epoch duration: 10 seconds

Malicious User secret interpolation mechanism

note: all the parameters mentioned below are elements in the finite field mentioned above.

The private inputs to the circuit are as follows: -

identitySecret: the randomly generated secret of the user
identityPathIndex: the index of the commitment derived from the secret
pathElements: elements included in the path to the index of the commitment

Following are the public inputs to the circuit -

x: hash of the signal to the finite field
rlnIdentifier: application-specific identifier which this proof is being generated for
epoch: the timestamp which this proof is being generated for

The outputs of the circuit are as follows: -

y: result of Shamir's secret sharing calculation
root: root of the Merkle tree obtained after applying the inclusion proof
nullifier: uniquely identifies a message, derived from rlnIdentifier, epoch, and the user's secret

With the above data in mind, following is the circuit pseudocode -

identityCommitment = Poseidon([identitySecret])
root = MerkleInclusionProof(identityCommitment, identityPathIndex, pathElements)
externalNullifier = Poseidon([epoch, rlnIdentifier])
a1 = Poseidon([identitySecret, externalNullifier])
y = identitySecret + a1 * x
nullifier = Poseidon([a1])

To interpolate the secret of a user who has sent multiple signals during the same epoch to the same rln-based application, we may make use of the following formula -

$$a_1 = \frac{y_1 - y_2}{x_1 - x_2}$$

where $x_1$, $y_1$ and $x_2$, $y_2$ are shares from different messages.

Subsequently, we may use one pair of the shares, $x_1$ and $y_1$, to obtain the identitySecret:

$$identitySecret = y_1 - a_1 \cdot x_1$$

This enables RLN to be used for rate limiting with a global limit. For arbitrary limits, +please refer to an article written by @curryrasul, rln-v2.

Waku's problem with DoS

In a decentralized, privacy focused messaging system like Waku, +Denial of Service (DoS) vulnerabilities are very common, and must be addressed to promote network scale and optimal bandwidth utilization.

DoS prevention with user metadata

There are a couple of ways a user can be rate-limited, either -

  1. IP Logging
  2. KYC Logging

Both IP and KYC logging prevent systems from being truly anonymous, and hence, cannot be used as a valid DoS prevention mechanism for Waku.

RLN can be used as an alternative, which provides the best of both worlds, i.e., a permissioned membership set, as well as anonymous signaling. However, we are bound by k-anonymity rules of the membership set.

Waku-RLN-Relay is a libp2p pubsub validator that verifies if a proof attached to a given message is valid. +In case the proof is valid, the message is relayed.

Performance analysis

Test bench specs: AMD EPYC 7502P 32-Core, 4x32GB DDR4 Reg.ECC Memory

This simulation was conducted by @alrevuelta, and is described in more detail here.

The simulation included 100 waku nodes running in parallel.

Proof generation times - +

img

Proof verification times - +

img

A spammer node publishes 3000 msg/epoch, which is detected by all connected nodes, and subsequently disconnect to prevent further spam - +

img

Security analysis

Barbulescu and Duquesne conclude that the bn254 curve has only 100 bits of security. Since the bn254 curve has a small embedding degree, it is vulnerable to the MOV attack. However, the MOV attack is only applicable to pairings, and not to the elliptic curve itself. It is acceptable to use the bn254 curve for RLN, since the circuit does not make use of pairings.

An analysis on the number of rounds in the Poseidon hash function was done, which concluded that the hashing rounds should not be reduced.

The smart contracts have not been audited, and are not recommended for real world deployments yet.

Storage analysis

$$commitment\_size = 32\ bytes \\ tree\_height = 20 \\ total\_leaves = 2^{20} \\ max\_tree\_size = total\_leaves \cdot commitment\_size \\ max\_tree\_size = 2^{20} \cdot 32 = 33{,}554{,}432\ bytes \\ \therefore max\_tree\_size = 33.55\ megabytes$$

The storage overhead introduced by RLN is minimal. +RLN only requires 34 megabytes of storage, which poses no problem on most end-user hardware, with the exception of IoT/microcontrollers. +Still, we are working on further optimizations allowing proof generation without having to store the full tree.

The bare minimum requirements to run RLN

With proof generation time in sub-second latency, along with low storage overhead for the tree, +it is possible for end users to generate and verify RLN proofs on a modern smartphone.

Following is a demo provided by @rramos that demonstrates +waku-rln-relay used in react native.

Warning: The react native sdk will be deprecated soon, and the above demo should serve as a PoC for RLN on mobiles

RLN usage guide

Zerokit implements APIs that allow users to handle operations on the tree, as well as generate/verify RLN proofs.

Our main implementation of RLN can be accessed via this Rust crate, which is documented here. It can be used in other languages via the FFI API, which is documented here. The usage of RLN in Waku is detailed in our RLN Implementers guide, which provides step-by-step instructions on how to run Waku-RLN-Relay.

Following is a diagram that will help understand the dependency tree -

rln-dep-tree

Future work

  • Optimizations to zerokit for proof generation time.
  • Incrementing tree depth from 20 to 32, to allow more memberships.
  • Optimizations to the smart contract.
  • An ability to signal validity of a message in different time windows.
  • Usage of proving systems other than Groth16.

References

+ + + + \ No newline at end of file diff --git a/rlog/rln-light-verifiers/index.html b/rlog/rln-light-verifiers/index.html new file mode 100644 index 00000000..98d59c62 --- /dev/null +++ b/rlog/rln-light-verifiers/index.html @@ -0,0 +1,58 @@ + + + + + +Verifying RLN Proofs in Light Clients with Subtrees | Vac Research + + + + + + + + + + +
+

Verifying RLN Proofs in Light Clients with Subtrees

by
5 min read

How resource-restricted devices can verify RLN proofs fast and efficiently.

Introduction

Recommended previous reading: Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku.

This post expands upon ideas described in the previous post, +focusing on how resource-restricted devices can verify RLN proofs fast and efficiently.

Previously, it was required to fetch all the memberships from the smart contract, +construct the merkle tree locally, +and derive the merkle root, +which is subsequently used to verify RLN proofs.

This process is not feasible for resource-restricted devices since it involves a lot of RPC calls, computation and fault tolerance. +One cannot expect a mobile phone to fetch all the memberships from the smart contract and construct the merkle tree locally.

Constraints and requirements

An alternative solution to the one proposed in this post is to construct the merkle tree on-chain, +and have the root accessible with a single RPC call. +However, this approach increases gas costs for inserting new memberships and may not be feasible until it is optimized further with batching mechanisms, etc.

The other methods have been explored in more depth here.

Following are the requirements and constraints for the solution proposed in this post:

  1. Cheap membership insertions.
  2. As few RPC calls as possible to reduce startup time.
  3. Merkle root of the tree is available on-chain.
  4. No centralized services to sequence membership insertions.
  5. Map inserted commitments to the block in which they were inserted.

Metrics on sync time for a tree with 2,653 leaves

The following metrics are based on the current implementation of RLN in the Waku gen0 network.

Test bench

  • Hardware: Macbook Air M2, 16GB RAM
  • Network: 120 Megabits/sec
  • Nwaku commit: e61e4ff
  • RLN membership set contract: 0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4
  • Deployed block number: 4,230,716
  • RLN Membership set depth: 20
  • Hash function: PoseidonT3 (which is a gas guzzler)
  • Max size of the membership set: 2^20 = 1,048,576 leaves

Metrics

  • Time to sync the whole tree: 4 minutes
  • RPC calls: 702
  • Number of leaves: 2,653

One can argue that the time to sync the tree at the current state is not that bad. However, the number of RPC calls is a concern, which scales linearly with the number of blocks since the contract was deployed. This is because the implementation fetches all events from the contract, chunking 2,000 blocks at a time. This is done to avoid hitting the limit of 10,000 events per call, which is a limitation of popular RPC providers.

Proposed solution

From a theoretical perspective, +one could construct the merkle tree on-chain, +in a view call, in-memory. +However, this is not feasible due to the gas costs associated with it.

To compute the root of a Merkle tree with $2^{20}$ leaves, it costs approximately 2 billion gas. With Infura and Alchemy capping the gas limit to 350M and 550M gas respectively, it is not possible to compute the root of the tree in a single call.

Acknowledging that Polygon Miden and Penumbra both make use of a tiered commitment tree, +we propose a similar approach for RLN.

A tiered commitment tree is a tree which is sharded into multiple smaller subtrees, +each of which is a tree in itself. +This allows scaling in terms of the number of leaves, +as well as reducing state bloat by just storing the root of a subtree when it is full instead of all its leaves.

Here, the question arises: +What is the maximum number of leaves in a subtree with which the root can be computed in a single call?

It costs approximately 217M gas to compute the root of a Merkle tree with $2^{10}$ leaves.

This is a feasible number for a single call, and hence we propose a tiered commitment tree with a maximum of $2^{10}$ leaves in a subtree, where the number of subtrees is $2^{10}$. Therefore, the maximum number of leaves in the tree is $2^{20}$ (the same as the current implementation).

img

Insertion

When a commitment is inserted into the tree it is first inserted into the first subtree. +When the first subtree is full the next insertions go into the second subtree and so on.

Syncing

When syncing the tree, +one only needs to fetch the roots of the subtrees. +The root of the full tree can be computed in-memory or on-chain.

This allows us to derive the following relation:

$number\_of\_rpc\_calls = number\_of\_filled\_subtrees + 1$

This is a significant improvement over the current implementation, +which requires fetching all the memberships from the smart contract.

Gas costs

The gas costs for inserting a commitment into the tree are the same as the current implementation except it consists of an extra SSTORE operation to store the shardIndex of the commitment.

Events

The events emitted by the contract are the same as the current implementation, +appending the shardIndex of the commitment.

Proof of concept

A proof of concept implementation of the tiered commitment tree is available here, +and is deployed on Sepolia at 0xE7987c70B54Ff32f0D5CBbAA8c8Fc1cAf632b9A5.

It is compatible with the current implementation of the RLN verifier.

Future work

  1. Optimize the gas costs of the tiered commitment tree.
  2. Explore using a different number of leaves under a given node in the tree (currently set to 2).

Conclusion

The tiered commitment tree is a promising approach to reduce the number of RPC calls required to sync the tree and reduce the gas costs associated with computing the root of the tree. +Consequently, it allows for a more scalable and efficient RLN verifier.

References

+ + + + \ No newline at end of file diff --git a/rlog/rln-relay/index.html b/rlog/rln-relay/index.html new file mode 100644 index 00000000..0b68c7aa --- /dev/null +++ b/rlog/rln-relay/index.html @@ -0,0 +1,35 @@ + + + + + +Privacy-preserving p2p economic spam protection in Waku v2 | Vac Research + + + + + + + + + + +
+

Privacy-preserving p2p economic spam protection in Waku v2

by
21 min read

This post is going to give you an overview of how spam protection can be achieved in Waku Relay through rate-limiting nullifiers. We will cover a summary of spam-protection methods in centralized and p2p systems, and the solution overview and details of the economic spam-protection method. The open issues and future steps are discussed in the end.

Introduction

This post is going to give you an overview of how spam protection can be achieved in Waku Relay protocol2 through Rate-Limiting Nullifiers3 4 or RLN for short.

Let me give a little background about Waku(v2)1. Waku is a privacy-preserving peer-to-peer (p2p) messaging protocol for resource-restricted devices. Being p2p means that Waku relies on no central server. Instead, peers collaboratively deliver messages in the network. Waku uses GossipSub16 as the underlying routing protocol (as of the writeup of this post). At a high level, GossipSub is based on publisher-subscriber architecture. That is, peers congregate around topics they are interested in and can send messages to topics. Each message gets delivered to all peers subscribed to the topic. In GossipSub, a peer has a constant number of direct connections/neighbors. In order to publish a message, the author forwards its message to a subset of neighbors. The neighbors proceed similarly till the message gets propagated in the network of the subscribed peers. The message publishing and routing procedures are part of the Waku Relay17 protocol.

Figure 1: An overview of privacy-preserving p2p economic spam protection in Waku v2 RLN-Relay protocol.

What do we mean by spamming?

In centralized messaging systems, a spammer usually indicates an entity that uses the messaging system to send an unsolicited message (spam) to large numbers of recipients. However, in Waku with a p2p architecture, spam messages not only affect the recipients but also all the other peers involved in the routing process as they have to spend their computational power/bandwidth/storage capacity on processing spam messages. As such, we define a spammer as an entity that uses the messaging system to publish a large number of messages in a short amount of time. The messages issued in this way are called spam. In this definition, we disregard the intention of the spammer as well as the content of the message and the number of recipients.

Possible Solutions

Has the spamming issue been addressed before? Of course yes! Here is an overview of the spam protection techniques with their trade-offs and use-cases. In this overview, we distinguish between protection techniques that are targeted for centralized messaging systems and those for p2p architectures.

Centralized Messaging Systems

In traditional centralized messaging systems, spam usually signifies unsolicited messages sent in bulk or messages with malicious content like malware. Protection mechanisms include

  • authentication through some piece of personally identifiable information e.g., phone number
  • checksum-based filtering to protect against messages sent in bulk
  • challenge-response systems
  • content filtering on the server or via a proxy application

These methods exploit the fact that the messaging system is centralized and a global view of the users' activities is available based on which spamming patterns can be extracted and defeated accordingly. Moreover, users are associated with an identifier e.g., a username which enables the server to profile each user e.g., to detect suspicious behavior like spamming. Such profiling possibility is against the user's anonymity and privacy.

Among the techniques enumerated above, authentication through phone numbers is a somewhat economic-incentive measure, as providing multiple valid phone numbers will be expensive for the attacker. Notice that while using an expensive authentication method can reduce the number of accounts owned by a single spammer, it cannot address the spam issue entirely. This is because the spammer can still send bulk messages through one single account. For this approach to be effective, a centralized mediator is essential. That is why such a solution would not fit the p2p environments where no centralized control exists.

P2P Systems

What about spam prevention in p2p messaging platforms? There are two techniques, namely Proof of Work8 deployed by Whisper9 and Peer scoring6 method (namely reputation-based approach) adopted by LibP2P. However, each of these solutions has its own shortcomings for real-life use-cases as explained below.

Proof of work

The idea behind the Proof Of Work i.e., POW8 is to make messaging a computationally costly operation hence lowering the messaging rate of all the peers including the spammers. In specific, the message publisher has to solve a puzzle and the puzzle is to find a nonce such that the hash of the message concatenated with the nonce has at least z leading zeros. z is known as the difficulty of the puzzle. Since the hash function is one-way, peers have to brute-force to find a nonce. Hashing is a computationally-heavy operation so is the brute-force. While solving the puzzle is computationally expensive, it is comparatively cheap to verify the solution.

POW is also used as the underlying mining algorithm in Ethereum and Bitcoin blockchain. There, the goal is to contain the mining speed and allow the decentralized network to come to a consensus, or agree on things like account balances and the order of transactions.

While the use of POW makes perfect sense in Ethereum / Bitcoin blockchain, it shows practical issues in heterogeneous p2p messaging systems with resource-restricted peers. Some peers won't be able to carry the designated computation and will be effectively excluded. Such exclusion showed to be practically an issue in applications like Status, which used to rely on POW for spam-protection, to the extent that the difficulty level had to be set close to zero.

Peer Scoring

The peer scoring method6 that is utilized by libp2p is to limit the number of messages issued by a peer in connection to another peer. That is, each peer monitors all the peers to which it is directly connected and adjusts their messaging quota i.e., to route or not route their messages depending on their past activities. For example, if a peer detects its neighbor is sending more than x messages per month, it can drop its quota to z.x where z is less than one. The shortcoming of this solution is that scoring is based on peers' local observations and the concept of the score is defined in relation to one single peer. This leaves room for an attack where a spammer can make connections to k peers in the system and publish k.(x-1) messages by exploiting all of its k connections. Another attack scenario is through botnets consisting of a large number of bots, e.g., a million. The attacker rents a botnet and inserts each of them as a legitimate peer to the network, and each can publish x-1 messages per month7.

Economic-Incentive Spam protection

Is this the end of our spam-protection journey? Shall we simply give up and leave spammers be? Certainly not! +Waku RLN-Relay gives us a p2p spam-protection method which:

  • suits p2p systems and does not rely on any central entity.
  • is efficient i.e., with no unreasonable computational, storage, memory, and bandwidth requirement! as such, it fits the network of heterogeneous peers.
  • respects users privacy unlike reputation-based and centralized methods.
  • deploys economic-incentives to contain spammers' activity. Namely, there is a financial sacrifice for those who want to spam the system. How? follow along ...

We devise a general rule to save everyone's life and that is

No one can publish more than M messages per epoch without being financially charged!

We set M to 1 for now, but this can be any arbitrary value. You may be thinking "This is too restrictive! Only one per epoch?". Don't worry, we set the epoch to a reasonable value so that it does not slow down the communication of innocent users but will make the life of spammers harder! Epoch here can be every second, as defined by UTC date-time +-20s.

The remainder of this post is all about the story of how to enforce this limit on each user's messaging rate as well as how to impose the financial cost when the limit gets violated. This brings us to the Rate Limiting Nullifiers and how we integrate this technique into Waku v2 (in specific the Waku Relay protocol) to protect our valuable users against spammers.

Technical Terms

Zero-knowledge proof: Zero-knowledge proof (ZKP)14 allows a prover to show a verifier that they know something, without revealing what that something is. This means you can do the trust-minimized computation that is also privacy-preserving. As a basic example, instead of showing your ID when going to a bar you simply give them proof that you are over 18, without showing the doorman your id. In this write-up, by ZKP we essentially mean zkSNARK15 which is one of the many types of ZKPs.

Threshold Secret Sharing Scheme: (m,n) Threshold secret-sharing is a method by which you can split a secret value s into n pieces in a way that the secret s can be reconstructed by having m pieces (m <= n). The economic-incentive spam protection utilizes a (2,n) secret sharing realized by Shamir Secret Sharing Scheme13.

Overview: Economic-Incentive Spam protection through Rate Limiting Nullifiers

Context: We started the idea of economic-incentive spam protection more than a year ago and conducted a feasibility study to identify blockers and unknowns. The results are published in our prior post. Since then major progress has been made and the prior identified blockers that are listed below are now addressed. Kudos to Barry WhiteHat, Onur Kilic, Koh Wei Jie for all of their hard work, research, and development which made this progress possible.

  • the proof time22 which was initially in the order of minutes ~10 mins and now is almost 0.5 seconds
  • the prover key size21 which was initially ~110MB and now is ~3.9MB
  • the lack of Shamir logic19 which is now implemented and part of the RLN repository4
  • the concern regarding the potential multi-party computation for the trusted setup of zkSNARKs which got resolved20
  • the lack of end-to-end integration that now we made it possible, have it implemented, and are going to present it in this post. New blockers are also sorted out during the e2e integration which we will discuss in the Feasibility and Open Issues section.

Now that you have more context, let's see how the final solution works. The fundamental point is to make it economically costly to send more than your share of messages and to do so in a privacy-preserving and e2e fashion. To do that we have the following components:

  • 1- Group: We manage all the peers inside a large group (later we can split peers into smaller groups, but for now consider only one). The group management is done via a smart contract which is devised for this purpose and is deployed on the Ethereum blockchain.
  • 2- Membership: To be able to send messages and in specific for the published messages to get routed by all the peers, publishing peers have to register to the group. Membership involves setting up public and private key pairs (think of it as the username and password). The private key remains at the user side but the public key becomes a part of the group information on the contract (publicly available) and everyone has access to it. Public keys are not human-generated (like usernames) and instead they are random numbers, as such, they do not reveal any information about the owner (think of public keys as pseudonyms). Registration is mandatory for the users who want to publish a message, however, users who only want to listen to the messages are more than welcome and do not have to register in the group.
  • Membership fee: Membership is not for free! each peer has to lock a certain amount of funds during the registration (this means peers have to have an Ethereum account with sufficient balance for this sake). This fund is safely stored on the contract and remains intact unless the peer attempts to break the rules and publish more than one message per epoch.
  • Zero-knowledge Proof of membership: Do you want your message to get routed to its destination, fine, but you have to prove that you are a member of the group (sorry, no one can escape the registration phase!). Now, you may be thinking that should I attach my public key to my message to prove my membership? Absolutely Not! we said that our solution respects privacy! membership proofs are done in a zero-knowledge manner that is each message will carry cryptographic proof asserting that "the message is generated by one of the current members of the group", so your identity remains private and your anonymity is preserved!
  • Slashing through secret sharing: Till now it does not seem like we can catch spammers, right? yes, you are right! now comes the exciting part, detecting spammers and slashing them. The core idea behind the slashing is that each publishing peer (not routing peers!) has to integrate a secret share of its private key inside the message. The secret share is deterministically computed over the private key and the current epoch. The content of this share is harmless for the peer's privacy (it looks random) unless the peer attempts to publish more than one message in the same epoch hence disclosing more than one secret share of its private key. Indeed two distinct shares of the private key under the same epoch are enough to reconstruct the entire private key. Then what should you do with the recovered private key? hurry up! go to the contract and withdraw the private key and claim its fund and get rich!! Are you thinking what if spammers attach junk values instead of valid secret shares? Of course, that wouldn't be cool! so, there is a zero-knowledge proof for this sake as well where the publishing peer has to prove that the secret shares are generated correctly.

A high-level overview of the economic spam protection is shown in Figure 1.

Flow

In this section, we describe the flow of the economic-incentive spam detection mechanism from the viewpoint of a single peer. An overview of this flow is provided in Figure 3.

Setup and Registration

A peer willing to publish a message is required to register. Registration is moderated through a smart contract deployed on the Ethereum blockchain. The state of the contract contains the list of registered members' public keys. An overview of registration is illustrated in Figure 2.

For the registration, a peer creates a transaction that sends x amount of Ether to the contract. The peer who has the "private key" sk associated with that deposit would be able to withdraw x Ether by providing valid proof. Note that sk is initially only known by the owning peer however it may get exposed to other peers in case the owner attempts spamming the system i.e., sending more than one message per epoch. +The following relation holds between the sk and pk i.e., pk = H(sk) where H denotes a hash function. +

Figure 2: Registration

Maintaining the membership Merkle Tree

The ZKP of membership that we mentioned before relies on the representation of the entire group as a Merkle Tree. The tree construction and maintenance is delegated to the peers (the initial idea was to keep the tree on the chain as part of the contract, however, the cost associated with member deletion and insertion was high and unreasonable, please see Feasibility and Open Issues for more details). As such, each peer needs to build the tree locally and sync itself with the contract updates (peer insertion and deletion) to mirror them on its tree. +Two pieces of information of the tree are important as they enable peers to generate zero-knowledge proofs. One is the root of the tree and the other is the membership proof (or the authentication path). The tree root is public information whereas the membership proof is private data (or more precisely the index of the peer in the tree).

Publishing

In order to publish at a given epoch, each message must carry a proof i.e., a zero-knowledge proof signifying that the publishing peer is a registered member, and has not exceeded the messaging rate at the given epoch.

Recall that the enforcement of the messaging rate was through associating a secret shared version of the peer's sk into the message together with a ZKP that the secret shares are constructed correctly. As for the secret sharing part, the peer generates the following data:

  1. shareX
  2. shareY
  3. nullifier

The pair (shareX, shareY) is the secret shared version of sk that are generated using Shamir secret sharing scheme. Having two such pairs for an identical nullifier results in full disclosure of peer's sk and hence burning the associated deposit. Note that the nullifier is a deterministic value derived from sk and epoch therefore any two messages issued by the same peer (i.e., using the same sk) for the same epoch are guaranteed to have identical nullifiers.

Finally, the peer generates a zero-knowledge proof zkProof asserting the membership of the peer in the group and the correctness of the attached secret share (shareX, shareY) and the nullifier. In order to generate a valid proof, the peer needs to have two private inputs i.e., its sk and its authentication path. Other inputs are the tree root, epoch, and the content of the message.

Privacy Hint: Note that the authentication path of each peer depends on the recent list of members (hence changes when new peers register or leave). As such, it is recommended (and necessary for privacy/anonymity) that the publisher updates her authentication path based on the latest status of the group and attempts the proof using the updated version.

An overview of the publishing procedure is provided in Figure 3.

Routing

Upon the receipt of a message, the routing peer needs to decide whether to route it or not. This decision relies on the following factors:

  1. If the epoch value attached to the message has a non-reasonable gap with the routing peer's current epoch then the message must be dropped (this is to prevent a newly registered peer spamming the system by messaging for all the past epochs).
  2. The message MUST contain valid proof that gets verified by the routing peer. +If the preceding checks are passed successfully, then the message is relayed. In case of an invalid proof, the message is dropped. If spamming is detected, the publishing peer gets slashed (see Spam Detection and Slashing).

An overview of the routing procedure is provided in Figure 3.

Spam Detection and Slashing

In order to enable local spam detection and slashing, routing peers MUST record the nullifier, shareX, and shareY of any incoming message conditioned that it is not spam and has valid proof. To do so, the peer should follow the following steps.

  1. The routing peer first verifies the zkProof and drops the message if not verified.
  2. Otherwise, it checks whether a message with an identical nullifier has already been relayed.
    • a) If such message exists and its shareX and shareY components are different from the incoming message, then slashing takes place (if the shareX and shareY fields of the previously relayed message is identical to the incoming message, then the message is a duplicate and shall be dropped).
    • b) If none found, then the message gets relayed.

An overview of the slashing procedure is provided in Figure 3. +

Figure 3: Publishing, Routing and Slashing workflow.

Feasibility and Open Issues

We've come a long way since a year ago: blockers resolved, and now we have implemented it end-to-end. We learned a lot and could identify further issues and unknowns, some of which are blocking getting to production. A summary of the identified issues is presented below.

Storage overhead per peer

Currently, peers are supposed to maintain the entire tree locally and it imposes storage overhead which is linear in the size of the group (see this issue11 for more details). One way to cope with this is to use the light-node and full-node paradigm in which only a subset of peers who are more resourceful retain the tree whereas the light nodes obtain the necessary information by interacting with the full nodes. Another way to approach this problem is through a more storage efficient method (as described in this research issue12) where peers store a partial view of the tree instead of the entire tree. Keeping the partial view lowers the storage complexity to O(log(N)) where N is the size of the group. There are still unknown unknowns to this solution, as such, it must be studied further to become fully functional.

Cost-effective way of member insertion and deletion

Currently, the cost associated with RLN-Relay membership is around 30 USD10. We aim at finding a more cost-effective approach. Please feel free to share with us your solution ideas in this regard in this issue.

Exceeding the messaging rate via multiple registrations

While the economic-incentive solution has an economic incentive to discourage spamming, we should note that there is still expensive attack(s)23 that a spammer can launch to break the messaging rate limit. That is, the attacker can pay for multiple legit registrations e.g., k, hence being able to publish k messages per epoch. We believe that the higher the membership fee is, the less probable would be such an attack, hence a stronger level of spam-protection can be achieved. Following this argument, the high fee associated with the membership (which we listed above as an open problem) can indeed be contributing to a better protection level.

Conclusion and Future Steps

As discussed in this post, Waku RLN Relay can achieve a privacy-preserving economic spam protection through rate-limiting nullifiers. The idea is to financially discourage peers from publishing more than one message per epoch. In specific, exceeding the messaging rate results in a financial charge. Those who violate this rule are called spammers and their messages are spam. The identification of spammers does not rely on any central entity. Also, the financial punishment of spammers is cryptographically guaranteed. In this solution, privacy is guaranteed since: 1) Peers do not have to disclose any piece of personally identifiable information in any phase i.e., neither in the registration nor in the messaging phase 2) Peers can prove that they have not exceeded the messaging rate in a zero-knowledge manner and without leaving any trace to their membership accounts. Furthermore, all the computations are light, hence this solution fits the heterogeneous p2p messaging system. Note that the zero-knowledge proof parts are handled through zkSNARKs and the benchmarking result can be found in the RLN benchmark report5.

Future steps:

We are still at the PoC level, and the development is in progress. As our future steps,

  • we would like to evaluate the running time associated with the Merkle tree operations. Indeed, the need to locally store Merkle tree on each peer was one of the unknowns discovered during this PoC and yet the concrete benchmarking result in this regard is not available.
  • We would also like to pursue our storage-efficient Merkle Tree maintenance solution in order to lower the storage overhead of peers.
  • In line with the storage optimization, the full-node light-node structure is another path to follow.
  • Another possible improvement is to replace the membership contract with a distributed group management scheme e.g., through distributed hash tables. This is to address possible performance issues that the interaction with the Ethereum blockchain may cause. For example, the registration transactions are subject to delay as they have to be mined before being visible in the state of the membership contract. This means peers have to wait for some time before being able to publish any message.

Acknowledgement

Thanks to Onur Kılıç for his explanation and pointers and for assisting with development and runtime issues. Also thanks to Barry Whitehat for his time and insightful comments. Special thanks to Oskar Thoren for his constructive comments and his guides during the development of this PoC and the writeup of this post.

References


  1. RLN-Relay specification: https://rfc.vac.dev/spec/17/
  2. RLN documentation: https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?both
  3. RLN repositories: https://github.com/kilic/RLN and https://github.com/kilic/rlnapp
  4. Waku v2: https://rfc.vac.dev/spec/10/
  5. GossipSub: https://docs.libp2p.io/concepts/publish-subscribe/
  6. Waku Relay: https://rfc.vac.dev/spec/11/
  7. Proof of work: http://www.infosecon.net/workshop/downloads/2004/pdf/clayton.pdf and https://link.springer.com/content/pdf/10.1007/3-540-48071-4_10.pdf
  8. EIP-627 Whisper: https://eips.ethereum.org/EIPS/eip-627
  9. Peer Scoring: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring
  10. Peer scoring security issues: https://github.com/vacp2p/research/issues/44
  11. Zero Knowledge Proof: https://dl.acm.org/doi/abs/10.1145/3335741.3335750 and https://en.wikipedia.org/wiki/Zero-knowledge_proof
  12. zkSNARKs: https://link.springer.com/chapter/10.1007/978-3-662-49896-5_11 and https://coinpare.io/whitepaper/zcash.pdf
  13. Shamir Secret Sharing Scheme: https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing
  14. zkSNARKs proof time: https://github.com/vacp2p/research/issues/7
  15. Prover key size: https://github.com/vacp2p/research/issues/8
  16. The lack of Shamir secret sharing in zkSNARKs: https://github.com/vacp2p/research/issues/10
  17. The MPC required for zkSNARKs trusted setup: https://github.com/vacp2p/research/issues/9
  18. Storage overhead per peer: https://github.com/vacp2p/research/issues/57
  19. Storage-efficient Merkle Tree maintenance: https://github.com/vacp2p/research/pull/54
  20. Cost-effective way of member insertion and deletion: https://github.com/vacp2p/research/issues/56
  21. Attack on the messaging rate: https://github.com/vacp2p/specs/issues/251
  22. RLN Benchmark: https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Benchmarks
+ + + + \ No newline at end of file diff --git a/rlog/rln-v3/index.html b/rlog/rln-v3/index.html new file mode 100644 index 00000000..af0612a9 --- /dev/null +++ b/rlog/rln-v3/index.html @@ -0,0 +1,73 @@ + + + + + +RLN-v3: Towards a Flexible and Cost-Efficient Implementation | Vac Research + + + + + + + + + + +
+

RLN-v3: Towards a Flexible and Cost-Efficient Implementation

by
7 min read

Improving on the previous version of RLN by allowing dynamic epoch sizes.

Introduction

Recommended previous reading: Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku.

The premise of RLN-v3 is to have a variable message rate per variable epoch, +which can be explained in the following way:

  • RLN-v1: “Alice can send 1 message per global epoch”

    Practically, this is 1 msg/second

  • RLN-v2: “Alice can send x messages per global epoch”

    Practically, this is x msg/second

  • RLN-v3: “Alice can send x messages within a time interval y chosen by herself. The funds she has to pay are affected by both the number of messages and the chosen time interval. Other participants can choose different time intervals fitting their specific needs.”

    Practically, this is x msg/y seconds

RLN-v3 allows higher flexibility and ease of payment/stake for users who have more predictable usage patterns and therefore, +more predictable bandwidth usage on a p2p network (Waku, etc.).

For example:

  • An AMM that broadcasts bids, asks, and fills over Waku may require a lot of throughput in the smallest epoch possible and hence may register an RLN-v3 membership of 10000 msg/1 second. +They could do this with RLN-v2, too.
  • Alice, a casual user of a messaging app built on Waku, who messages maybe 3-4 people infrequently during the day, may register an RLN-v3 membership of 100 msg/hour, +which would not be possible in RLN-v2 considering the global epoch was set to 1 second. +With RLN-v2, Alice would have to register with a membership of 1 msg/sec, +which would translate to 3600 msg/hour. This is much higher than her usage and would +result in her overpaying to stake into the membership set.
  • A sync service built over Waku, +whose spec defines that it MUST broadcast a set of public keys every hour, +may register an RLN-v3 membership of 1 msg/hour, +cutting down the costs to enter the membership set earlier.

Theory

Modification to leaves set in the membership Merkle tree

To ensure that a user’s epoch size (user_epoch_limit) is included within their membership we must modify the user’s commitment/leaf in the tree to contain it. +A user’s commitment/leaf in the tree is referred to as a rate_commitment, +which was previously derived from their public key (identity_commitment) +and their variable message rate (user_message_limit).

In RLN-v2:

$rate\_commitment = poseidon([identity\_commitment, user\_message\_limit])$

In RLN-v3:

$rate\_commitment = poseidon([identity\_commitment, user\_message\_limit, user\_epoch\_limit])$

Modification to circuit inputs

To detect double signaling, +we make use of a circuit output nullifier, +which remains the same if a user generates a proof with the same message_id and external_nullifier, +where the external_nullifier and nullifier are defined as:

$external\_nullifier = poseidon([epoch, rln\_identifier])$

$nullifier = poseidon([identity\_secret, external\_nullifier, message\_id])$

Where:

  • epoch is defined as the Unix epoch timestamp with seconds precision.
  • rln_identifier uniquely identifies an application for which a user submits a proof.
  • identity_secret is the private key of the user.
  • message_id is the sequence number of the user’s message within user_message_limit in an epoch.

In RLN-v2, the global epoch was 1 second, +hence we did not need to perform any assertions to the epoch’s value inside the circuit, +and the validation of the epoch was handled off-circuit (i.e., too old, too large, bad values, etc.).

In RLN-v3, we propose that the epoch that is passed into the circuit +must be a valid multiple of user_epoch_limit +since the user may pass in values of the epoch which do not directly correlate with the user_epoch_limit.

For example:

  • A user with user_epoch_limit of 120 passes in an epoch of 237 and generates user_message_limit proofs with it; they can then increment the epoch by 1 and generate another user_message_limit proofs with it, thereby allowing them to bypass the messages-per-epoch restriction.

One could say that we could perform this validation outside of the circuit, +but we maintain the user_epoch_limit as a private input to the circuit so that the user is not deanonymized by the anonymity set connected to that user_epoch_limit. +Since user_epoch_limit is kept private, +the verifier does not have access to that value and cannot perform validation on it.

If we ensure that the epoch is a multiple of user_epoch_limit, +we have the following scenarios:

  • A user with user_epoch_limit of 120 +passes in an epoch of 237. +Proof generation fails since the epoch is not a multiple of user_epoch_limit.
  • A user with user_epoch_limit of 120 +passes in an epoch of 240 and +can generate user_message_limit proofs without being slashed.

Since we perform operations on the epoch, we must include it as a circuit input (previously, it was removed from the circuit inputs to RLN-v2).

Therefore, the new circuit inputs are as follows:

// unchanged
private identity_secret
private user_message_limit
private message_id
private pathElements[]
private pathIndices[]
public x // messageHash

// new/changed
private user_epoch_limit
private user_epoch_quotient // epoch/user_epoch_limit to assert within circuit
public epoch
public rln_identifier

The circuit outputs remain the same.

Additional circuit constraints

  1. Since we accept the epoch, user_epoch_quotient, and user_epoch_limit, +we must ensure that the relation between these 3 values is preserved. I.e.:

    epoch = user\_epoch\_limit * user\_epoch\_quotient
  2. To ensure no overflows/underflows occur in the above multiplication, +we must constrain the inputs of epoch, user_epoch_quotient, and user_epoch_limit. +We have assumed 3600 to be the maximum valid size of the user_epoch_limit.

size(epoch) \leq 64\ bits \\ size(user\_epoch\_limit) \leq 12\ bits \\ user\_epoch\_limit \leq 3600 \\ user\_epoch\_limit \leq epoch \\ user\_epoch\_quotient < user\_epoch\_limit

Modifications to external epoch validation (Waku, etc.)

For receivers of an RLN-v3 proof +to detect if a message is too old, we must use the higher bound of the user_epoch_limit, which has been set to 3600. +The trade-off here is that we allow hour-old messages to propagate within the network.

Modifications to double signaling detection scheme (Waku, etc.)

For verifiers of RLN-v1/v2 proofs, +a log of nullifiers seen in the last epoch is maintained, +and if there is a match with a pre-existing nullifier, +double signaling has been detected and the verifier MAY proceed to slash the spamming user.

With the RLN-v3 scheme, +we need to increase the size of the nullifier log used, +which previously cleared itself every second to the higher bound of the user_epoch_limit, which is 3600. +Now, the RLN proof verifier must clear the nullifier log every 3600 seconds to satisfactorily detect double signaling.

The implementation

An implementation of the RLN-v3 scheme in gnark can be found here.

Comments on performance

  • Hardware: Macbook Air M2, 16GB RAM
  • Circuit: RLN-v3
  • Proving system: Groth16
  • Framework: gnark
  • Elliptic curve: bn254 (aka bn128) (not to be confused with the 254-bit Weierstrass curve)
  • Finite field: Prime-order subgroup of the group of points on the bn254 curve
  • Default Merkle tree height: 20
  • Hashing algorithm: Poseidon
  • Merkle tree: Sparse Indexed Merkle Tree

Proving

The proving time for the RLN-v3 circuit is 90ms for a single proof.

Verification

The verification time for the RLN-v3 circuit is 1.7ms for a single proof.

Conclusion

The RLN-v3 scheme introduces a new epoch-based message rate-limiting scheme to the RLN protocol. +It enhances the user's flexibility in setting their message limits and cost-optimizes their stake.

Future work

  • Implementing the RLN-v3 scheme in Zerokit
  • Implementing the RLN-v3 scheme in Waku
  • Formal security analysis of the RLN-v3 scheme

References

+ + + + \ No newline at end of file diff --git a/rlog/rss.xml b/rlog/rss.xml new file mode 100644 index 00000000..ee53e9a3 --- /dev/null +++ b/rlog/rss.xml @@ -0,0 +1,1779 @@ + + + + Vac Research Blog + https://vac.dev/rlog + Vac Research Blog + Mon, 13 May 2024 12:00:00 GMT + https://validator.w3.org/feed/docs/rss2.html + https://github.com/jpmonette/feed + en + + <![CDATA[RLN-v3: Towards a Flexible and Cost-Efficient Implementation]]> + https://vac.dev/rlog/rln-v3 + https://vac.dev/rlog/rln-v3 + Mon, 13 May 2024 12:00:00 GMT + + Improving on the previous version of RLN by allowing dynamic epoch sizes.

Introduction

Recommended previous reading: Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku.

The premise of RLN-v3 is to have a variable message rate per variable epoch, +which can be explained in the following way:

  • RLN-v1: “Alice can send 1 message per global epoch”

    Practically, this is 1 msg/second

  • RLN-v2: “Alice can send x messages per global epoch”

    Practically, this is x msg/second

  • RLN-v3: “Alice can send x messages within a time interval y chosen by herself. +The funds she has to pay are affected by both the number of messages and the chosen time interval. +Other participants can choose different time intervals fitting their specific needs.”

    Practically, this is x msg/y seconds

RLN-v3 allows higher flexibility and ease of payment/stake for users who have more predictable usage patterns and therefore, +more predictable bandwidth usage on a p2p network (Waku, etc.).

For example:

  • An AMM that broadcasts bids, asks, and fills over Waku may require a lot of throughput in the smallest epoch possible and hence may register an RLN-v3 membership of 10000 msg/1 second. +They could do this with RLN-v2, too.
  • Alice, a casual user of a messaging app built on Waku, who messages maybe 3-4 people infrequently during the day, may register an RLN-v3 membership of 100 msg/hour, +which would not be possible in RLN-v2 considering the global epoch was set to 1 second. +With RLN-v2, Alice would have to register with a membership of 1 msg/sec, +which would translate to 3600 msg/hour. This is much higher than her usage and would +result in her overpaying to stake into the membership set.
  • A sync service built over Waku, +whose spec defines that it MUST broadcast a set of public keys every hour, +may register an RLN-v3 membership of 1 msg/hour, +cutting down the costs to enter the membership set earlier.

Theory

Modification to leaves set in the membership Merkle tree

To ensure that a user’s epoch size (user_epoch_limit) is included within their membership we must modify the user’s commitment/leaf in the tree to contain it. +A user’s commitment/leaf in the tree is referred to as a rate_commitment, +which was previously derived from their public key (identity_commitment) +and their variable message rate (user_message_limit).

In RLN-v2:

rate\_commitment = poseidon([identity\_commitment, user\_message\_limit])

In RLN-v3:

rate\_commitment = poseidon([identity\_commitment, user\_message\_limit, user\_epoch\_limit])

Modification to circuit inputs

To detect double signaling, +we make use of a circuit output nullifier, +which remains the same if a user generates a proof with the same message_id and external_nullifier, +where the external_nullifier and nullifier are defined as:

external\_nullifier = poseidon([epoch, rln\_identifier]) \\ nullifier = poseidon([identity\_secret, external\_nullifier, message\_id])

Where:

  • epoch is defined as the Unix epoch timestamp with seconds precision.
  • rln_identifier uniquely identifies an application for which a user submits a proof.
  • identity_secret is the private key of the user.
  • message_id is the sequence number of the user’s message within user_message_limit in an epoch.

In RLN-v2, the global epoch was 1 second, +hence we did not need to perform any assertions to the epoch’s value inside the circuit, +and the validation of the epoch was handled off-circuit (i.e., too old, too large, bad values, etc.).

In RLN-v3, we propose that the epoch that is passed into the circuit +must be a valid multiple of user_epoch_limit +since the user may pass in values of the epoch which do not directly correlate with the user_epoch_limit.

For example:

  • A user with user_epoch_limit of 120 +passes in an epoch of 237 +generates user_message_limit proofs with it, +can increment the epoch by 1, +and generate user_message_limit proofs with it, +thereby allowing them to bypass the message per epoch restriction.

One could say that we could perform this validation outside of the circuit, +but we maintain the user_epoch_limit as a private input to the circuit so that the user is not deanonymized by the anonymity set connected to that user_epoch_limit. +Since user_epoch_limit is kept private, +the verifier does not have access to that value and cannot perform validation on it.

If we ensure that the epoch is a multiple of user_epoch_limit, +we have the following scenarios:

  • A user with user_epoch_limit of 120 +passes in an epoch of 237. +Proof generation fails since the epoch is not a multiple of user_epoch_limit.
  • A user with user_epoch_limit of 120 +passes in an epoch of 240 and +can generate user_message_limit proofs without being slashed.

Since we perform operations on the epoch, we must include it as a circuit input (previously, it was removed from the circuit inputs to RLN-v2).

Therefore, the new circuit inputs are as follows:

// unchanged
private identity_secret
private user_message_limit
private message_id
private pathElements[]
private pathIndices[]
public x // messageHash

// new/changed
private user_epoch_limit
private user_epoch_quotient // epoch/user_epoch_limit to assert within circuit
public epoch
public rln_identifier

The circuit outputs remain the same.

Additional circuit constraints

  1. Since we accept the epoch, user_epoch_quotient, and user_epoch_limit, +we must ensure that the relation between these 3 values is preserved. I.e.:

    epoch = user\_epoch\_limit * user\_epoch\_quotient
  2. To ensure no overflows/underflows occur in the above multiplication, +we must constrain the inputs of epoch, user_epoch_quotient, and user_epoch_limit. +We have assumed 3600 to be the maximum valid size of the user_epoch_limit.

size(epoch) \leq 64\ bits \\ size(user\_epoch\_limit) \leq 12\ bits \\ user\_epoch\_limit \leq 3600 \\ user\_epoch\_limit \leq epoch \\ user\_epoch\_quotient < user\_epoch\_limit

Modifications to external epoch validation (Waku, etc.)

For receivers of an RLN-v3 proof +to detect if a message is too old, we must use the higher bound of the user_epoch_limit, which has been set to 3600. +The trade-off here is that we allow hour-old messages to propagate within the network.

Modifications to double signaling detection scheme (Waku, etc.)

For verifiers of RLN-v1/v2 proofs, +a log of nullifiers seen in the last epoch is maintained, +and if there is a match with a pre-existing nullifier, +double signaling has been detected and the verifier MAY proceed to slash the spamming user.

With the RLN-v3 scheme, +we need to increase the size of the nullifier log used, +which previously cleared itself every second to the higher bound of the user_epoch_limit, which is 3600. +Now, the RLN proof verifier must clear the nullifier log every 3600 seconds to satisfactorily detect double signaling.

The implementation

An implementation of the RLN-v3 scheme in gnark can be found here.

Comments on performance

  • Hardware: Macbook Air M2, 16GB RAM
  • Circuit: RLN-v3
  • Proving system: Groth16
  • Framework: gnark
  • Elliptic curve: bn254 (aka bn128) (not to be confused with the 254-bit Weierstrass curve)
  • Finite field: Prime-order subgroup of the group of points on the bn254 curve
  • Default Merkle tree height: 20
  • Hashing algorithm: Poseidon
  • Merkle tree: Sparse Indexed Merkle Tree

Proving

The proving time for the RLN-v3 circuit is 90ms for a single proof.

Verification

The verification time for the RLN-v3 circuit is 1.7ms for a single proof.

Conclusion

The RLN-v3 scheme introduces a new epoch-based message rate-limiting scheme to the RLN protocol. +It enhances the user's flexibility in setting their message limits and cost-optimizes their stake.

Future work

  • Implementing the RLN-v3 scheme in Zerokit
  • Implementing the RLN-v3 scheme in Waku
  • Formal security analysis of the RLN-v3 scheme

References

]]>
+
+ + <![CDATA[Verifying RLN Proofs in Light Clients with Subtrees]]> + https://vac.dev/rlog/rln-light-verifiers + https://vac.dev/rlog/rln-light-verifiers + Fri, 03 May 2024 12:00:00 GMT + + How resource-restricted devices can verify RLN proofs fast and efficiently.

Introduction

Recommended previous reading: Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku.

This post expands upon ideas described in the previous post, +focusing on how resource-restricted devices can verify RLN proofs fast and efficiently.

Previously, it was required to fetch all the memberships from the smart contract, +construct the merkle tree locally, +and derive the merkle root, +which is subsequently used to verify RLN proofs.

This process is not feasible for resource-restricted devices since it involves a lot of RPC calls, computation and fault tolerance. +One cannot expect a mobile phone to fetch all the memberships from the smart contract and construct the merkle tree locally.

Constraints and requirements

An alternative solution to the one proposed in this post is to construct the merkle tree on-chain, +and have the root accessible with a single RPC call. +However, this approach increases gas costs for inserting new memberships and may not be feasible until it is optimized further with batching mechanisms, etc.

The other methods have been explored in more depth here.

Following are the requirements and constraints for the solution proposed in this post:

  1. Cheap membership insertions.
  2. As few RPC calls as possible to reduce startup time.
  3. Merkle root of the tree is available on-chain.
  4. No centralized services to sequence membership insertions.
  5. Map inserted commitments to the block in which they were inserted.

Metrics on sync time for a tree with 2,653 leaves

The following metrics are based on the current implementation of RLN in the Waku gen0 network.

Test bench

  • Hardware: Macbook Air M2, 16GB RAM
  • Network: 120 Megabits/sec
  • Nwaku commit: e61e4ff
  • RLN membership set contract: 0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4
  • Deployed block number: 4,230,716
  • RLN Membership set depth: 20
  • Hash function: PoseidonT3 (which is a gas guzzler)
  • Max size of the membership set: 2^20 = 1,048,576 leaves

Metrics

  • Time to sync the whole tree: 4 minutes
  • RPC calls: 702
  • Number of leaves: 2,653

One can argue that the time to sync the tree at the current state is not that bad. +However, the number of RPC calls is a concern, +which scales linearly with the number of blocks since the contract was deployed. +This is because the implementation fetches all events from the contract, +chunking 2,000 blocks at a time. +This is done to avoid hitting the block limit of 10,000 events per call, +which is a limitation of popular RPC providers.

Proposed solution

From a theoretical perspective, +one could construct the merkle tree on-chain, +in a view call, in-memory. +However, this is not feasible due to the gas costs associated with it.

To compute the root of a Merkle tree with 2^{20} leaves it costs approximately 2 billion gas. +With Infura and Alchemy capping the gas limit to 350M and 550M gas respectively, +it is not possible to compute the root of the tree in a single call.

Acknowledging that Polygon Miden and Penumbra both make use of a tiered commitment tree, +we propose a similar approach for RLN.

A tiered commitment tree is a tree which is sharded into multiple smaller subtrees, +each of which is a tree in itself. +This allows scaling in terms of the number of leaves, +as well as reducing state bloat by just storing the root of a subtree when it is full instead of all its leaves.

Here, the question arises: +What is the maximum number of leaves in a subtree with which the root can be computed in a single call?

It costs approximately 217M gas to compute the root of a Merkle tree with 2^{10} leaves.

This is a feasible number for a single call, +and hence we propose a tiered commitment tree with a maximum of 2^{10} leaves in a subtree and the number of subtrees is 2^{10}. +Therefore, the maximum number of leaves in the tree is 2^{20} (the same as the current implementation).

img

Insertion

When a commitment is inserted into the tree it is first inserted into the first subtree. +When the first subtree is full the next insertions go into the second subtree and so on.

Syncing

When syncing the tree, +one only needs to fetch the roots of the subtrees. +The root of the full tree can be computed in-memory or on-chain.

This allows us to derive the following relation:

number\_of\_rpc\_calls = number\_of\_filled\_subtrees + 1

This is a significant improvement over the current implementation, +which requires fetching all the memberships from the smart contract.

Gas costs

The gas costs for inserting a commitment into the tree are the same as the current implementation except it consists of an extra SSTORE operation to store the shardIndex of the commitment.

Events

The events emitted by the contract are the same as the current implementation, +appending the shardIndex of the commitment.

Proof of concept

A proof of concept implementation of the tiered commitment tree is available here, +and is deployed on Sepolia at 0xE7987c70B54Ff32f0D5CBbAA8c8Fc1cAf632b9A5.

It is compatible with the current implementation of the RLN verifier.

Future work

  1. Optimize the gas costs of the tiered commitment tree.
  2. Explore using different number of leaves under a given node in the tree (currently set to 2).

Conclusion

The tiered commitment tree is a promising approach to reduce the number of RPC calls required to sync the tree and reduce the gas costs associated with computing the root of the tree. +Consequently, it allows for a more scalable and efficient RLN verifier.

References

]]>
+
+ + <![CDATA[Strengthening Anonymous DoS Prevention with Rate Limiting Nullifiers in Waku]]> + https://vac.dev/rlog/rln-anonymous-dos-prevention + https://vac.dev/rlog/rln-anonymous-dos-prevention + Tue, 07 Nov 2023 12:00:00 GMT + + Rate Limiting Nullifiers in practice, applied to an anonymous p2p network, like Waku.

Introduction

Rate Limiting Nullifier (RLN) is a zero-knowledge gadget that allows users to prove 2 pieces of information,

  1. They belong to a permissioned membership set
  2. Their rate of signaling abides by a fixed number that has been previously declared

The "membership set" introduced above, is in the form of a sparse, indexed merkle tree. +This membership set can be maintained on-chain, off-chain or as a hybrid depending on the network's storage costs. +Waku makes use of a hybrid membership set, +where insertions are tracked in a smart contract. +In addition, each Waku node maintains a local copy of the tree, +which is updated upon each insertion.

Users register themselves with a hash of a locally generated secret, +which is then inserted into the tree at the next available index. +After having registered, users can prove their membership by proving their knowledge of the pre-image of the respective leaf in the tree. +The leaf hashes are also referred to as commitments of the respective users. +The actual proof is done by a Merkle Inclusion Proof, which is a type of ZK proof.

The circuit ensures that the user's secret does indeed hash to a leaf in the tree, +and that the provided Merkle proof is valid.

After a User generates this Merkle proof, +they can transmit it to other users, +who can verify the proof. +Including a message's hash within the proof generation, +additionally guarantees integrity of that message.

A malicious user could generate multiple proofs per epoch. +However, when multiple proofs are generated per epoch, +the malicious user's secret is exposed, which strongly disincentivizes this attack. +This mechanism is further described in the Malicious User secret interpolation mechanism section.

Note: This blog post describes rln-v1, which excludes the range check in favor of a global rate limit for all users, +which is once per time window. This version is currently in use in waku-rln-relay.

RLN Protocol parameters

Given below is the set of cryptographic primitives, +and constants that are used in the RLN protocol.

  1. Proving System: groth16
  2. Elliptic Curve: bn254 (aka bn128) (not to be confused with the 254 bit Weierstrass curve)
  3. Finite Field: Prime-order subgroup of the group of points on the bn254 curve
  4. Default Merkle Tree Height: 20
  5. Hashing algorithm: Poseidon
  6. Merkle Tree: Sparse Indexed Merkle Tree
  7. Messages per epoch: 1
  8. Epoch duration: 10 seconds

Malicious User secret interpolation mechanism

note: all the parameters mentioned below are elements in the finite field mentioned above.

The private inputs to the circuit are as follows: -

identitySecret: the randomly generated secret of the user
identityPathIndex: the index of the commitment derived from the secret
pathElements: elements included in the path to the index of the commitment

Following are the public inputs to the circuit -

x: hash of the signal to the finite field
rlnIdentifier: application-specific identifier which this proof is being generated for
epoch: the timestamp which this proof is being generated for

The outputs of the circuit are as follows: -

y: result of Shamir's secret sharing calculation
root: root of the Merkle tree obtained after applying the inclusion proof
nullifier: uniquely identifies a message, derived from rlnIdentifier, epoch, and the user's secret

With the above data in mind, following is the circuit pseudocode -

identityCommitment = Poseidon([identitySecret])
root = MerkleInclusionProof(identityCommitment, identityPathIndex, pathElements)
externalNullifier = Poseidon([epoch, rlnIdentifier])
a1 = Poseidon([identitySecret, externalNullifier])
y = identitySecret + a1 * x
nullifier = Poseidon([a1])

To interpolate the secret of a user who has sent multiple signals during the same epoch to the same rln-based application, we may make use of the following formula -

a_1 = {(y_1 - y_2) \over (x_1 - x_2)}

where x_1, y_1 and x_2, y_2 are shares from different messages.

Subsequently, we may use one pair of the shares, x_1 and y_1, to obtain the identitySecret.

identitySecret = y_1 - a_1 * x_1

This enables RLN to be used for rate limiting with a global limit. For arbitrary limits, +please refer to an article written by @curryrasul, rln-v2.

Waku's problem with DoS

In a decentralized, privacy focused messaging system like Waku, +Denial of Service (DoS) vulnerabilities are very common, and must be addressed to promote network scale and optimal bandwidth utilization.

DoS prevention with user metadata

There are a couple of ways a user can be rate-limited, either -

  1. IP Logging
  2. KYC Logging

Both IP and KYC logging prevent systems from being truly anonymous, and hence, cannot be used as a valid DoS prevention mechanism for Waku.

RLN can be used as an alternative, which provides the best of both worlds, i.e a permissioned membership set, as well as anonymous signaling. +However, we are bound by k-anonymity rules of the membership set.

Waku-RLN-Relay is a libp2p pubsub validator that verifies if a proof attached to a given message is valid. +In case the proof is valid, the message is relayed.

Performance analysis

Test bench specs: AMD EPYC 7502P 32-Core, 4x32GB DDR4 Reg.ECC Memory

This simulation was conducted by @alrevuelta, and is described in more detail here.

The simulation included 100 waku nodes running in parallel.

Proof generation times - +

img

Proof verification times - +

img

A spammer node publishes 3000 msg/epoch, which is detected by all connected nodes, and subsequently disconnect to prevent further spam - +

img

Security analysis

Barbulescu and Duquesne +conclude that the bn254 curve has only 100 bits of security. +Since the bn254 curve has a small embedding degree, +it is vulnerable to the MOV attack. +However, the MOV attack is only applicable to pairings, +and not to the elliptic curve itself. +It is acceptable to use the bn254 curve for RLN, +since the circuit does not make use of pairings.

An analysis of the number of rounds in the Poseidon hash function was done, +which concluded that the hashing rounds should not be reduced.

The smart contracts have not been audited, and are not recommended for real world deployments yet.

Storage analysis

commitment\_size = 32\ bytes \\ tree\_height = 20 \\ total\_leaves = 2^{20} \\ max\_tree\_size = total\_leaves * commitment\_size \\ max\_tree\_size = 2^{20} * 32 = 33,554,432\ bytes \\ ∴ max\_tree\_size = 33.55\ megabytes

The storage overhead introduced by RLN is minimal. +RLN only requires 34 megabytes of storage, which poses no problem on most end-user hardware, with the exception of IoT/microcontrollers. +Still, we are working on further optimizations allowing proof generation without having to store the full tree.

The bare minimum requirements to run RLN

With proof generation time in sub-second latency, along with low storage overhead for the tree, +it is possible for end users to generate and verify RLN proofs on a modern smartphone.

Following is a demo provided by @rramos that demonstrates +waku-rln-relay used in react native.

Warning: The react native sdk will be deprecated soon, and the above demo should serve as a PoC for RLN on mobiles

RLN usage guide

Zerokit implements api's that allow users to handle operations to the tree, +as well as generate/verify RLN proofs.

Our main implementation of RLN can be accessed via this Rust crate, +which is documented here. +It can be used in other languages via the FFI API, which is documented here. +The usage of RLN in Waku is detailed in our RLN Implementers guide, +which provides step-by-step instructions on how to run Waku-RLN-Relay.

Following is a diagram that will help understand the dependency tree -

rln-dep-tree

Future work

  • Optimizations to zerokit for proof generation time.
  • Incrementing tree depth from 20 to 32, to allow more memberships.
  • Optimizations to the smart contract.
  • An ability to signal validity of a message in different time windows.
  • Usage of proving systems other than Groth16.

References

]]>
+
+ + <![CDATA[GossipSub Improvements: Evolution of Overlay Design and Message Dissemination in Unstructured P2P Networks]]> + https://vac.dev/rlog/GossipSub Improvements + https://vac.dev/rlog/GossipSub Improvements + Mon, 06 Nov 2023 12:00:00 GMT + + GossipSub Improvements: Evolution of Overlay Design and Message Dissemination in Unstructured P2P Networks

Motivation

We have been recently working on analyzing and improving the performance of the GossipSub protocol for large messages, +as in the case of Ethereum Improvement Proposal EIP-4844. +This work led to a comprehensive study of unstructured P2P networks. +The intention was to identify the best practices that can serve as guidelines for performance improvement and scalability of P2P networks.

Introduction

Nodes in an unstructured p2p network form self-organizing overlay(s) on top of the IP infrastructure to facilitate different services like information dissemination, +query propagation, file sharing, etc. The overlay(s) can be as optimal as a tree-like structure or as enforcing as a fully connected mesh.

Due to peer autonomy and a trustless computing environment, some peers may deviate from the expected operation or even leave the network. +At the same time, the underlying IP layer is unreliable.

Therefore, tree-like overlays are not best suited for reliable information propagation. +Moreover, tree-based solutions usually result in significantly higher message dissemination latency due to suboptimal branches.

Flooding-based solutions, on the other hand, result in maximum resilience against adversaries and achieve minimal message dissemination latency because the message propagates through all (including the optimal) paths. +Redundant transmissions help maintain the integrity and security of the network in the presence of adversaries and high node failure but significantly increase network-wide bandwidth utilization, cramming the bottleneck links.

An efficient alternative is to lower the number of redundant transmissions by D-regular broadcasting, where a peer will likely receive (or relay) a message from up to D random peers. +Publishing through a D-regular overlay triggers approximately N \times D transmissions. +Reducing D reduces the redundant transmissions but compromises reachability and latency. +Sharing metadata through a K-regular overlay (where K > D) allows nodes to pull missing messages.

GossipSub [1] benefits from full-message (D-regular) and metadata-only (k-regular) overlays. +Alternatively, a metadata-only overlay can be used, requiring a pull-based operation that significantly minimizes bandwidth utilization at the cost of increased latency.

Striking the right balance between parameters like D, K, pull-based operation, etc., can yield application-specific performance tuning, but scalability remains a problem.

At the same time, many other aspects can significantly contribute to the network's performance and scalability. +One option is to realize peers' suitability and continuously changing capabilities while forming overlays.

For instance, a low-bandwidth link near a publisher can significantly demean the entire network's performance. +Reshuffling of peering links according to the changing network conditions can lead to superior performance.

Laying off additional responsibilities to more capable nodes (super nodes) can alleviate peer cramming, but it makes the network susceptible to adversaries/peer churn. +Grouping multiple super nodes to form virtual node(s) can solve this problem.

Similarly, flat (single-tier) overlays cannot address the routing needs in large (geographically dispersed) networks.

Hierarchical (Multi-tier) overlays with different intra/inter-overlay routing solutions can better address these needs. +Moreover, using message aggregation schemes for grouping multiple messages can save bandwidth and provide better resilience against adversaries/peer churn.

This article's primary objective is to investigate the possible choices that can empower an unstructured P2P network to achieve superior performance for the broadest set of applications. +We look into different constraints imposed by application-specific needs (performance goals) and investigate various choices that can augment the network's performance. +We explore overlay designs/freshness, peer selection approaches, message-relaying mechanisms, and resilience against adversaries/peer churn. +We consider GossipSub a baseline protocol to explore various possibilities and decisively commit to the ones demonstrating superior performance. +We also discuss the current state and, where applicable, propose a strategic plan for embedding new features to the GossipSub protocol.

GOAL1: Low Latency Operation

Different applications, like blockchain, streaming, etc., impose strict time bounds on network-wide message dissemination latency. +A message delivered after the imposed time bounds is considered as dropped. +An early message delivery in applications like live streaming can further enhance the viewing quality.

The properties and nature of the overlay network topology significantly impact the performance of services and applications executed on top of them. +Studying and devising mechanisms for better overlay design and message dissemination is paramount to achieving superior performance.

Interestingly, shortest-path message delivery trees have many limitations:

1) Changing network dynamics requires a quicker and continuous readjustment of the multicast tree. +2) The presence of resource-constrained (bandwidth/compute, etc.) nodes in the overlay can result in congestion. +3) Node failure can result in partitions, making many segments unreachable. +4) Assuring a shortest-path tree-like structure requires a detailed view of the underlying (and continuously changing) network topology.

Solutions involve creating multiple random trees to add redundancy [2]. +Alternatives involve building an overlay mesh and forwarding messages through the multicast delivery tree (eager push).

Metadata is shared through the overlay links so that the nodes can ask for missing messages (lazy push or pull-based operation) through the overlay links. +New nodes are added from the overlay on node failure, but it requires non-faulty node selection.

GossipSub uses eager push (through overlay mesh) and lazy push (through IWANT messages).

The mesh degree $D_{Low} \leq D \leq D_{High}$ is crucial in deciding message dissemination latency. A smaller value for $D$ results in higher latency due to increased rounds, whereas a higher $D$ reduces latency at the cost of increased bandwidth. At the same time, keeping $D$ independent of the growing network size ($N$) may increase network-wide message dissemination latency. Adjusting $D$ with $N$ maintains similar latency at the cost of increased workload for peers. Authors in [3] suggest only a logarithmic increase in $D$ to maintain a manageable workload for peers. In [4], it is reported that the average mesh degree should not exceed $D_{avg} = \ln(N) + C$ for optimal operation, where $C$ is a small constant.

Moreover, quicker shuffling of peers results in better performance in the presence of resource-constrained nodes or node failure [4].

GOAL2: Considering Heterogeneity In Overlay Design

Random peering connections in P2P overlays represent a stochastic process. It is inherently difficult to precisely model the performance of such systems. +Most of the research on P2P networks provides simulation results assuming nodes with similar capabilities. +The aspect of dissimilar capabilities and resource-constrained nodes is less explored.

It is discussed in GOAL1 that an overlay mesh results in better performance if $D_{avg}$ does not exceed $\ln(N) + C$. Enforcing all the nodes to have approximately $\ln(N) + C$ peers makes resource-rich nodes under-utilized, while resource-constrained nodes are overloaded. At the same time, connecting high-bandwidth nodes through a low-bandwidth node undermines the network's performance. Ideally, the workload on any node should not exceed its available resources. A better solution involves a two-phased operation:

  1. Every node computes its available bandwidth and selects a node degree $D$ proportional to its available bandwidth [4]. Different bandwidth estimation approaches are suggested in the literature [5,6]. Simple bandwidth estimation approaches like variable packet size probing [6] yield similar results with less complexity. It is also worth mentioning that many nodes may want to allocate only a capped share of their bandwidth to the network. Lowering $D$ according to the available bandwidth can still prove helpful. Additionally, bandwidth preservation at the transport layer through approaches like µTP can be useful. To further conform to the suggested mesh-degree average $D_{avg}$, every node tries achieving this average within its neighborhood, resulting in an overall similar $D_{avg}$.

  2. From the available local view, every node tries connecting to peers with the lowest latency until $D$ connections are made. We suggest referring to the peering solution discussed in GOAL5 to avoid network partitioning.

The current GossipSub design considers homogeneous peers, and every node tries maintaining $D_{Low} \leq D \leq D_{High}$ connections.

GOAL3: Bandwidth Optimization

Redundant message transmissions are essential for handling adversaries/node failure. However, these transmissions result in traffic bursts, cramming many overlay links. +This not only adds to the network-wide message dissemination latency but a significant share of the network's bandwidth is wasted on (usually) unnecessary transmissions. +It is essential to explore solutions that can minimize the number of redundant transmissions while assuring resilience against node failures.

Many efforts have been made to minimize the impact of redundant transmissions. +These solutions include multicast delivery trees, metadata sharing to enable pull-based operation, in-network information caching, etc. [7,8]. +GossipSub employs a hybrid of eager push (message dissemination through the overlay) and lazy push (a pull-based operation by the nodes requiring information through IWANT messages).

A better alternative to simple redundant transmission is to use message aggregation [9,10,11] for the GossipSub protocol. With aggregation, redundant message transmissions can be turned into a critical advantage of the GossipSub protocol. Suppose that we have three equal-length messages $x_1, x_2, x_3$. Assuming an XOR coding function, we know two trivial properties: $x_1 \oplus x_2 \oplus x_2 = x_1$ and $\vert x_1 \vert = \vert x_1 \oplus x_2 \oplus x_2 \vert$.

This implies that instead of sending messages individually, we can encode and transmit composite message(s) to the network. +The receiver can reconstruct the original message from encoded segments. +As a result, fewer transmissions are sufficient for sending more messages to the network.

However, sharing linear combinations of messages requires organizing messages in intervals, +and devising techniques to identify all messages belonging to each interval. +In addition, combining messages from different publishers requires more complex arrangements, +involving embedding publisher/message IDs, delayed forwarding (to accommodate more messages), and mechanisms to ensure the decoding of messages at all peers. +Careful application-specific need analysis can help decide the benefits against the added complexity.

GOAL4: Handling Large Messages

Many applications require transferring large messages for their successful operation. For instance, database/blockchain transactions [12]. +This introduces two challenges:

1) Redundant large message transmissions result in severe network congestion. +2) Message transmissions follow a store/forward process at all peers, which is inefficient in the case of large messages.

The above-mentioned challenges result in a noticeable increase in message dissemination latency and bandwidth wastage. +Most of the work done for handling large messages involves curtailing redundant transmissions using multicast delivery trees, +reducing the number of fanout nodes, employing in-network message caching, pull-based operation, etc.

Approaches like message aggregation also prove helpful in minimizing bandwidth wastage.

Our recent work on GossipSub improvements (still a work in progress) suggests the following solutions to deal with large message transmissions:

  1. Using IDontWant message proposal [13] and staggered sending.

    IDontWant message helps curtail redundant transmissions by letting other peers know we have already received the message. +Staggered sending enables relaying the message to a short subset of peers in each round. +We argue that simultaneously relaying a message to all peers hampers the effectiveness of the IDontWant message. +Therefore, using the IDontWant message with staggered sending can yield better results by allowing timely reception and processing of IDontWant messages.

  2. Message transmissions follow a store/forward process at all peers that is inefficient in the case of large messages. +We can parallelize message transmission by partitioning large messages into smaller fragments, letting intermediate peers relay these fragments as soon as they receive them.

GOAL5: Scalability

P2P networks are inherently scalable because every incoming node brings in bandwidth and compute resources. In other words, we can keep adding nodes to the network as long as every incoming node brings at least $R \times D$ bandwidth, where $R$ is the average data arrival rate. It is worth mentioning that network-wide message dissemination requires at least $\lceil \log_D (N) \rceil$ hops. Therefore, increasing network size increases message dissemination latency, assuming $D$ is independent of the network size.

Additionally, problems like peer churn, adversaries, heterogeneity, distributed operation, etc., significantly hamper the network's performance. +Most efforts for bringing scalability to the P2P systems have focused on curtailing redundant transmissions and flat overlay adjustments. +Hierarchical overlay designs, on the other hand, are less explored.

Placing a logical structure in unstructured P2P systems can help scale P2P networks.

One possible solution is to use a hierarchical overlay inspired by the approaches [14,15,16]. +An abstract operation of such overlay design is provided below:

  1. Clustering nodes based on locality, assuming that such peers will have relatively lower intra-cluster latency and higher bandwidth. For this purpose, every node tries connecting to peers with the lowest latency until $D$ connections are made or the cluster limit is reached.

  2. A small subset of nodes having the highest bandwidth and compute resources is selected from each cluster. +These super nodes form a fully connected mesh and jointly act as a virtual node, +mitigating the problem of peer churn among super nodes.

  3. Virtual nodes form a fully connected mesh to construct a hierarchical overlay. +Each virtual node is essentially a collection of super nodes; +a link to any of the constituent super nodes represents a link to the virtual node.

  4. One possible idea is to use GossipSub for intra-cluster message dissemination and FloodSub for inter-cluster message dissemination.

Summary

Overlay acts as a virtual backbone for a P2P network. A flat overlay is more straightforward and allows effortless readjustment to application needs. +On the other hand, a hierarchical overlay can bring scalability at the cost of increased complexity. +Regardless of the overlay design, a continuous readjustment to appropriate peering links is essential for superior performance. +At the same time, bandwidth preservation (through message aggregation, caching at strategic locations, metadata sharing, pull-based operation, etc.) can help minimize latency. +However, problems like peer churn and in-network adversaries can be best alleviated through balanced redundant coverage, and frequent reshuffling of the peering links.

References

  • [1] D. Vyzovitis, Y. Napora, D. McCormick, D. Dias, and Y. Psaras, “Gossipsub: Attack-resilient message propagation in the filecoin and eth2.0 networks,” arXiv preprint arXiv:2007.02754, 2020. Retrieved from https://arxiv.org/pdf/2007.02754.pdf
  • [2] M. Matos, V. Schiavoni, P. Felber, R. Oliveira, and E. Riviere, “Brisa: Combining efficiency and reliability in epidemic data dissemination,” in 2012 IEEE 26th International Parallel and Distributed Processing Symposium. IEEE, 2012, pp. 983–994. Retrieved from https://ieeexplore.ieee.org/abstract/document/6267905
  • [3] P. T. Eugster, R. Guerraoui, A. M. Kermarrec, and L. Massouli, “Epidemic information dissemination in distributed systems,” IEEE Computer, vol. 37, no. 5, 2004. Retrieved from https://infoscience.epfl.ch/record/83478/files/EugGueKerMas04IEEEComp.pdf
  • [4] D. Frey, “Epidemic protocols: From large scale to big data,” Ph.D. dissertation, Universite De Rennes 1, 2019. Retrieved from https://inria.hal.science/tel-02375909/document
  • [5] M. Jain and C. Dovrolis, “End-to-end available bandwidth: measurement methodology, dynamics, and relation with tcp throughput,” IEEE/ACM Transactions on networking, vol. 11, no. 4, pp. 537–549, 2003. Retrieved from https://ieeexplore.ieee.org/abstract/document/1224454
  • [6] R. Prasad, C. Dovrolis, M. Murray, and K. Claffy, “Bandwidth estimation: metrics, measurement techniques, and tools,” IEEE network, vol. 17, no. 6, pp. 27–35, 2003. Retrieved from https://ieeexplore.ieee.org/abstract/document/1248658
  • [7] D. Kostic, A. Rodriguez, J. Albrecht, and A. Vahdat, “Bullet: High bandwidth data dissemination using an overlay mesh,” in Proceedings of the nineteenth ACM symposium on Operating systems principles, 2003, pp. 282–297. Retrieved from https://dl.acm.org/doi/abs/10.1145/945445.945473
  • [8] V. Pai, K. Kumar, K. Tamilmani, V. Sambamurthy, and A. E. Mohr, “Chainsaw: Eliminating trees from overlay multicast,” in Peer-to-Peer Systems IV: 4th International Workshop, IPTPS 2005, Ithaca, NY, USA, February 24-25, 2005. Revised Selected Papers 4. Springer, 2005, pp. 127–140. Retrieved from https://link.springer.com/chapter/10.1007/11558989_12
  • [9] Y.-D. Bromberg, Q. Dufour, and D. Frey, “Multisource rumor spreading with network coding,” in IEEE INFOCOM 2019-IEEE Conference on Computer Communications. IEEE, 2019, pp. 2359–2367. Retrieved from https://ieeexplore.ieee.org/abstract/document/8737576
  • [10] B. Haeupler, “Analyzing network coding gossip made easy,” in Proceedings of the forty-third annual ACM symposium on Theory of computing, 2011, pp. 293–302. Retrieved from https://dl.acm.org/doi/abs/10.1145/1993636.1993676
  • [11] S. Yu and Z. Li, “Massive data delivery in unstructured peer-to-peer networks with network coding,” in 6th IEEE/ACIS International Conference on Computer and Information Science (ICIS 2007). IEEE, 2007, pp. 592–597. Retrieved from https://ieeexplore.ieee.org/abstract/document/4276446
  • [12] V. Buterin, D. Feist, D. Loerakker, G. Kadianakis, M. Garnett, M. Taiwo, and A. Dietrichs, “Eip-4844: Shard blob transactions scale data-availability of ethereum in a simple, forwards-compatible manner,” 2022. Retrieved from https://eips.ethereum.org/EIPS/eip-4844
  • [13] A. Manning, “Gossipsub extension for epidemic meshes (v1.2.0),” 2022. Retrieved from https://github.com/libp2p/specs/pull/413
  • [14] Z. Duan, C. Tian, M. Zhou, X. Wang, N. Zhang, H. Du, and L. Wang, “Two-layer hybrid peer-to-peer networks,” Peer-to-Peer Networking and Applications, vol. 10, pp. 1304–1322, 2017. Retrieved from https://link.springer.com/article/10.1007/s12083-016-0460-5
  • [15] W. Hao, J. Zeng, X. Dai, J. Xiao, Q. Hua, H. Chen, K.-C. Li, and H. Jin, “Blockp2p: Enabling fast blockchain broadcast with scalable peer-to-peer network topology,” in Green, Pervasive, and Cloud Computing: 14th International Conference, GPC 2019, Uberlandia, Brazil, May 26–28, 2019, Proceedings 14. Springer, 2019, pp. 223–237. Retrieved from https://link.springer.com/chapter/10.1007/978-3-030-19223-5_16
  • [16] H. Qiu, T. Ji, S. Zhao, X. Chen, J. Qi, H. Cui, and S. Wang, “A geography-based p2p overlay network for fast and robust blockchain systems,” IEEE Transactions on Services Computing, 2022. Retrieved from https://ieeexplore.ieee.org/abstract/document/9826458
]]>
+
+ + <![CDATA[Nescience - A zkVM leveraging hiding properties]]> + https://vac.dev/rlog/Nescience-A-zkVM-leveraging-hiding-properties + https://vac.dev/rlog/Nescience-A-zkVM-leveraging-hiding-properties + Mon, 28 Aug 2023 12:00:00 GMT + + Nescience, a privacy-first blockchain zkVM.

Introduction

Nescience is a privacy-first blockchain project that aims to enable private transactions and provide a general-purpose execution environment for classical applications. +The goals include creating a state separation architecture for public/private computation, +designing a versatile virtual machine based on mainstream instruction sets, +creating proofs for private state updates, implementing a kernel-based architecture for correct execution of private functions, +and implementing core DeFi protocols such as AMMs and staking from a privacy perspective.

It intends to create a user experience that is similar to public blockchains, but with additional privacy features that users can leverage at will. +To achieve this goal, Nescience will implement a versatile virtual machine that can be used to implement existing blockchain applications, +while also enabling the development of privacy-centric protocols such as private staking and private DEXs.

To ensure minimal trust assumptions and prevent information leakage, Nescience proposes a proof system that allows users to create proofs for private state updates, +while the verification of the proofs and the execution of the public functions inside the virtual machine can be delegated to an external incentivised prover.

It also aims to implement a seamless interaction between public and private state, enabling composability between contracts, and private and public functions. +Finally, Nescience intends to implement permissive licensing, which means that the source code will be open-source, +and developers will be able to use and modify the code without any restriction.

Our primary objective is the construction of the Zero-Knowledge Virtual Machine (zkVM). This document serves as a detailed exploration of the multifaceted challenges, +potential solutions, and alternatives that lay ahead. Each step is a testament to our commitment to thoroughness; +we systematically test various possibilities and decisively commit to the one that demonstrates paramount performance and utility. +For instance, as we progress towards achieving Goal 2, we are undertaking a rigorous benchmarking of the Nova proof system against its contemporaries. +Should Nova showcase superior performance metrics, we stand ready to integrate it as our proof system of choice. Through such meticulous approaches, +we not only reinforce the foundation of our project but also ensure its scalability and robustness in the ever-evolving landscape of blockchain technology.

Goal 1: Create a State Separation Architecture

The initial goal revolves around crafting a distinctive architecture that segregates public and private computations, +employing an account-based framework for the public state and a UTXO-based structure for the private state.

The UTXO model [1,2], notably utilized in Bitcoin, generates new UTXOs to serve future transactions, +while the account-based paradigm assigns balances to accounts that transactions can modify. +Although the UTXO model bolsters privacy by concealing comprehensive balances, +the pursuit of a dual architecture mandates a meticulous synchronization of these state models, +ensuring that private transactions remain inconspicuous in the wider public network state.

This task is further complicated by the divergent transaction processing methods intrinsic to each model, +necessitating a thoughtful and innovative approach to harmonize their functionality. +To seamlessly bring together the dual architecture, harmonizing the account-based model for public state with the UTXO-based model for private state, +a comprehensive strategy is essential.

The concept of blending an account-based structure with a UTXO-based model for differentiating between public and private states is intriguing. +It seeks to leverage the strengths of both models: the simplicity and directness of the account-based model with the privacy enhancements of the UTXO model.

Here's a breakdown and a potential strategy for harmonizing these models:

Rationale Behind the Dual Architecture:

  • Account-Based Model: This model is intuitive and easy to work with. Every participant has an account, +and transactions directly modify the balances of these accounts. It's conducive for smart contracts and a broad range of applications.

  • UTXO-Based Model: This model treats every transaction as a new output, which can then be used as an input for future transactions. +By not explicitly associating transaction outputs with user identities, it offers a degree of privacy.

Harmonizing the Two Systems:

  1. Translation Layer

    • Role: Interface between UTXO and account-based states.

    • UTXO-to-Account Adapter: When UTXOs are spent, the adapter can translate these into the corresponding account balance modifications. +This could involve creating a temporary 'pseudo-account' that mirrors the +UTXO's attributes.

    • Account-to-UTXO Adapter: When an account wishes to make a private transaction, +it would initiate a process converting a part of its balance to a UTXO, facilitating a privacy transaction.

  2. Unified Identity Management

    • Role: Maintain a unified identity (or address) system that works across both state models, +allowing users to easily manage their public and private states without requiring separate identities.

    • Deterministic Wallets: Use Hierarchical Deterministic (HD) wallets [3,4], enabling users to generate multiple addresses (both UTXO and account-based) from a single seed. +This ensures privacy while keeping management centralized for the user.

  3. State Commitments

    • Role: Use cryptographic commitments to commit to the state of both models. This can help in efficiently validating cross-model transactions.

    • Verkle Trees: Verkle Trees combine Vector Commitment and the KZG polynomial commitment scheme to produce a structure that's efficient in terms of both proofs and verification. +Verkle proofs are considerably small in size (less data to store and transmit), where Transaction and state verifications can be faster due to the smaller proof sizes and computational efficiencies.

    • Mimblewimble-style Aggregation [5]: For UTXOs, techniques similar to those used in Mimblewimble can be used to aggregate transactions, keeping the state compact and enhancing privacy.

  4. Batch Processing & Anonymity Sets

    • Role: Group several UTXO-based private transactions into a single public account-based transaction. +This can provide a level of obfuscation and can make synchronization between the two models more efficient.

    • CoinJoin Technique [6]: As seen in Bitcoin, multiple users can combine their UTXO transactions into one, enhancing privacy.

    • Tornado Cash Principle [7]: For account-based systems wanting to achieve privacy, methods like those used in Tornado Cash can be implemented, +providing zk-SNARKs-based private transactions.

  5. Event Hooks & Smart Contracts

    • Role: Implement event-driven mechanisms that trigger specific actions in one model based on events in the other. +For instance, a private transaction (UTXO-based) can trigger a corresponding public notification or event in the account-based model.

    • Conditional Execution: Smart contracts could be set to execute based on events in the UTXO system. For instance, +a smart contract might release funds (account-based) once a specific UTXO is spent.

    • Privacy Smart Contracts: Using zk-SNARKs or zk-STARKs to bring privacy to the smart contract layer, +allowing for private logic execution.

Challenges and Solutions

  1. Synchronization Overhead

    • Challenge: Combining two distinct transaction models creates an inherent synchronization challenge.

    • State Channels: By allowing transactions to be conducted off-chain between participants, state channels can alleviate synchronization stresses. +Only the final state needs to be settled on-chain, drastically reducing the amount of data and frequency of updates required.

    • Sidechains: These act as auxiliary chains to the main blockchain. Transactions can be processed on the sidechain and then periodically synced with the main chain. +This structure helps reduce the immediate load on the primary system.

    • Checkpointing: Introduce periodic checkpoints where the two systems' states are verified and harmonized. +This can ensure consistency without constant synchronization.

  2. Double Spending

    • Challenge: With two models operating in tandem, there's an increased risk of double-spending attacks.

    • Multi-Signature Transactions: Implementing transactions that require signatures from both systems can prevent unauthorized movements.

    • Cross-Verification Mechanisms: Before finalizing a transaction, it undergoes verification in both UTXO and account-based systems. +If discrepancies arise, the transaction can be halted.

    • Timestamping: By attaching a timestamp to each transaction, it's possible to order them sequentially, making it easier to spot and prevent double spending.

  3. Complexity in User Experience

    • Challenge: The dual model, while powerful, is inherently complex.

    • Abstracted User Interfaces: Design UIs that handle the complexity behind the scenes, +allowing users to make transactions without needing to understand the nuances of the dual model.

    • Guided Tutorials: Offer onboarding tutorials to acquaint users with the system's features, +especially emphasizing when and why they might choose one transaction type over the other.

    • Feedback Systems: Implement systems where users can provide feedback on any complexities or challenges they encounter. +This real-time feedback can be invaluable for iterative design improvements.

  4. Security

    • Challenge: Merging two systems can introduce unforeseen vulnerabilities.

    • Threat Modeling: Regularly conduct threat modeling exercises to anticipate potential attack vectors, +especially those that might exploit the interaction between the two systems.

    • Layered Security Protocols: Beyond regular audits, introduce multiple layers of security checks. +Each layer can act as a fail-safe if a potential threat bypasses another.

    • Decentralized Watchtowers: These are third-party services that monitor the network for malicious activities. +If any suspicious activity is detected, they can take corrective measures or raise alerts.

  5. Gas & Fee Management:

    • Challenge: A dual model can lead to convoluted fee structures.

    • Dynamic Fee Adjustment: Implement algorithms that adjust fees based on network congestion and transaction type. +This can ensure fairness and prevent network abuse.

    • Fee Estimation Tools: Provide tools that can estimate fees before a transaction is initiated. +This helps users understand potential costs upfront.

    • Unified Gas Stations: Design platforms where users can purchase or allocate gas for both transaction types simultaneously, +simplifying the gas acquisition process.

By addressing these challenges head-on with a detailed and systematic approach, it's possible to unlock the full potential of a dual-architecture system, +combining the strengths of both UTXO and account-based models without their standalone limitations.

| Aspect | Details |
| --- | --- |
| Harmony | Advanced VM Development: Design tailored for private smart contracts. Leverage Established Architectures: Use WASM or RISC-V to harness their versatile and encompassing nature suitable for zero-knowledge applications. Support for UTXO & Account-Based Models: Enhance adaptability across various blockchain structures. |
| Challenges | Adaptation Concerns: WASM and RISC-V weren't designed with zero-knowledge proofs as a primary focus, posing integration challenges. Complexities with Newer Systems: Systems like (Super)Nova, STARKs, and Sangria are relatively nascent, adding another layer of intricacy to the integration. Optimization Concerns: Ensuring that these systems are optimized for zero-knowledge proofs. |
| Proposed Solutions | Integration of Nova: Consider Nova's proof system for its potential alignment with project goals. Comprehensive Testing: Rigorously test and benchmark against alternatives like Halo2, Plonky, and Starky to validate choices. Poseidon Recursion Technique: To conduct exhaustive performance tests, providing insights into each system's efficiency and scalability. |

Goal 2: Virtual Machine Creation

The second goal entails the creation of an advanced virtual machine by leveraging established mainstream instruction sets like WASM or RISC-V. +Alternatively, the objective involves pioneering a new, specialized instruction set meticulously optimized for Zero-Knowledge applications.

This initiative seeks to foster a versatile and efficient environment for executing computations within the privacy-focused context of the project. +Both WASM and RISC-V exhibit adaptability to both UTXO and account-based models due to their encompassing nature as general-purpose instruction set architectures.

WASM, operating as a low-level virtual machine, possesses the capacity to execute code derived from a myriad of high-level programming languages, +and boasts seamless integration across diverse blockchain platforms.

Meanwhile, RISC-V emerges as a versatile option, accommodating both models, and can be seamlessly integrated with secure enclaves like SGX or TEE, +elevating the levels of security and privacy. However, it is crucial to acknowledge that employing WASM or RISC-V might present challenges, +given their original design without specific emphasis on optimizing for Zero-Knowledge Proofs (ZKPs).

Further complexity arises with the consideration of more potent proof systems like (Super)Nova, STARKs, and Sangria, which, +while potentially addressing optimization concerns, necessitate extensive research and testing due to their relatively nascent status within the field. +This accentuates the need for a judicious balance between established options and innovative solutions in pursuit of an architecture harmoniously amalgamating privacy, security, and performance.

The ambition to build a powerful virtual machine tailored to zero-knowledge (ZK) applications is both commendable and intricate. +The combination of two renowned instruction sets, WASM and RISC-V, in tandem with ZK, is an innovation that could redefine privacy standards in blockchain. +Let's dissect the challenges and possibilities inherent in this goal:

  1. Established Mainstream Instruction Sets - WASM and RISC-V

    • Strengths:

      • WASM: Rooted in its ability to execute diverse high-level language codes, its potential for cross-chain compatibility makes it a formidable contender. +Serving as a low-level virtual machine, its role in the blockchain realm is analogous to that of the Java Virtual Machine in the traditional computing landscape.

      • RISC-V: This open-standard instruction set architecture has made waves due to its customizable nature. +Its adaptability to both UTXO and account-based structures coupled with its compatibility with trusted execution environments like SGX and TEE augments its appeal, +especially in domains that prioritize security and privacy.

    • Challenges: Neither WASM nor RISC-V was primarily designed with ZKPs in mind. While they offer flexibility, +they might lack the necessary optimizations for ZK-centric tasks. Adjustments to these architectures might demand intensive R&D efforts.

  2. Pioneering a New, Specialized Instruction Set

    • Strengths: A bespoke instruction set can be meticulously designed from the ground up with ZK in focus, +potentially offering unmatched performance and optimizations tailored to the project's requirements.

    • Challenges: Crafting a new instruction set is a monumental task requiring vast resources, including expertise, time, and capital. +It would also need to garner community trust and support over time.

  3. Contemporary Proof Systems - (Super)Nova, STARKs, Sangria

    • Strengths: These cutting-edge systems, being relatively new, might offer breakthrough cryptographic efficiencies that older systems lack: designed with modern challenges in mind, +they could potentially bridge the gap where WASM and RISC-V might falter in terms of ZKP optimization.

    • Challenges: Their nascent nature implies a dearth of exhaustive testing, peer reviews, and potentially limited community support. +The unknowns associated with these systems could introduce unforeseen vulnerabilities or complexities. +While they could offer optimizations that address challenges presented by WASM and RISC-V, their young status demands rigorous vetting and testing.

Mainstream (WASM, RISC-V)ZK-optimized (New Instruction Set)
Existing ToolingYESNO
Blockchain-focusedNOYES
PerformantDEPENDSYES

Optimization Concerns for WASM and RISC-V:

  • Cryptography Libraries: ZKP applications rely heavily on specific cryptographic primitives. Neither WASM nor RISC-V natively supports all of these primitives. +Thus, a comprehensive library of cryptographic functions, optimized for these platforms, needs to be developed.

  • Parallel Execution: Given the heavy computational demands of ZKPs, leveraging parallel processing capabilities can optimize the time taken. +Both WASM and RISC-V would need modifications to handle parallel execution of ZKP processes efficiently.

  • Memory Management: ZKP computations can sometimes require significant amounts of memory, especially during the proof generation phase. +Fine-tuned memory management mechanisms are essential to prevent bottlenecks.

Emerging ZKP Optimized Systems Considerations:

  • Proof Size: Different systems generate proofs of varying sizes. A smaller proof size is preferable for blockchain applications to save on storage and bandwidth. +The trade-offs between proof size, computational efficiency, and security need to be balanced.

  • Universality: Some systems can support any computational statement (universal), while others might be tailored to specific tasks. +A universal system can be more versatile for diverse applications on the blockchain.

  • Setup Requirements: Certain ZKP systems, like zk-SNARKs, require a trusted setup, which can be a security concern. +Alternatives like zk-STARKs don't have this requirement but come with other trade-offs.

Strategies for Integration:

  • Iterative Development: Given the complexities, an iterative development approach can be beneficial. +Start with a basic integration of WASM or RISC-V for general tasks and gradually introduce specialized ZKP functionalities.

  • Benchmarking: Establish benchmark tests specifically for ZKP operations. This will provide continuous feedback on the performance of the system as modifications are made, ensuring optimization.

  • External Audits & Research: Regular checks from cryptographic experts and collaboration with academic researchers can help in staying updated and ensuring secure implementations.

Goal 3: Proofs Creation and Verification

The process of generating proofs for private state updates is vested in the hands of the user, aligning with our commitment to minimizing trust assumptions and enhancing privacy. +Concurrently, the responsibility of verifying these proofs and executing public functions within the virtual machine can be effectively delegated to an external prover, +a role that is incentivized to operate with utmost honesty and integrity. This intricate balance seeks to safeguard against information leakage, +preserving the confidentiality of private transactions. Integral to this mechanism is the establishment of a robust incentivization framework.

To ensure the prover’s steadfast commitment to performing tasks with honesty, we should introduce a mechanism that facilitates both rewards for sincere behavior and penalties for any deviation from the expected standards. +This two-pronged approach serves as a compelling deterrent against dishonest behavior and fosters an environment of accountability. +In addition to incentivization, a crucial consideration is the economic aspect of verification and execution. +The verification process has been intentionally designed to be more cost-effective than execution.

This strategic approach prevents potential malicious actors from exploiting the system by flooding it with spurious proofs, a scenario that could arise when the costs align favorably. +By maintaining a cost balance that favors verification, we bolster the system’s resilience against fraudulent activities while ensuring its efficiency. +In sum, our multifaceted approach endeavors to strike an intricate equilibrium between user-initiated proof creation, external verification, and incentivization. +This delicate interplay of mechanisms ensures a level of trustworthiness that hinges on transparency, accountability, and economic viability.

As a result, we are poised to cultivate an ecosystem where users’ privacy is preserved, incentives are aligned, +and the overall integrity of the system is fortified against potential adversarial actions. To achieve the goals of user-initiated proof creation, +external verification, incentivization, and cost-effective verification over execution, several options and mechanisms can be employed:

  1. User-Initiated Proof Creation: Users are entrusted with the generation of proofs for private state updates, thus ensuring greater privacy and reducing trust dependencies.

    • Challenges:

      • Maintaining the quality and integrity of the proofs generated by users.

      • Ensuring that users have the tools and knowledge to produce valid proofs.

    • Solutions:

      • Offer extensive documentation, tutorials, and user-friendly tools to streamline the proof-generation process.

      • Implement checks at the verifier's end to ensure the quality of proofs.

  1. External Verification by Provers: An external prover verifies the proofs and executes public functions within the virtual machine.

    • Challenges:

      • Ensuring that the external prover acts honestly.

      • Avoiding centralized points of failure.

    • Solutions:

      • Adopt a decentralized verification approach, with multiple provers cross-verifying each other’s work.

      • Use reputation systems to rank provers based on their past performances, creating a trust hierarchy.

  2. Incentivization Framework: A system that rewards honesty and penalizes dishonest actions, ensuring provers' commitment to the task.

    • Challenges:

      • Determining the right balance of rewards and penalties.

      • Ensuring that the system cannot be gamed for undue advantage.

    • Solutions1:

      • Implement a dynamic reward system that adjusts based on network metrics and provers' performance.

      • Use a staking mechanism where provers need to lock up a certain amount of assets. +Honest behavior earns rewards, while dishonest behavior could lead to loss of staked assets.

  3. Economic Viability through Cost Dynamics: Making verification more cost-effective than execution to deter spamming and malicious attacks.

    • Challenges:

      • Setting the right cost metrics for both verification and execution.

      • Ensuring that genuine users aren’t priced out of the system.

    • Solutions:

      • Use a dynamic pricing model, adjusting costs in real-time based on network demand.

      • Implement gas-like mechanisms to differentiate operation costs and ensure fairness.

  4. Maintaining Trustworthiness: Create a system that's transparent, holds all actors accountable, and is economically sound.

    • Challenges:

      • Keeping the balance where users feel their privacy is intact, while provers feel incentivized.

      • Ensuring the system remains resilient against adversarial attacks.

    • Solutions:

      • Implement layered checks and balances.

      • Foster community involvement, allowing them to participate in decision-making, potentially through a decentralized autonomous organization (DAO).

Each of these options can be combined or customized to suit the specific requirements of your project, striking a balance between user incentives, +cost dynamics, and verification integrity. A thoughtful combination of these mechanisms ensures that the system remains robust, resilient, +and conducive to the objectives of user-initiated proof creation, incentivized verification, and cost-effective validation.

AspectDetails
Design Principle- User Responsibility: Generating proofs for private state updates. - External Prover: Delegated the task of verifying proofs and executing public VM functions.
Trust & Privacy- Minimized Trust Assumptions: Place proof generation in users' hands. - Enhanced Privacy: Ensure confidentiality of private transactions and prevent information leakage.
Incentivization Framework- Rewards: Compensate honest behavior. - Penalties: Deter and penalize dishonest behavior.
Economic Considerations- Verification vs. Execution: Make verification more cost-effective than execution to prevent spurious proofs flooding. - Cost Balance: Strengthen resilience against fraudulent activities and maintain efficiency.
OutcomeAn ecosystem where: - Users' privacy is paramount. - Incentives are appropriately aligned. - The system is robust against adversarial actions.

Goal 4: Kernel-based Architecture Implementation

This goal centers on the establishment of a kernel-based architecture, akin to the model observed in ZEXE, to facilitate the attestation of accurate private function executions. +This innovative approach employs recursion to construct a call stack, which is then validated through iterative recursive computations. +At its core, this technique harnesses a recursive Succinct Non-Interactive Argument of Knowledge (SNARK) mechanism, where each function call’s proof accumulates within the call stack.

The subsequent verification of this stack’s authenticity leverages recursive SNARK validation. +While this method offers robust verification of private function executions, it’s essential to acknowledge its associated intricacies.

The generation of SNARK proofs necessitates a substantial computational effort, which, in turn, may lead to elevated gas fees for users. +Moreover, the iterative recursive computations could potentially exhibit computational expansion as the depth of recursion increases. +This calls for a meticulous balance between the benefits of recursive verification and the resource implications it may entail.

In essence, Goal 4 embodies a pursuit of enhanced verification accuracy through a kernel-based architecture. +By weaving recursion and iterative recursive computations into the fabric of our system, we aim to establish a mechanism that accentuates the trustworthiness of private function executions, +while conscientiously navigating the computational demands that ensue.

To accomplish the goal of implementing a kernel-based architecture for recursive verification of private function executions, +several strategic steps and considerations can be undertaken: recursion handling and depth management.

Recursion Handling
  • Call Stack Management:

    • Implement a data structure to manage the call stack, recording each recursive function call’s details, parameters, and state.
  • Proof Accumulation:

    • Design a mechanism to accumulate proof data for each function call within the call stack. +This includes cryptographic commitments, intermediate results, and cryptographic challenges.

    • Ensure that the accumulated proof data remains secure and tamper-resistant throughout the recursion process.

  • Intermediary SNARK Proofs:

    • Develop an intermediary SNARK proof for each function call’s correctness within the call stack. +This proof should demonstrate that the function executed correctly and produced expected outputs.

    • Ensure that the intermediary SNARK proof for each recursive call can be aggregated and verified together, maintaining the integrity of the entire call stack.

Depth management
  • Depth Limitation:

    • Define a threshold for the maximum allowable recursion depth based on the system’s computational capacity, gas limitations, and performance considerations.

    • Implement a mechanism to prevent further recursion beyond the defined depth limit, safeguarding against excessive computational growth.

  • Graceful Degradation:

    • Design a strategy for graceful degradation when the recursion depth approaches or reaches the defined limit. +This may involve transitioning to alternative execution modes or optimization techniques.

    • Communicate the degradation strategy to users and ensure that the system gracefully handles scenarios where recursion must be curtailed.

  • Resource Monitoring:

    • Develop tools to monitor resource consumption (such as gas usage and computational time) as recursion progresses. +Provide real-time feedback to users about the cost and impact of recursive execution.
  • Dynamic Depth Adjustment:

    • Consider implementing adaptive depth management that dynamically adjusts the recursion depth based on network conditions, transaction fees, and available resources.

    • Utilize algorithms to assess the optimal recursion depth for efficient execution while adhering to gas cost constraints.

  • Fallback Mechanisms:

    • Create fallback mechanisms that activate if the recursion depth limit is reached or if the system encounters resource constraints. +These mechanisms could involve alternative verification methods or delayed execution.
  • User Notifications:

    • Notify users when the recursion depth limit is approaching, enabling them to make informed decisions about the complexity of their transactions and potential resource usage.

Goal 4 underscores the project's ambition to integrate the merits of a kernel-based architecture with recursive verifications to bolster the reliability of private function executions. +While the approach promises robust outcomes, it's pivotal to maneuver through its intricacies with astute strategies, ensuring computational efficiency and economic viability. +By striking this balance, the architecture can realize its full potential in ensuring trustworthy and efficient private function executions.

Goal 5: Seamless Interaction Design

Goal 5 revolves around the meticulous design of a seamless interaction between public and private states within the blockchain ecosystem. +This objective envisions achieving not only composability between contracts but also the harmonious integration of private and public functions.

A notable challenge in this endeavor lies in the intricate interplay between public and private states, +wherein the potential linkage of a private transaction to a public one raises concerns about unintended information leakage.

The essence of this goal entails crafting an architecture that facilitates the dynamic interaction of different states while ensuring that the privacy and confidentiality of private transactions remain unbreached. +This involves the formulation of mechanisms that enable secure composability between contracts, guaranteeing the integrity of interactions across different layers of functionality.

A key focus of this goal is to surmount the challenge of information leakage by implementing robust safeguards. +The solution involves devising strategies to mitigate the risk of revealing private transaction details when connected to corresponding public actions. +By creating a nuanced framework that compartmentalizes private and public interactions, the architecture aims to uphold privacy while facilitating seamless interoperability.

Goal 5 encapsulates a multifaceted undertaking, calling for the creation of an intricate yet transparent framework that empowers users to confidently engage in both public and private functions, +without compromising the confidentiality of private transactions. The successful realization of this vision hinges on a delicate blend of architectural ingenuity, cryptographic sophistication, and user-centric design.

To achieve seamless interaction between public and private states, composability, and privacy preservation, a combination of solutions and approaches can be employed. +In the table below, a comprehensive list of solutions that address these objectives:

Solution CategoryDescription
Layer 2 SolutionsEmploy zk-Rollups, Optimistic Rollups, and state channels to handle private interactions off-chain and settle them on-chain periodically. Boost scalability and cut transaction costs.
Intermediary Smart ContractsCraft smart contracts as intermediaries for secure public-private interactions. Use these to manage data exchange confidentially.
Decentralized Identity & PseudonymityImplement decentralized identity systems for pseudonymous interactions. Validate identity using cryptographic proofs.
Confidential Sidechains & Cross-ChainSet up confidential sidechains and employ cross-chain protocols to ensure privacy and composability across blockchains.
Temporal Data StructuresCreate chronological data structures for secure interactions. Utilize cryptographic methods for data integrity and privacy.
Homomorphic Encryption & MPCApply homomorphic encryption and MPC for computations on encrypted data and interactions between state layers.
Commit-Reveal SchemesIntroduce commit-reveal mechanisms for private transactions, revealing data only post necessary public actions.
Auditability & VerifiabilityUse on-chain tools for auditing and verifying interactions. Utilize cryptographic commitments for third-party validation.
Data Fragmentation & ShardingFragment data across shards for private interactions and curtailed data exposure. Bridge shards securely with cryptography.
Ring Signatures & CoinJoinIncorporate ring signatures and CoinJoin protocols to mask transaction details and mix transactions collaboratively.

Goal 6: Integration of DeFi Protocols with a Privacy-Preserving Framework

The primary aim of Goal 6 is to weave key DeFi protocols, such as AMMs and staking, into a user-centric environment that accentuates privacy. +This endeavor comes with inherent challenges, especially considering the heterogeneity of existing DeFi protocols, predominantly built on Ethereum. +These variations in programming languages and VMs exacerbate the quest for interoperability. Furthermore, the success and functionality of DeFi protocols is closely tied to liquidity, +which in turn is influenced by user engagement and the amount of funds locked into the system.

Strategic Roadmap for Goal 6

  1. Pioneering Privacy-Centric DeFi Models: Initiate the development of AMMs and staking solutions that are inherently protective of users' transactional privacy and identity.

  2. Specialized Smart Contracts with Privacy: Architect distinct smart contracts infused with privacy elements, setting the stage for secure user interactions within this new, confidential DeFi landscape.

  3. Optimized User Interfaces: Craft interfaces that resonate with user needs, simplifying the journey through the private DeFi space without compromising on security.

  4. Tackling Interoperability:

    • Deploy advanced bridge technologies and middleware tools to foster efficient data exchanges and guarantee operational harmony across a spectrum of programming paradigms and virtual environments.

    • Design and enforce universal communication guidelines that bridge the privacy-centric DeFi entities with the larger DeFi world seamlessly.

  1. Enhancing and Sustaining Liquidity:

    • Unveil innovative liquidity stimuli and yield farming incentives, compelling users to infuse liquidity into the private DeFi space.

    • Incorporate adaptive liquidity frameworks that continually adjust based on the evolving market demands, ensuring consistent liquidity.

    • Forge robust alliances with other DeFi stalwarts, jointly maximizing liquidity stores and honing sustainable token distribution strategies.

  1. Amplifying Community Engagement: Design and roll out enticing incentive schemes to rally users behind privacy-focused AMMs and staking systems, +thereby nurturing a vibrant, privacy-advocating DeFi community.

Through the integration of these approaches, we aim to achieve Goal 6, providing users with a privacy-focused platform for engaging effortlessly in core DeFi functions such as AMMs and staking, +all while effectively overcoming the obstacles related to interoperability and liquidity concerns.

Summary of the Architecture

In our quest to optimize privacy, we're proposing a Zero-Knowledge Virtual Machine (zkVM) that harnesses the power of Zero-Knowledge Proofs (ZKPs). +These proofs ensure that while private state data remains undisclosed, public state transitions can still be carried out and subsequently verified by third parties. +This blend of public and private state is envisaged to be achieved through a state tree representing the public state, while the encrypted state leaves stand for the private state. +Each user's private state indicates validity through the absence of a corresponding nullifier. +A nullifier is a unique cryptographic value generated in privacy-preserving blockchain transactions to prevent double-spending, +ensuring that each private transaction is spent only once without revealing its details.

Private functions' execution mandates users to offer a proof underscoring the accurate execution of all encapsulated private calls. +For validating a singular private function call, we're leaning into the kernel-based model inspired by the ZEXE protocol. +Defined as kernel circuits, these functions validate the correct execution of each private function call. +Due to their recursive circuit structure, a succession of private function calls can be executed by calculating proofs in an iterative manner. +Execution-relevant data, like private and public call stacks and additions to the state tree, are incorporated as public inputs.

Our method integrates the verification keys for these functions within a merkle tree. Here's the innovation: a user's ZKP showcases the existence of the verification key in this tree, yet keeps the executed function concealed. +The unique function identifier can be presented as the verification key, with all contracts merkleized for hiding functionalities.

We suggest a nuanced shift from the ZEXE protocol's identity function, which crafts an identity for smart contracts delineating its behavior, access timeframes, and other functionalities. +Instead of the ZEXE protocol's structure, our approach pivots to a method anchored in the +security of a secret combined with the uniqueness from hashing with the contract address. +The underlying rationale is straightforward: the sender, equipped with a unique nonce and salt for the transaction, hashes the secret, payload, nonce, and salt. +This result is then hashed with the contract address for the final value. The hash function's unidirectional nature ensures that the input cannot be deduced easily from its output. +A specific concern, however, is the potential repetition of secret and payload values across transactions, which could jeopardize privacy. +Yet, by embedding the function's hash within the hash of the contract address, users can validate a specific function's execution without divulging the function, navigating this limitation.

Alternative routes do exist: We could employ signature schemes like ECDSA, focusing on uniqueness and authenticity, albeit at the cost of complex key management. +Fully Homomorphic Encryption (FHE) offers another pathway, enabling function execution on encrypted data, or Multi-Party Computation (MPC) which guarantees non-disclosure of function or inputs. +Yet, integrating ZKPs with either FHE or MPC presents a challenge. Combining cryptographic functions like SHA-3 and BLAKE2 can also bolster security and uniqueness. +It's imperative to entertain these alternatives, especially when hashing might not serve large input/output functions effectively or might fall short in guaranteeing uniqueness.

Current State

Our aim is to revolutionize the privacy and security paradigms through Nescience. +As we strive to set milestones and achieve groundbreaking advancements, +our current focus narrows onto the realization of Goal 2 and Goal 3.

Our endeavors to build a powerful virtual machine tailored for Zero-Knowledge applications have led us down the path of rigorous exploration and testing. +We believe that integrating the right proof system is pivotal to our project's success, which brings us to Nova [8]. +In our project journey, we have opted to integrate the Nova proof system, recognizing its potential alignment with our overarching goals. +However, as part of our meticulous approach to innovation and optimization, we acknowledge the need to thoroughly examine Nova’s performance capabilities, +particularly due to its status as a pioneering and relatively unexplored proof system.

This critical evaluation entails a comprehensive process of benchmarking and comparative analysis [9], +pitting Nova against other prominent proof systems in the field, including Halo2 [10], +Plonky2 [11], and Starky [12]. +This ongoing and methodical initiative is designed to ensure a fair and impartial assessment, enabling us to draw meaningful conclusions about Nova’s strengths and limitations in relation to its counterparts. +By leveraging the Poseidon recursion technique, we are poised to conduct an exhaustive performance test that delves into intricate details. +Through this testing framework, we aim to discern whether Nova possesses the potential to outshine its contemporaries in terms of efficiency, scalability, and overall performance. +The outcome of this rigorous evaluation will be pivotal in shaping our strategic decisions moving forward. +Armed with a comprehensive understanding of Nova’s performance metrics vis-à-vis other proof systems, +we can confidently chart a course that maximizes the benefits of our project’s optimization efforts.

Moreover, as we ambitiously pursue the establishment of a robust mechanism for proof creation and verification, our focus remains resolute on preserving user privacy, +incentivizing honest behaviour, and ensuring the cost-effective verification of transactions. +At the heart of this endeavor is our drive to empower users by allowing them the autonomy of generating proofs for private state updates, +thereby reducing dependencies and enhancing privacy. +We would like to actively work on providing comprehensive documentation, user-friendly tools, +and tutorials to aid users in this intricate process.

In parallel, we're looking into decentralized verification processes, harnessing the strength of multiple external provers that cross-verify each other's work. +Our commitment is further cemented by our efforts to introduce a dynamic reward system that adjusts based on network metrics and prover performance. +This intricate balance, while challenging, aims to fortify our system against potential adversarial actions, aligning incentives, and preserving the overall integrity of the project.

References

[1] Nakamoto, S. (2008). Bitcoin: A Peer-to-Peer Electronic Cash System. Retrieved from https://bitcoin.org/bitcoin.pdf

[2] Sanchez, F. (2021). Cardano’s Extended UTXO accounting model. Retrieved from https://iohk.io/en/blog/posts/2021/03/11/cardanos-extended-utxo-accounting-model/

[3] Morgan, D. (2020). HD Wallets Explained: From High Level to Nuts and Bolts. Retrieved from https://medium.com/mycrypto/the-journey-from-mnemonic-phrase-to-address-6c5e86e11e14

[4] Wuille, P. (2012). Bitcoin Improvement Proposal (BIP) 32. Retrieved from https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki

[5] Jedusor, T. (2020). Introduction to Mimblewimble and Grin. Retrieved from https://github.com/mimblewimble/grin/blob/master/doc/intro.md

[6] Bitcoin's official wiki overview of the CoinJoin method. Retrieved from https://en.bitcoin.it/wiki/CoinJoin

[7] TornadoCash official Github page. Retrieved from https://github.com/tornadocash/tornado-classic-ui

[8] Kothapalli, A., Setty, S., Tzialla, I. (2021). Nova: Recursive Zero-Knowledge Arguments from Folding Schemes. Retrieved from https://eprint.iacr.org/2021/370

[9] ZKvm Github page. Retrieved from https://github.com/vacp2p/zk-explorations

[10] Electric Coin Company (2020). Explaining Halo 2. Retrieved from https://electriccoin.co/blog/explaining-halo-2/

[11] Polygon Labs (2022). Introducing Plonky2. Retrieved from https://polygon.technology/blog/introducing-plonky2

[12] StarkWare (2021). ethSTARK Documentation. Retrieved from https://eprint.iacr.org/2021/582


  1. Incentive Mechanisms:

    • Token Rewards: Design a token-based reward system where honest provers are compensated with tokens for their verification services. +This incentivizes participation and encourages integrity.

    • Staking and Slashing: Introduce a staking mechanism where provers deposit tokens as collateral. +Dishonest behavior results in slashing (partial or complete loss) of the staked tokens, while honest actions are rewarded.

    • Proof of Work/Proof of Stake: Implement a proof-of-work or proof-of-stake consensus mechanism for verification, +aligning incentives with the blockchain’s broader consensus mechanism.

]]>
+
+ + <![CDATA[Device Pairing in Js-waku and Go-waku]]> + https://vac.dev/rlog/device-pairing-in-js-waku-and-go-waku + https://vac.dev/rlog/device-pairing-in-js-waku-and-go-waku + Mon, 24 Apr 2023 12:00:00 GMT + + Device pairing and secure message exchange using Waku and noise protocol.

As the world becomes increasingly connected through the internet, the need for secure and reliable communication becomes paramount. In this article it is described how the Noise protocol can be used as a key-exchange mechanism for Waku.

Recently, this feature was introduced in js-waku and go-waku, providing a simple API for developers to implement secure communication protocols using the Noise Protocol framework. These open-source libraries provide a solid foundation for building secure and decentralized applications that prioritize data privacy and security.

This functionality is designed to be simple and easy to use, even for developers who are not experts in cryptography. The library offers a clear and concise API that abstracts away the complexity of the Noise Protocol framework and provides a straightforward interface for developers to use. Using this, developers can effortlessly implement secure communication protocols on top of their JavaScript and Go applications, without having to worry about the low-level details of cryptography.

One of the key benefits of using Noise is that it provides end-to-end encryption, which means that the communication between two parties is encrypted from start to finish. This is essential for ensuring the security and privacy of sensitive information

Device Pairing

In today's digital world, device pairing has become an integral part of our lives. Whether it's connecting our smartphones with other computers or web applications, the need for secure device pairing has become more crucial than ever. With the increasing threat of cyber-attacks and data breaches, it's essential to implement secure protocols for device pairing to ensure data privacy and prevent unauthorized access.

To demonstrate how device pairing can be achieved using Waku and Noise, we have examples available at https://examples.waku.org/noise-js/. You can try pairing different devices, such as mobile and desktop, via a web application. This can be done by scanning a QR code or opening a URL that contains the necessary data for a secure handshake.

The process works as follows:

Actors:

  • Alice the initiator
  • Bob the responder
  1. The first step in achieving secure device pairing using Noise and Waku is for Bob to generate the pairing information, which could be transmitted out-of-band. For this, Bob opens https://examples.waku.org/noise-js/ and a QR code is generated, containing the data required to do the handshake. This pairing QR code is timeboxed, meaning that after 2 minutes, it will become invalid and a new QR code must be generated.
  2. Alice scans the QR code using a mobile phone. This will open the app with the QR code parameters initiating the handshake process which is described in 43/WAKU2-DEVICE-PAIRING. These messages are exchanged between two devices over Waku to establish a secure connection. The handshake messages consist of three main parts: the initiator's message, the responder's message, and the final message, which are exchanged to establish a secure connection. While using js-noise, the developer is abstracted of this process, since the messaging happens automatically depending on the actions performed by the actors in the pairing process.
  3. Both Alice and Bob will be asked to verify each other's identity. This is done by confirming that an 8-digit authorization code matches on both devices. If both actors confirm that the authorization code is valid, the handshake concludes successfully.
  4. Alice and Bob receive a set of shared keys that can be used to start exchanging encrypted messages. The shared secret keys generated during the handshake process are used to encrypt and decrypt messages sent between the devices. This ensures that the messages exchanged between the devices are secure and cannot be intercepted or modified by an attacker.

The above example demonstrates device pairing using js-waku. Additionally, you can also try building and experimenting with other noise implementations like nwaku or go-waku, with an example available at https://github.com/waku-org/go-waku/tree/master/examples/noise in which the same flow described before is done with Bob (the receiver) using go-waku instead of js-waku.

Conclusion

With its easy-to-use API built on top of the Noise Protocol framework and the LibP2P networking stack, Waku is an excellent choice for any developer looking to implement secure messaging in applications that are both decentralized and censorship resistant — definitely worth checking out!

Waku is also open source under the MIT and APACHEv2 licenses, which means that developers are encouraged to contribute code, report bugs, and suggest improvements to make it even better.

Don't hesitate to try the live example at https://examples.waku.org/noise-js and build your own webapp using https://github.com/waku-org/js-noise, https://github.com/waku-org/js-waku and https://github.com/waku-org/go-waku. This will give you a hands-on experience of implementing secure communication protocols using the Noise Protocol framework in a practical setting. Happy coding!

References

]]>
+
+ + <![CDATA[The Future of Waku Network: Scaling, Incentivization, and Heterogeneity]]> + https://vac.dev/rlog/future-of-waku-network + https://vac.dev/rlog/future-of-waku-network + Mon, 03 Apr 2023 00:00:00 GMT + + Learn how the Waku Network is evolving through scaling, incentivization, and diverse ecosystem development and what the future might look like.

Waku is preparing for production with a focus on the Status Communities use case. In this blog post, we will provide an overview of recent discussions and research outputs, aiming to give you a better understanding of what the Waku network may look like in terms of scaling and incentivization.

DOS Mitigation for Status Communities

Waku is actively exploring DOS mitigation mechanisms suitable for Status Communities. While RLN +(Rate Limiting Nullifiers) remains the go-to DOS protection solution due to its privacy-preserving and +censorship-resistant properties, there is still more work to be done. We are excited to collaborate with PSE +(Privacy & Scaling Explorations) in this endeavor. Learn more about their latest progress in this tweet.

A Heterogeneous Waku Network

As we noted in a previous forum post, Waku's protocol +incentivization model needs to be flexible to accommodate various business models. Flexibility ensures that projects +can choose how they want to use Waku based on their specific needs.

Reversing the Incentivization Question

Traditionally, the question of incentivization revolves around how to incentivize operators to run nodes. We'd like to +reframe the question and instead ask, "How do we pay for the infrastructure?"

Waku does not intend to offer a free lunch. +Ethereum's infrastructure is supported by transaction fees and inflation, with validators receiving rewards from both sources. +However, this model does not suit a communication network like Waku. +Users and platforms would not want to pay for every single message they send. Additionally, Waku aims to support instant +ephemeral messages that do not require consensus or long-term storage.

Projects that use Waku to enable user interactions, whether for chat messages, gaming, private DeFi, notifications, or +inter-wallet communication, may have different value extraction models. Some users might provide services for the +project and expect to receive value by running nodes, while others may pay for the product or run infrastructure to +contribute back. Waku aims to support each of these use cases, which means there will be various ways to "pay for the +infrastructure."

In his talk, Oskar addressed two strategies: RLN and service credentials.

RLN and Service Credentials

RLN enables DOS protection across the network in a privacy-preserving and permission-less manner: stake in a contract, +and you can send messages.

Service credentials establish a customer-provider relationship. Users might pay to have messages they are interested in +stored and served by a provider. Alternatively, a community owner could pay a service provider to host their community.

Providers could offer trial or limited free services to Waku users, similar to Slack or Discord. Once a trial is expired or outgrown, +a community owner could pay for more storage or bandwidth, similar to Slack's model. +Alternatively, individual users could contribute financially, akin to Discord's Server Boost, or by sharing their own +resources with their community.

We anticipate witnessing various scenarios across the spectrum: from users sharing resources to users paying for access to the network and everything in between.

Waku Network: Ethereum or Cosmos?

Another perspective is to consider whether the Waku network will resemble Ethereum or Cosmos.

For those not familiar with the difference between both, in a very concise manner:

  • Ethereum is a set of protocols and software that are designed to operate on one common network and infrastructure
  • Cosmos is a set of protocols and software (SDKs) designed to be deployed in separate yet interoperable networks and infrastructures by third parties

We want Waku to be decentralized to provide censorship resistance and privacy-preserving communication. +If each application has to deploy its own network, we will not achieve this goal. +Therefore, we aim Waku to be not only an open source set of protocols, but also a shared infrastructure that anyone can leverage to build applications on top, with some guarantees in terms of decentralization and anonymity. +This approach is closer in spirit to Ethereum than Cosmos. +Do note that, similarly to Ethereum, anyone is free to take Waku software and protocols and deploy their own network.

Yet, because of the difference in the fee model, the Waku Network is unlikely to be as unified as Ethereum's. +We currently assume that there will be separate gossipsub networks with different funding models. +Since there is no consensus on Waku, each individual operator can decide which network to support, enabling Waku to maintain its permission-less property.

Most likely, the Waku network will be heterogeneous, and node operators will choose the incentivization model they prefer.

Scalability and Discovery Protocols

To enable scalability, the flow of messages in the Waku network will be divided into shards, so that not every node has to forward every message of the whole network. Discovery protocols will facilitate users connecting to the right nodes to receive the messages they are interested in.

Different shards could be subject to a variety of rate limiting techniques (globally, targeted to that shard or something in-between).

Marketplace protocols may also be developed to help operators understand how they can best support the network and where their resources are most needed. However, we are still far from establishing, or even asserting the need for, such a marketplace.

Open Problems

Splitting traffic between shards reduces bandwidth consumption for every Waku Relay node. +This improvement increases the likelihood that users with home connections can participate and contribute to the gossipsub network without encountering issues.

However, it does not cap traffic. +There are still open problems regarding how to guarantee that someone can use Waku with lower Internet bandwidth or run critical services, such as a validation node, on the same connection.

We have several ongoing initiatives:

  • Analyzing the Status Community protocol to confirm efficient usage of Waku [4]
  • Simulating the Waku Network to measure actual bandwidth usage [5]
  • Segregating chat messages from control and media messages [6]

The final solution will likely be a combination of protocols that reduce bandwidth usage or mitigate the risk of DOS attacks, providing flexibility for users and platforms to enable the best experience.

The Evolving Waku Network

The definition of the "Waku Network" will likely change over time. In the near future, it will transition from a single +gossipsub network to a sharded set of networks unified by a common discovery layer. This change will promote scalability +and allow various payment models to coexist within the Waku ecosystem.

In conclusion, the future of Waku Network entails growth, incentivization, and heterogeneity while steadfastly +maintaining its core principles. As Waku continues to evolve, we expect it to accommodate a diverse range of use cases +and business models, all while preserving privacy, resisting censorship, avoiding surveillance, and remaining accessible +to devices with limited resources.

References

  1. 51/WAKU2-RELAY-SHARDING
  2. 57/STATUS-Simple-Scaling
  3. 58/RLN-V2
  4. Scaling Status Communities: Potential Problems
  5. Waku Network Testing
  6. 51/WAKU2-RELAY-SHARDING: Control Message Shards
]]>
+
+ + <![CDATA[Waku for All Decentralized Applications and Infrastructures]]> + https://vac.dev/rlog/waku-for-all + https://vac.dev/rlog/waku-for-all + Tue, 08 Nov 2022 00:00:00 GMT + + Waku is an open communication protocol and network. Decentralized apps and infrastructure can use Waku for their +communication needs. It is designed to enable dApps and decentralized infrastructure projects to have secure, private, +scalable communication. Waku is available in several languages and platforms, from Web to mobile to desktop to cloud. +Initially, we pushed Waku adoption to the Web ecosystem; we learned that Waku is usable in a variety of complex applications +and infrastructure projects. We have since prioritized our effort to make Waku usable on various platforms and environments.

Background

We have built Waku to be the communication layer for Web3. Waku is a collection of protocols to choose from for your messaging needs. It enables secure, censorship-resistant, privacy-preserving, spam-protected communication for its users. It is designed to run on any device, from mobile to the cloud.

Waku is available on many systems and environments and used by several applications and SDKs for decentralized communications.

This involved research efforts in various domains: conversational security, protocol incentivization, zero-knowledge, +etc.

Waku uses novel technologies. Hence, we knew that early dogfooding of Waku was necessary. Even if research +was still in progress [1]. Thus, as soon as Waku protocols and software were usable, we started to push +for the adoption of Waku. This started back in 2021.

Waku is the communication component of the Web3 trifecta. This trifecta was Ethereum (contracts), Swarm (storage) and Whisper (communication). Hence, it made sense to first target dApps which already use one of the pillars: Ethereum.

As most dApps are web apps, we started the development of js-waku for the browser.

Once ready, we reached out to dApps to integrate Waku, added prizes to hackathons +and gave talks.

We also assumed we would see patterns in the usage of Waku, that we would facilitate with the help of +SDKs.

Finally, we created several web apps: +examples +and PoCs.

By discussing with Waku users and watching it being used, we learned a few facts:

  1. The potential use cases for Waku are varied and many:
  1. Many projects are interested in having an embedded chat in their dApp,
  2. There are complex applications that need Waku as a solution. Taking RAILGUN as an example:
  • Web wallet
  • + React Native mobile wallet
  • + NodeJS node/backend.

(1) means that it is not that easy to create SDKs for common use cases.

(2) was a clear candidate for an SDK. Yet, building a chat app is a complex task. Hence, the Status app team tackled +this in the form of Status Web.

Finally, (3) was the most important lesson. We learned that multi-tier applications need Waku for decentralized and +censorship-resistant communications. For these projects, js-waku is simply not enough. They need Waku to work in their +Golang backend, Unity desktop game and React Native mobile app.

We understood that we should see the whole Waku software suite +(js-waku, +nwaku, +go-waku, +waku-react-native, +etc) as an asset for its success. +That we should not limit outreach, marketing, documentation efforts to the web, but target all platforms.

From a market perspective, we identified several actors:

  • platforms: Projects that use Waku to handle communication,
  • operators: Operators run Waku nodes and are incentivized to do so,
  • developers: Developers are usually part of a platform or solo hackers learning Web3,
  • contributors: Developers and researchers with interests in decentralization, privacy, censorship-resistance, +zero-knowledge, etc.

Waku for All Decentralized Applications and Infrastructures

In 2022, we shifted our focus to make the various Waku implementations usable and used.

We made Waku multi-platform.

We shifted Waku positioning to leverage all Waku implementations and better serve the user's needs:

We are consolidating the documentation for all implementations on a single website (work in progress) +to improve developer experience.

This year, we also started the operator outreach effort to push for users to run their own Waku nodes. We have +recently concluded our first operator trial run. +Nwaku's documentation, stability and performance has improved. It is now easier to +run your own Waku node.

Today, operator wannabes most likely run their own nodes to support or use the Waku network. +We are dogfooding +Waku RLN, our novel economic spam protection protocol, +and looking at incentivizing the Waku Store protocol. +This way, we are adding reasons to run your own Waku node.

For those who were following us in 2021, know that we are retiring the Waku Connect branding in favour of the Waku +branding.

Waku for Your Project

As discussed, Waku is now available on various platforms. The question remains: How can Waku benefit your project?

Here are a couple of use cases we recently investigated:

Layer-2 Decentralization

Most roll-ups ([2], [3]) use a centralized sequencer or equivalent. Running several sequencers is not as straightforward as running several execution nodes. Waku can help:

  • Provide a neutral marketplace for a mempool: If sequencers compete for L2 tx fees, they may not be incentivized to share transactions with other sequencers. Waku nodes can act as a neutral network to enable all sequencers to access transactions.
  • Enable censorship-resistant wallet<>L2 communication,
  • Provide rate limiting mechanism for spam protection: Using RLN to prevent DDOS.

Device pairing and communication

With Waku Device Pairing, a user can set up a secure encrypted communication channel between their devices. As this channel would operate over Waku, it would be censorship-resistant and privacy preserving. These two devices could be:

  • Ethereum node and mobile phone to access a remote admin panel,
  • Alice's phone and Bob's phone for any kind of secure communication,
  • Mobile wallet and desktop/browser dApp for transaction and signature exchange.

Check js-waku#950 for the latest update on this.

Get Involved

Developer? Grab any of the Waku implementations and integrate it in your app: https://waku.org/platform.

Researcher? See https://vac.dev/contribute to participate in Waku research.

Tech-savvy? Try to run your own node: https://waku.org/operator.

Otherwise, play around with the various web examples.

If you want to help, we are hiring!

Moving Forward

What you can expect next:


References

  • [1] Waku is modular; it is a suite of protocols; hence some Waku protocols may be mature, while +new protocols are still being designed. Which means that research continues to be ongoing while +Waku is already used in production.
  • [2] The Optimism Foundation runs the only block producer on the Optimism network.
  • [3] Top 10 L2s are documented as having a centralized operator.
]]>
+
+ + <![CDATA[Building Privacy-Protecting Infrastructure]]> + https://vac.dev/rlog/building-privacy-protecting-infrastructure + https://vac.dev/rlog/building-privacy-protecting-infrastructure + Fri, 04 Nov 2022 12:00:00 GMT + + What is privacy-protecting infrastructure? Why do we need it and how we can build it? We'll look at Waku, the communication layer for Web3. We'll see how it uses ZKPs to incentivize and protect the Waku network. We'll also look at Zerokit, a library that makes it easier to use ZKPs in different environments. After reading this, I hope you'll better understand the importance of privacy-protecting infrastructure and how we can build it.

This write-up is based on a talk given at DevCon 6 in Bogota, a video can be found here

Intro

In this write-up, we are going to talk about building privacy-protecting +infrastructure. What is it, why do we need it and how can we build it?

We'll look at Waku, the communication layer for Web3. We'll look at how we are +using Zero Knowledge (ZK) technology to incentivize and protect the Waku +network. We'll also look at Zerokit, a library we are writing to make ZKP easier +to use in different environments.

At the end of this write-up, I hope you'll come away with an understanding of +the importance of privacy-protecting infrastructure and how we can build it.

About

First, briefly about Vac. We build public good protocols for the decentralized +web, with a focus on privacy and communication. We do applied research based on +which we build protocols, libraries and publications. We are also the custodians +of protocols that reflect a set of principles.

Principles

It has its origins in the Status app and trying to improve +the underlying protocols and infrastructure. We build Waku, +among other things.

Why build privacy-protecting infrastructure?

Privacy is the power to selectively reveal yourself. It is a requirement for +freedom and self-determination.

Just like you need decentralization in order to get censorship-resistance, you +need privacy to enable freedom of expression.

To build applications that are decentralized and privacy-protecting, you need +the base layer, the infrastructure itself, to have those properties.

We see this a lot. It is easier to make trade-offs at the application layer than +doing them at the base layer. You can build custodial solutions on top of a +decentralized and non-custodial network where participants control their own +keys, but you can't do the opposite.

If you think about it, buildings can be seen as a form of privacy-protecting +infrastructure. It is completely normal and obvious in many ways, but when it +comes to the digital realm our mental models and way of speaking about it hasn't +caught up yet for most people.

I'm not going too much more into the need for privacy or what happens when you +don't have it, but suffice to say it is an important property for any open +society.

When we have conversations, true peer-to-peer offline conversations, we can talk +privately. If we use cash to buy things we can do commerce privately.

On the Internet, great as it is, there are a lot of forces that makes this +natural state of things not the default. Big Tech has turned users into a +commodity, a product, and monetized user's attention for advertising. To +optimize for your attention they need to surveil your habits and activities, and +hence breach your privacy. As opposed to more old-fashioned models, where +someone is buying a useful service from a company and the incentives are more +aligned.

We need to build credibly neutral infrastructure that protects your privacy at +the base layer, in order to truly enable applications that are +censorship-resistant and encourage meaningful freedom of expression.

Web3 infrastructure

Infrastructure is what lies underneath. Many ways of looking at this but I'll +keep it simple as per the original Web3 vision. You had Ethereum for +compute/consensus, Swarm for storage, and Whisper for messaging. Waku has taken +over the mantle from Whisper and is a lot more +usable today than Whisper ever was, +for many reasons.

Web3 Infrastructure

On the privacy-front, we see how Ethereum is struggling. It is a big UX problem, +especially when you try to add privacy back "on top". It takes a lot of effort +and it is easier to censor. We see this with recent action around Tornado Cash. +Compare this with something like Zcash or Monero, where privacy is there by +default.

There are also problems when it comes to the p2p networking side of things, for +example with Ethereum validator privacy and hostile actors and jurisdictions. If +someone can easily find out where a certain validator is physically located, +that's a problem in many parts of the world. Being able to have stronger +privacy-protection guarantees would be very useful for high-value targets.

This doesn't begin to touch on the so called "dapps" that make a lot of +sacrifices in how they function, from the way domains work, to how websites are +hosted and the reliance on centralized services for communication. We see this +time and time again, where centralized, single points of failure systems work +for a while, but then eventually fail.

In many cases an individual user might not care enough though, and for platforms +the lure to take shortcuts is strong. That is why it is important to be +principled, but also pragmatic in terms of the trade-offs that you allow on top. +We'll touch more on this in the design goals around modularity that Waku has.

ZK for privacy-protecting infrastructure

ZKPs are a wonderful new tool. Just like smart contracts enable programmable money, ZKPs allow us to express fundamentally new things. In line with the great tradition of trust-minimization, we can prove statements while revealing the absolute minimum information necessary. This fits the definition of privacy, the power to selectively reveal yourself, perfectly. I'm sure I don't need to tell anyone reading this, but this is truly revolutionary. The technology is advancing extremely fast and often it is our imagination that is the limit.

Zero knowledge

Waku

What is Waku? It is a set of modular protocols for p2p communication. It has a focus on privacy, security and being able to run anywhere. It is the spiritual successor to Whisper.

By modular we mean that you can pick and choose protocols and how you use them +depending on constraints and trade-offs. For example, bandwidth usage vs +privacy.

It is designed to work in resource restricted environments, such as mobile +phones and in web browsers. It is important that infrastructure meets users +where they are and supports their real-world use cases. Just like you don't need +your own army and a castle to have your own private bathroom, you shouldn't need +to have a powerful always-on node to get reasonable privacy and +censorship-resistance. We might call this self-sovereignty.

Waku - adaptive nodes

One way of looking at Waku is as an open service network. There are nodes with +varying degrees of capabilities and requirements. For example when it comes to +bandwidth usage, storage, uptime, privacy requirements, latency requirements, +and connectivity restrictions.

We have a concept of adaptive nodes that can run a variety of protocols. A node +operator can choose which protocols they want to run. Naturally, there'll be +some nodes that do more consumption and other nodes that do more provisioning. +This gives rise to the idea of a service network, where services are provided +for and consumed.

Adaptive Nodes

Waku - protocol interactions

There are many protocols that interact. Waku Relay protocol is based on libp2p +GossipSub for p2p messaging. We have filter for bandwidth-restricted nodes to +only receive subset of messages. Lightpush for nodes with short connection +windows to push messages into network. Store for nodes that want to retrieve +historical messages.

On the payload layer, we provide support for Noise handshakes/key-exchanges. This means that as a developer, you can get end-to-end encryption and expected guarantees out of the box. We have support for setting up a secure channel from scratch, and all of this paves the way for providing Signal's Double Ratchet at the protocol level much more easily. We also have experimental support for multi-device usage. Similar features have existed in, for example, the Status app for a while, but with this we make it easier for any platform using Waku to use it.

There are other protocols too, related to peer discovery, topic usage, etc. See +specs for more details.

Protocol Interactions

Waku - Network

For the Waku network, there are a few problems. For example, when it comes to +network spam and incentivizing service nodes. We want to address these while +keeping privacy-guarantees of the base layer. I'm going to go into both of +these.

The spam problem arises on the gossip layer when anyone can overwhelm the +network with messages. The service incentivization is a problem when nodes don't +directly benefit from the provisioning of a certain service. This can happen if +they are not using the protocol directly themselves as part of normal operation, +or if they aren't socially inclined to provide a certain service. This depends a +lot on how an individual platform decides to use the network.

Waku Network

Dealing with network spam and RLN Relay

Since the p2p relay network is open to anyone, there is a problem with spam. If +we look at existing solutions for dealing with spam in traditional messaging +systems, a lot of entities like Google, Facebook, Twitter, Telegram, Discord use +phone number verification. While this is largely sybil-resistant, it is +centralized and not private at all.

Historically, Whisper used PoW which isn't good for heterogeneous networks. Peer scoring is open to sybil attacks and doesn't directly address spam protection in an anonymous p2p network.

The key idea here is to use RLN for private economic spam protection using +zkSNARKs.

I'm not going to go into too much detail of RLN here. If you are interested, I +gave a talk in Amsterdam at +Devconnect about this. We have some write-ups on RLN +here by Sanaz who has been pushing a lot of this +from our side. There's also another talk at Devcon by Tyler going into RLN in +more detail. Finally, here's the RLN spec.

I'll briefly go over what it is, the interface and circuit and then talk about +how it is used in Waku.

RLN - Overview and Flow

RLN stands for Rate Limiting Nullifier. It is an anonymous rate limiting mechanism based on zkSNARKs. By rate limiting we mean you can only send N messages in a given period. By anonymity we mean that you can't link a message to a publisher. We can think of it as a voting booth, where you are only allowed to vote once every election.

Voting Booth

It can be used for spam protection in p2p messaging systems, and also rate +limiting in general, such as for a decentralized captcha.

There are three parts to it. You register somewhere, then you can signal and +finally there's a verification/slashing phase. You put some capital at risk, +either economic or social, and if you double signal you get slashed.

RLN - Circuit

Here's what the private and public inputs to the circuit look like. The identity secret is generated locally, and we create an identity commitment that is inserted into a Merkle tree. We then use Merkle proofs to prove membership. A registered member can only signal once for a given epoch or external nullifier, for example every ten seconds in Unix time. The RLN identifier is for a specific RLN app.

We also see what the circuit output looks like. This is calculated locally. y +is a share of the secret equation, and the (internal) nullifier acts as a unique +fingerprint for a given app/user/epoch combination. How do we calculate y and +the internal nullifier?

// Private input
signal input identity_secret;
signal input path_elements[n_levels][1];
signal input identity_path_index[n_levels];

// Public input
signal input x; // signal_hash
signal input epoch; // external_nullifier
signal input rln_identifier;

// Circuit output
signal output y;
signal output root;
signal output nullifier;

RLN - Shamir's secret sharing

This is done using Shamir's secret sharing. Shamir's secret sharing is based on the idea of splitting a secret into shares. This is how we enable slashing of funds.

In this case, we have two shares. If a given identity a0 signals twice in +epoch/external nullifier, a1 is the same. For a given RLN app, +internal_nullifier then stays the same. x is signal hash which is different, +and y is public, so we can reconstruct identity_secret. With the identity +secret revealed, this gives access to e.g. financial stake.

a_0 = identity_secret // secret S
a_1 = poseidonHash([a0, external_nullifier])

y = a_0 + x * a_1

internal_nullifier = poseidonHash([a_1, rln_identifier])

Shamir&#39;s secret sharing

RLN Relay

This is how RLN is used with Relay/GossipSub protocol. A node registers and +locks up funds, and after that it can send messages. It publishes a message +containing the Zero Knowledge proof and some other details.

Each relayer node listens to the membership contract for new members, and it +also keeps track of relevant metadata and merkle tree. Metadata is needed to be +able to detect double signaling and perform slashing.

Before forwarding a message, it does some verification checks to ensure there are no duplicate messages, the ZKP is valid and no double signaling has occurred. It is worth noting that this can be combined with peer scoring, for example for duplicate messages or invalid ZK proofs.

In line with Waku's goals of modularity, RLN Relay is applied on a specific subset of pubsub and content topics. You can think of it as an extra secure channel.

RLN Relay

RLN Relay cross-client testnet

Where are we with RLN Relay deployment? We've recently launched our second +testnet. This is using RLN Relay with a smart contract on Goerli. It integrates +with our example p2p chat application, and it does so through three different +clients, nwaku, go-waku and js-waku for browsers. This is our first p2p +cross-client testnet for RLN Relay.

Here's a video that shows a user registering in a browser, signaling through JS-Waku. It then gets relayed to a nwaku node, which verifies the proof. The second video shows what happens in the spam case. When more than one message is sent in a given epoch, it detects it as spam and discards it. Slashing hasn't been implemented fully yet in the client and is a work in progress.

If you are curious and want to participate, you can join the effort on our Vac +Discord. We also have +tutorials +setup for all clients so you can play around with it.

As part of this, and to make it work in multiple different environments, we've +also been developing a new library called Zerokit. I'll talk about this a bit +later.

Private settlement / Service credentials

Going back to the service network idea, let's talk about service credentials. +The idea behind service credentials and private settlement is to enable two +actors to pay for and provide services without compromising their privacy. We do +not want the payment to create a direct public link between the service provider +and requester.

Recall the Waku service network illustration with adaptive nodes that choose +which protocols they want to run. Many of these protocols aren't very heavy and +just work by default. For example the relay protocol is enabled by default. +Other protocols are much heavier to provide, such as storing historical +messages.

It is desirable to have additional incentives for this, especially for platforms +that aren't community-based where some level of altruism can be assumed (e.g. +Status Communities, or WalletConnect cloud infrastructure).

You have a node Alice that is often offline and wants to consume historical +messages on some specific content topics. You have another node Bob that runs a +server at home where they store historical messages for the last several weeks. +Bob is happy to provide this service for free because he's excited about running +privacy-preserving infrastructure and he's using it himself, but his node is +getting overwhelmed by freeloaders and he feels like he should be paid something +for continuing to provide this service.

Alice deposits some funds in a smart contract which registers it in a tree, +similar to certain other private settlement mechanisms. A fee is taken or +burned. In exchange, she gets a set of tokens or service credentials. When she +wants to do a query with some criteria, she sends this to Bob. Bob responds with +size of response, cost, and receiver address. Alice then sends a proof of +delegation of a service token as a payment. Bob verifies the proof and resolves +the query.

The end result is that Alice has consumed some service from Bob, and Bob has +received payment for this. There's no direct transaction link between Alice and +Bob, and gas fees can be minimized by extending the period before settling on +chain.

This can be complemented with altruistic service provisioning, for example by +splitting the peer pool into two slots, or only providing a few cheap queries +for free.

The service provisioning is general, and can be generalized for any kind of +request/response service provisioning that we want to keep private.

This isn't a perfect solution, but it is an incremental improvement on top of +the status quo. It can be augmented with more advanced techniques such as better +non-repudiable node reputation, proof of correct service provisioning, etc.

We are currently in the raw spec / proof of concept stage of this. We expect to +launch a testnet of this later this year or early next year.

Service credentials flow

Zerokit

Zerokit is a set of Zero Knowledge modules, +written in Rust and designed to be used in many different environments. The +initial goal is to get the best of both worlds with Circom/Solidity/JS and +Rust/ZK ecosystem. This enables people to leverage Circom-based constructs from +non-JS environments.

For the RLN module, it is using Circom circuits via ark-circom and Rust for +scaffolding. It exposes a C FFI API that can be used through other system +programming environments, like Nim and Go. It also exposes an experimental WASM +API that can be used through web browsers.

Waku is p2p infrastructure running in many different environments, such as +Nim/JS/Go/Rust, so this is a requirement for us.

Circom and JS strengths are access to Dapp developers, tooling, generating +verification code, circuits etc. Rust's strengths are that it is systems-based and +easy to interface with other language runtimes such as Nim, Go, Rust, C. It also +gives access to other Rust ZK ecosystems such as arkworks. This opens the door for +using other constructs, such as Halo2. This becomes especially relevant for +constructs where you don't want to do a trusted setup or where circuits are more +complex/custom and performance requirements are higher.

In general with Zerokit, we want to make it easy to build and use ZKP in a +multitude of environments, such as mobile phones and web browsers. Currently it +is too complex to write privacy-protecting infrastructure with ZKPs considering +all the languages and tools you have to learn, from JS, Solidity and Circom to +Rust, WASM and FFI. And that isn't even touching on things like secure key +storage or mobile dev. Luckily more and more projects are working on this, +including writing DSLs etc. It'd also be exciting if we can make a useful +toolstack for JS-less ZK dev to reduce cognitive overhead, similar to what we +have with something like Foundry.

Other research

I also want to mention a few other things we are doing. One thing is +protocol specifications. We think this is very important +for p2p infra, and we see a lot of other projects that claim to do it p2p +infrastructure but they aren't clear about guarantees or how stable something +is. That makes it hard to have multiple implementations, to collaborate across +different projects, and to analyze things objectively.

Related to that is publishing papers. We've put +out three so far, related to Waku and RLN-Relay. This makes it easier to +interface with academia. There's a lot of good researchers out there and we want +to build a better bridge between academia and industry.

Another thing is network +privacy. Waku is modular with +respect to privacy guarantees, and there are a lot of knobs to turn here +depending on specific deployments. For example, if you are running the full +relay protocol you currently have much stronger receiver anonymity than if you +are running filter protocol from a bandwidth or connectivity-restricted node.

We aim to make this pluggable depending on user needs. E.g. mixnets such as Nym +come with some trade-offs but are a useful tool in the arsenal. A good mental +model to keep in mind is the anonymity trilemma, where you can only pick 2/3 out +of low latency, low bandwidth usage and strong anonymity.

We are currently exploring Dandelion-like +additions to the relay/gossip +protocol, which would provide for stronger sender anonymity, especially in a +multi-node/botnet attacker model. As part of this we are looking into different +parameters choices and general possibilities for lower latency usage. This could +make it more amenable for latency sensitive environments, such as validator +privacy, for specific threat models. The general theme here is we want to be +rigorous with the guarantees we provide, under what conditions and for what +threat models.

Another thing mentioned earlier is Noise payload +encryption, and specifically things like allowing +for pairing different devices with e.g. QR codes. This makes it easier for +developers to provide secure messaging in many realistic scenarios in a +multi-device world.

Other research

Summary

We've gone over what privacy-protecting infrastructure is, why we want it and +how we can build it. We've seen how ZK is a fundamental building block for this. +We've looked at Waku, the communication layer for Web3, and how it uses Zero +Knowledge proofs to stay private and function better. We've also looked at +Zerokit and how we can make it easier to do ZKP in different environments.

Finally we also looked at some other research we've been doing. All of the +things mentioned in this article, and more, is available as +write-ups, specs, or +discussions on our forum or Github.

If you find any of this exciting to work on, feel free to reach out on our +Discord. We are also hiring, and we have started +expanding into other privacy infrastructure tech like private and provable +computation with ZK-WASM.

]]>
+
+ + <![CDATA[Waku Privacy and Anonymity Analysis Part I: Definitions and Waku Relay]]> + https://vac.dev/rlog/wakuv2-relay-anon + https://vac.dev/rlog/wakuv2-relay-anon + Fri, 22 Jul 2022 10:00:00 GMT + + Introducing a basic threat model and privacy/anonymity analysis for the Waku v2 relay protocol.

Waku v2 enables secure, privacy preserving communication using a set of modular P2P protocols. +Waku v2 also aims at protecting the user's anonymity. +This post is the first in a series about Waku v2 security, privacy, and anonymity. +The goal is to eventually have a full privacy and anonymity analysis for each of the Waku v2 protocols, as well as covering the interactions of various Waku v2 protocols. +This provides transparency with respect to Waku's current privacy and anonymity guarantees, and also identifies weak points that we have to address.

In this post, we first give an informal description of security, privacy and anonymity in the context of Waku v2. +For each definition, we summarize Waku's current guarantees regarding the respective property. +We also provide attacker models, an attack-based threat model, and a first anonymity analysis of Waku v2 relay within the respective models.

Waku comprises many protocols that can be combined in a modular way. +For our privacy and anonymity analysis, we start with the relay protocol because it is at the core of Waku v2 enabling Waku's publish subscribe approach to P2P messaging. +In its current form, Waku relay is a minor extension of libp2p GossipSub.

Figure 1: The Waku v2 relay mesh is based on the [GossipSub mesh](https://docs.libp2p.io/concepts/publish-subscribe#types-of-peering)

Informal Definitions: Security, Privacy, and Anonymity

The concepts of security, privacy, and anonymity are linked and have quite a bit of overlap.

Security

Of the three, Security has the clearest agreed upon definition, +at least regarding its key concepts: confidentiality, integrity, and availability.

  • confidentiality: data is not disclosed to unauthorized entities.
  • integrity: data is not modified by unauthorized entities.
  • availability: data is available, i.e. accessible by authorized entities.

While these are the key concepts, the definition of information security has been extended over time including further concepts, +e.g. authentication and non-repudiation. +We might cover these in future posts.

Privacy

Privacy allows users to choose which data and information

  • they want to share
  • and with whom they want to share it.

This includes data and information that is associated with and/or generated by users. +Protected data also comprises metadata that might be generated without users being aware of it. +This means, no further information about the sender or the message is leaked. +Metadata that is protected as part of the privacy-preserving property does not cover protecting the identities of sender and receiver. +Identities are protected by the anonymity property.

Often privacy is realized by the confidentiality property of security. +This neither makes privacy and security the same, nor the one a sub category of the other. +While security is abstract itself (its properties can be realized in various ways), privacy lives on a more abstract level using security properties. +Privacy typically does not use integrity and availability. +An adversary who has no access to the private data, because the message has been encrypted, could still alter the message.

Waku offers confidentiality via secure channels set up with the help of the Noise Protocol Framework. +Using these secure channels, message content is only disclosed to the intended receivers. +They also provide good metadata protection properties. +However, we do not have a metadata protection analysis as of yet, +which is part of our privacy/anonymity roadmap.

Anonymity

Privacy and anonymity are closely linked. +Both the identity of a user and data that allows inferring a user's identity should be part of the privacy policy. +For the purpose of analysis, we want to have a clearer separation between these concepts.

We define anonymity as unlinkability of users' identities and their shared data and/or actions.

We subdivide anonymity into receiver anonymity and sender anonymity.

Receiver Anonymity

We define receiver anonymity as unlinkability of users' identities and the data they receive and/or related actions. +The data transmitted via Waku relay must be a Waku message, which contains a content topic field. +Because each message is associated with a content topic, and each receiver is interested in messages with specific content topics, +receiver anonymity in the context of Waku corresponds to subscriber-topic unlinkability. +An example for the "action" part of our receiver anonymity definition is subscribing to a specific topic.

The Waku message's content topic is not related to the libp2p pubsub topic. +For now, Waku uses a single libp2p pubsub topic, which means messages are propagated via a single mesh of peers. +With this, the receiver discloses its participation in Waku on the gossipsub layer. +We will leave the analysis of libp2p gossipsub to a future article within this series, and only provide a few hints and pointers here.

Waku offers k-anonymity regarding content topic interest in the global adversary model. +K-anonymity in the context of Waku means an attacker can link receivers to content topics with a maximum certainty of $1/k$. +The larger $k$, the less certainty the attacker gains. +Receivers basically hide in a pool of $k$ content topics, any subset of which could be topics they subscribed to. +The attacker does not know which of those the receiver actually subscribed to, +and the receiver enjoys plausible deniability regarding content topic subscription. +Assuming there are $n$ Waku content topics, a receiver has $n$-anonymity with respect to association to a specific content topic.

Technically, Waku allows distributing messages over several libp2p pubsub topics. +This yields $k$-anonymity, assuming $k$ content topics share the same pubsub topic. +However, if done wrongly, such sharding of pubsub topics can breach anonymity. +A formal specification of anonymity-preserving topic sharding building on the concepts of partitioned topics is part of our roadmap.

Also, Waku is not directly concerned with 1:1 communication, so for this post, 1:1 communication is out of scope. +Channels for 1:1 communication can be implemented on top of Waku relay. +In the future, a 1:1 communication protocol might be added to Waku. +Similar to topic sharding, it would maintain receiver anonymity leveraging partitioned topics.

Sender Anonymity

We define sender anonymity as unlinkability of users' identities and the data they send and/or related actions. +Because the data in the context of Waku is Waku messages, sender anonymity corresponds to sender-message unlinkability.

In summary, Waku offers weak sender anonymity because of Waku's strict no sign policy, +which has its origins in the Ethereum consensus specs. +17/WAKU-RLN-RELAY and 18/WAKU2-SWAP mitigate replay and injection attacks.

Waku currently does not offer sender anonymity in stronger attacker models, as well as cannot protect against targeted attacks in weaker attacker models like the single or multi node attacker. +We will cover this in more detail in later sections.

Anonymity Trilemma

The Anonymity trilemma states that only two out of strong anonymity, low bandwidth, and low latency can be guaranteed in the global on-net attacker model. +Waku's goal, being a modular set of protocols, is to offer any combination of two out of these three properties, as well as blends. +An example for blending is an adjustable number of pubsub topics and peers in the respective pubsub topic mesh; this allows tuning the trade-off between anonymity and bandwidth.

Figure 2: Anonymity Trilemma: pick two.

A fourth factor that influences the anonymity trilemma is frequency and patterns of messages. +The more messages there are, and the more randomly distributed they are, the better the anonymity protection offered by a given anonymous communication protocol. +So, incentivising users to use the protocol, for instance by lowering entry barriers, helps protecting the anonymity of all users. +The frequency/patterns factor is also related to the above described k-anonymity.

Censorship Resistance

Another security related property that Waku aims to offer is censorship resistance. +Censorship resistance guarantees that users can participate even if an attacker tries to deny them access. +So, censorship resistance ties into the availability aspect of security. +In the context of Waku that means users should be able to send messages as well as receive all messages they are interested in, +even if an attacker tries to prevent them from disseminating messages or tries to deny them access to messages.

Currently, Waku only guarantees censorship resistance in the weak single node attacker model. +While currently employed secure channels mitigate targeted censorship, e.g. blocking specific content topics, +general censorship resistance in strong attacker models is part of our roadmap. +Among other options, we will investigate Pluggable Transports in future articles.

Attacker Types

The following lists various attacker types with varying degrees of power. +The more power an attacker has, the more difficult it is to gain the respective attacker position.

Each attacker type comes in a passive and an active variant. +While a passive attacker can stay hidden and is not suspicious, +the respective active attacker has more (or at least the same) deanonymization power.

We also distinguish between internal and external attackers.

Internal

With respect to Waku relay, an internal attacker participates in the same pubsub topic as its victims. +Without additional measures on higher layer protocols, access to an internal position is easy to get.

Single Node

This attacker controls a single node. +Because this position corresponds to normal usage of Waku relay, it is trivial to obtain.

Multi Node

This attacker controls several nodes. We assume a smaller static number of controlled nodes. +The multi node position can be achieved relatively easily by setting up multiple nodes. +Botnets might be leveraged to increase the number of available hosts. +Multi node attackers could use Sybil attacks to increase the number of controlled nodes. +A countermeasure is for nodes to only accept libp2p gossipsub graft requests from peers with different IP addresses, or even different subnets.

Linearly Scaling Nodes

This attacker controls a number of nodes that scales linearly with the number of nodes in the network. +This attacker is especially interesting to investigate in the context of DHT security, +which Waku uses for ambient peer discovery.

External

An external attacker can only see encrypted traffic (protected by a secure channel set up with Noise). +Because an internal position can be easily obtained, +in practice external attackers would mount combined attacks that leverage both internal and external attacks. +We cover this more below when describing attacks.

Local

A local attacker has access to communication links in a local network segment. +This could be a rogue access point (with routing capability).

AS

An AS attacker controls a single AS (autonomous system). +A passive AS attacker can listen to traffic on arbitrary links within the AS. +An active AS attacker can drop, inject, and alter traffic on arbitrary links within the AS.

In practice, a malicious ISP would be considered as an AS attacker. +A malicious ISP could also easily set up a set of nodes at specific points in the network, +gaining internal attack power similar to a strong multi node attacker.

Global On-Net

A global on-net attacker has complete overview over the whole network. +A passive global attacker can listen to traffic on all links, +while the active global attacker basically carries the traffic: it can freely drop, inject, and alter traffic at all positions in the network. +This basically corresponds to the Dolev-Yao model.

An entity with this power would, in practice, also have the power of the internal linearly scaling nodes attacker.

Attack-based Threat Analysis

The following lists various attacks including the weakest attacker model in which the attack can be successfully performed. +The respective attack can be performed in all stronger attacker models as well.

An attack is considered more powerful if it can be successfully performed in a weaker attacker model.

If not stated otherwise, we look at these attacks with respect to their capability to deanonymize the message sender.

Scope

In this post, we introduce a simple tightly scoped threat model for Waku v2 Relay, which will be extended in the course of this article series.

In this first post, we will look at the relay protocol in isolation. +Even though many threats arise from layers Waku relay is based on, and layers that in turn live on top of relay, +we want to first look at relay in isolation because it is at the core of Waku v2. +Addressing and trying to solve all security issues of a complex system at once is an overwhelming task, which is why we focus on the soundness of relay first.

This also goes well with the modular design philosophy of Waku v2, as layers of varying levels of security guarantees can be built on top of relay, all of which can rely on the guarantees that Waku provides. +Instead of looking at a multiplicative explosion of possible interactions, we look at the core in this article, and cover the most relevant combinations in future posts.

Further restricting the scope, we will look at the data field of a relay message as a black box. +In a second article on Waku v2 relay, we will look into the data field, which according to the specification of Waku v2 relay must be a Waku v2 message. +We only consider messages with version field 2, which indicates that the payload has to be encoded using 35/WAKU2-NOISE.

Prerequisite: Get a Specific Position in the Network

Some attacks require the attacker node(s) to be in a specific position in the network. +In most cases, this corresponds to trying to get into the mesh peer list for the desired pubsub topic of the victim node.

In libp2p gossipsub, and by extension Waku v2 relay, nodes can simply send a graft message for the desired topic to the victim node. +If the victim node still has open slots, the attacker gets the desired position. +This only requires the attacker to know the gossipsub multiaddress of the victim node.

A linearly scaling nodes attacker can leverage DHT based discovery systems to boost the probability of malicious nodes being returned, which in turn significantly increases the probability of attacker nodes ending up in the peer lists of victim nodes. +Waku v2 discv5 will employ countermeasures that mitigate the amplifying effect this attacker type can achieve.

Replay Attack

In the scope we defined above, Waku v2 is resilient against replay attacks. +GossipSub nodes, and by extension Waku relay nodes, feature a seen cache, and only relay messages they have not seen before. +Further, replay attacks will be punished by RLN and SWAP.

Neighbourhood Surveillance

This attack can be performed by a single node attacker that is connected to all peers of the victim node $v$ with respect to a specific topic mesh. +The attacker also has to be connected to $v$. +In this position, the attacker will receive messages $m_v$ sent by $v$ both on the direct path from $v$, and on indirect paths relayed by peers of $v$. +It will also receive messages $m_x$ that are not sent by $v$. These messages $m_x$ are relayed by both $v$ and the peers of $v$. +Messages that are received (significantly) faster from $v$ than from any other of $v$'s peers are very likely messages that $v$ sent, +because for these messages the attacker is one hop closer to the source.

The attacker can (periodically) measure latency between itself and $v$, and between itself and the peers of $v$ to get more accurate estimates for the expected timings. +An AS attacker (and if the topology allows, even a local attacker) could also learn the latency between $v$ and its well-behaving peers. +An active AS attacker could also increase the latency between $v$ and its peers to make the timing differences more prominent. +This, however, might lead to $v$ switching to other peers.

This attack cannot (reliably) distinguish messages $m_v$ sent by $v$ from messages $m_y$ relayed by peers of $v$ the attacker is not connected to. +Still, there are hop-count variations that might be leveraged. +Messages $m_v$ always have a hop-count of 1 on the path from $v$ to the attacker, while all other paths are longer. +Messages $m_y$ might have the same hop-count on the path from $v$ as well as on other paths.

Controlled Neighbourhood

If a multi node attacker manages to control all peers of the victim node $v$, it can trivially tell which messages originated from $v$.

Observing Messages

If Waku relay was not protected with Noise, the AS attacker could simply check for messages leaving $v$ which have not been relayed to $v$. +These are the messages sent by $v$. +Waku relay protects against this attack by employing secure channels set up using Noise.

Correlation

Monitoring all traffic (in an AS or globally), allows the attacker to identify traffic correlated with messages originating from $v$. +This (alone) does not allow an external attacker to learn which message $v$ sent, but it allows identifying the respective traffic propagating through the network. +The more traffic in the network, the lower the success rate of this attack.

Combined with just a few nodes controlled by the attacker, the actual message associated with the correlated traffic can eventually be identified.

DoS

An active single node attacker could run a disruption attack by

  • (1) dropping messages that should be relayed
  • (2) flooding neighbours with bogus messages

While (1) has a negative effect on availability, the impact is not significant. +A linearly scaling botnet attacker, however, could significantly disrupt the network with such an attack. +(2) is thwarted by RLN. +Also SWAP helps mitigating DoS attacks.

A local attacker can DoS Waku by dropping all Waku traffic within its controlled network segment. +An AS attacker can DoS Waku within its authority, while a global attacker can DoS the whole network. +A countermeasure is the use of censorship resistance techniques like Pluggable Transports.

Summary and Future Work

Currently, Waku v2 relay offers k-anonymity with respect to receiver anonymity. +This also includes k-anonymity towards legitimate members of the same topic.

Waku v2 relay offers sender anonymity in the single node attacker model with its strict no sign policy. +Currently, Waku v2 does not guarantee sender anonymity in the multi node and stronger attacker models. +However, we are working on modular anonymity-preserving protocols and building blocks as part of our privacy/anonymity roadmap. +The goal is to allow tunable anonymity with respect to trade offs between strong anonymity, low bandwidth, and low latency. +All of these cannot be fully guaranteed as the anonymity trilemma states. +Some applications have specific requirements, e.g. low latency, which require a compromise on anonymity. +Anonymity-preserving mechanisms we plan to investigate and eventually specify as pluggable anonymity protocols for Waku comprise

  • Dandelion++ for lightweight anonymity;
  • onion routing as a building block adding a low latency anonymization layer;
  • a mix network for providing strong anonymity (on top of onion routing) even in the strongest attacker model at the cost of higher latency.

These pluggable anonymity-preserving protocols will form a sub-set of the Waku v2 protocol set. +As an intermediate step, we might directly employ Tor for onion-routing, and Nym as a mix-net layer.

In future research log posts, we will cover further Waku v2 protocols and identify anonymity problems that will be added to our roadmap. +These protocols comprise

  • 13/WAKU2-STORE, which can violate receiver anonymity as it allows filtering by content topic. +A countermeasure is using the content topic exclusively for local filters.
  • 12/WAKU2-FILTER, which discloses nodes' interest in topics;
  • 19/WAKU2-LIGHTPUSH, which also discloses nodes' interest in topics and links the lightpush client as the sender of a message to the lightpush service node;
  • 21/WAKU2-FTSTORE, which discloses nodes' interest in specific time ranges allowing to infer information like online times.

While these protocols are not necessary for the operation of Waku v2, and can be seen as pluggable features, +we aim to provide alternatives without the cost of lowering the anonymity level.

References

]]>
+
+ + <![CDATA[Noise handshakes as key-exchange mechanism for Waku]]> + https://vac.dev/rlog/wakuv2-noise + https://vac.dev/rlog/wakuv2-noise + Tue, 17 May 2022 10:00:00 GMT + + We provide an overview of the Noise Protocol Framework as a tool to design efficient and secure key-exchange mechanisms in Waku2.

Introduction

In this post we will provide an overview of how Waku v2 users can adopt Noise handshakes to agree on cryptographic keys used to securely encrypt messages.

This process belongs to the class of key-exchange mechanisms, consisting of all those protocols that, with different levels of complexity and security guarantees, allow two parties to publicly agree on a secret without letting anyone else know what this secret is.

But why do we need key-exchange mechanisms in the first place?

With the advent of public-key cryptography, it became possible to decouple encryption from decryption through use of two distinct cryptographic keys: one public, used to encrypt information and that can be made available to anyone, and one private (kept secret), which enables decryption of messages encrypted with its corresponding public key. The same does not happen in the case of symmetric encryption schemes where, instead, the same key is used for both encryption and decryption operations and hence cannot be publicly revealed as for public keys.

In order to address specific application needs, many different public, symmetric and hybrid cryptographic schemes were designed: Waku v1 and Waku v2, which inherits part of their design from the Ethereum messaging protocol Whisper, provide support to both public-key primitives (ECIES, ECDSA) and symmetric primitives (AES-256-GCM, KECCAK-256), used to sign, hash, encrypt and decrypt exchanged messages.

In principle, when communications employ public-key based encryption schemes (ECIES, in the case of Waku), there is no need for a key-agreement among parties: messages can be directly encrypted using the recipient's public-key before being sent over the network. However, public-key encryption and decryption primitives are usually very inefficient in processing large amounts of data, and this may constitute a bottleneck for many of today's applications. Symmetric encryption schemes such as AES-256-GCM, on the other hand, are much more efficient, but the encryption/decryption key needs to be shared among users before any encrypted message is exchanged.

To counter the downsides given by each of these two approaches while taking advantage of their strengths, hybrid constructions were designed. In these, public-key primitives are employed to securely agree on a secret key which, in turn, is used with a symmetric cipher for encrypting messages. In other words, such constructions specify a (public-key based) key-agreement mechanism!

Waku, up to payload version 1, does not implement nor recommend any protocol for exchanging symmetric ciphers' keys, leaving such task to the application layer. It is important to note that the kind of key-agreement employed has a direct impact on the security properties that can be granted on later encrypted messages, while security requirements usually depend on the specific application for which encryption is needed in the first place.

In this regard, Status, which builds on top of Waku, implements a custom version of the X3DH key-agreement protocol, in order to allow users to instantiate end-to-end encrypted communication channels. However, although such a solution is optimal when applied to (distributed) E2E encrypted chats, it is not flexible enough to fit or simplify the variety of applications Waku aims to address. +Hence, proposing and implementing one or few key-agreements which provide certain (presumably strong) security guarantees, would inevitably degrade performance of all those applications for which, given their security requirements, more tailored and efficient key-exchange mechanisms can be employed.

Guided by different examples, in the following sections we will overview Noise, a protocol framework we are currently integrating in Waku, for building secure key-agreements between two parties. One of the great advantages of using Noise is that it is possible to add support for new key-exchanges by just specifying users' actions from a predefined list, requiring none to minimal modifications to existing implementations. Furthermore, Noise provides a framework to systematically analyze protocols' security properties and the corresponding attacker threat models. This allows not only to easily design new key-agreements eventually optimized for specific applications we want to address, but also to easily analyze or even formally verify any such custom protocol!

We believe that with its enormous flexibility and features, Noise represents a perfect candidate for bringing key-exchange mechanisms in Waku.

The Diffie-Hellman Key-exchange

The formalization of modern public-key cryptography started with the pioneering work of Whitfield Diffie and Martin Hellman, who detailed one of the earliest known key-agreement protocols: the famous Diffie-Hellman Key-Exchange.

Diffie-Hellman (DH) key-exchange is largely used today and represents the main cryptographic building block on which Noise handshakes' security is based.

In turn, the security of DH is based on a mathematical problem called discrete logarithm which is believed to be hard when the agreement is practically instantiated using certain elliptic curves $E$ defined over finite fields $\mathbb{F}_p$.

Informally, a DH exchange between Alice and Bob proceeds as follows:

  • Alice picks a secret scalar $s_A \in \mathbb{F}_p$ and computes, using the underlying curve's arithmetic, the point $P_A = s_A \cdot P \in E(\mathbb{F}_p)$ for a certain pre-agreed public generator $P$ of the elliptic curve $E(\mathbb{F}_p)$. She then sends $P_A$ to Bob.
  • Similarly, Bob picks a secret scalar $s_B \in \mathbb{F}_p$, computes $P_B = s_B \cdot P \in E(\mathbb{F}_p)$ and sends $P_B$ to Alice.
  • By commutativity of scalar multiplication, both Alice and Bob can now compute the point $P_{AB} = s_A s_B \cdot P$, using the elliptic curve point received from the other party and their secret scalar.

The assumed hardness of computing discrete logarithms in the elliptic curve ensures that it is not possible to compute $s_A$ or $s_B$ from $P_A$ and $P_B$, respectively. Another security assumption (named the Computational Diffie-Hellman assumption) ensures that it is not possible to compute $P_{AB}$ from $P$, $P_A$ and $P_B$. Hence the point $P_{AB}$ shared by Alice and Bob at the end of the above protocol cannot be efficiently computed by an attacker intercepting $P_A$ and $P_B$, and can then be used to generate a secret to be later employed, for example, as a symmetric encryption key.

On a side note, this protocol shows the interplay between two components typical to public-key based schemes: the scalars $s_A$ and $s_B$ can be seen as private keys associated to the public keys $P_A$ and $P_B$, respectively, which allow Alice and Bob only to compute the shared secret point $P_{AB}$.

Ephemeral and Static Public Keys

Although we assumed that it is practically impossible for an attacker to compute the randomly picked secret scalar from the corresponding public elliptic curve point, it may happen that such scalar gets compromised or can be guessed due to a faulty employed random number generator. In such cases, an attacker will be able to recover the final shared secret and all encryption keys eventually derived from that, with clear catastrophic consequences for the privacy of exchanged messages.

To mitigate such issues, multiple DH operations can be combined using two different types of exchanged elliptic curve points or, better, public keys: ephemeral keys, that is random keys used only once in a DH operation, and long-term static keys, used mainly for authentication purposes since employed multiple times.

Just to provide an example, let us suppose Alice and Bob perform the following custom DH-based key-exchange protocol:

  • Alice generates an ephemeral key $E_A = e_A \cdot P$ by picking a random scalar $e_A$ and sends $E_A$ to Bob;
  • Similarly, Bob generates an ephemeral key $E_B = e_B \cdot P$ and sends $E_B$ to Alice;
  • Alice and Bob compute $E_{AB} = e_A e_B \cdot P$ and from it derive a secret encryption key $k$.
  • Bob sends to Alice his static key $S_B = s_B \cdot P$ encrypted with $k$.
  • Alice encrypts with $k$ her static key $S_A = s_A \cdot P$ and sends it to Bob.
  • Alice and Bob decrypt the received static keys, compute the secret $S_{AB} = s_A s_B \cdot P$ and use it together with $E_{AB}$ to derive a new encryption key $\tilde{k}$ to be later used with a symmetric cipher.

In this protocol, if Alice's and/or Bob's static keys get compromised, it would not be possible to derive the final secret key $\tilde{k}$, since at least one ephemeral key among $E_A$ and $E_B$ has to be compromised too in order to recover the secret $E_{AB}$. Furthermore, since Alice's and Bob's long-term static keys are encrypted, an attacker intercepting exchanged (encrypted) public keys will not be able to link such communication to Alice or Bob, unless one of the ephemeral keys is compromised (and, even in such case, none of the messages encrypted under the key $\tilde{k}$ can be decrypted).

The Noise Protocol Framework

In previous section we gave a small intuition on how multiple DH operations over ephemeral and static users' public keys can be combined to create different key-exchange protocols.

The Noise Protocol Framework defines various rules for building custom key-exchange protocols while allowing easy analysis of the security properties and threat models provided, given the type and order of the DH operations employed.

In Noise terminology, a key-agreement or Noise protocol consists of one or more Noise handshakes. During a Noise handshake, Alice and Bob exchange multiple (handshake) messages containing their ephemeral keys and/or static keys. These public keys are then used to perform a handshake-dependent sequence of Diffie-Hellman operations, whose results are all hashed into a shared secret key. Similarly as we have seen above, after a handshake is complete, each party will use the derived secret key to send and receive authenticated encrypted data by employing a symmetric cipher.

Depending on the handshake pattern adopted, different security guarantees can be provided on messages encrypted using a handshake-derived key.

The Noise handshakes we support in Waku all provide the following security properties:

  • Confidentiality: the adversary should not be able to learn what data is being sent between Alice and Bob.
  • Strong forward secrecy: an active adversary cannot decrypt messages nor infer any information on the employed encryption key, even in the case he has access to Alice's and Bob's long-term private keys (during or after their communication).
  • Authenticity: the adversary should not be able to cause either Alice or Bob to accept messages coming from a party different than their original senders.
  • Integrity: the adversary should not be able to cause Alice or Bob to accept data that has been tampered with.
  • Identity-hiding: once a secure communication channel is established, a passive adversary should not be able to link exchanged encrypted messages to their corresponding sender and recipient by knowing their long-term static keys.

We refer to Noise specification for more formal security definitions and precise threat models relative to Waku supported Noise Handshake patterns.

Message patterns

Noise handshakes involving DH operations over ephemeral and static keys can be succinctly sketched using the following set of handshake message tokens: e,s,ee,se,es,ss.

Tokens employing single letters denote (the type of) users' public keys: e refers to randomly generated ephemeral key(s), while s indicates the users' long-term static key(s).

Two-letter tokens, instead, denote DH operations over the two users' public keys the token refers to, given that the left token letter refers to the handshake initiator's public key, while the right token letter indicates the responder's public key used. Thus, if Alice started a handshake with Bob, the es token will shortly represent a DH operation among Alice's ephemeral key e and Bob's static key s.

Since, in order to perform any DH operations users need to share (or pre-share) the corresponding public keys, Noise compactly represents messages' exchanges using the two directions -> and <-, where -> denotes a message (arbitrary and/or DH public key) from the initiator to the responder, while <- the opposite.

Hence a message pattern consisting of a direction and one or multiple tokens such as <- e, s, es has to be interpreted one token at a time: in this example, the responder is sending his ephemeral and static key to the initiator and is then executing a DH operation over the initiator's ephemeral key e (shared in a previously exchanged message pattern) and his static key s. On the other hand, such message indicates also that the initiator received the responder's ephemeral and static keys e and s, respectively, and performed a DH operation over his ephemeral key and the responder's just received static key s. In this way, both parties will be able to derive at the end of each message pattern processed the same shared secret, which is eventually used to update any derived symmetric encryption keys computed so far.

In some cases, DH public keys employed in a handshake are pre-shared before the handshake itself starts. In order to chronologically separate exchanged keys and DH operations performed before and during a handshake, Noise employs the ... delimiter.

For example, the following message patterns

<- e
...
-> e, ee

indicates that the initiator knew the responder's ephemeral key before he sends his own ephemeral key and executes a DH operation between both parties ephemeral keys (similarly, the responder receives the initiator's ephemeral key and does a ee DH operation).

At this point it should be clear how such notation is able to compactly represent a large variety of DH based key-agreements. Nevertheless, we can easily define additional tokens and processing rules in order to address specific applications and security requirements, such as the psk token used to process arbitrary pre-shared key material.

As an example of Noise flexibility, the custom protocol we detailed above can be shortly represented as (Alice is on the left):

-> e
<- e, ee, s
-> s, ss

where after each DH operation an encryption key is derived (along with the secrets computed by all previously executed DH operations) in order to encrypt/decrypt any subsequent sent/received message.

Another example is given by the possibility to replicate within Noise the well established Signal's X3DH key-agreement protocols, thus making the latter a general framework to design and study security of many practical and widespread DH-based key-exchange protocols.

The Noise State Objects

We mentioned multiple times that parties derive an encryption key each time they perform a DH operation, but how does this work in more detail?

Noise defines three state objects: a Handshake State, a Symmetric State and a Cipher State, each encapsulated into each other and instantiated during the execution of a handshake.

The Handshake State object stores the user's and other party's received ephemeral and static keys (if any) and embeds a Symmetric State object.

The Symmetric State, instead, stores a handshake hash value h, iteratively updated with any message read/received and DH secret computed, and a chaining key ck, updated using a key derivation function every time a DH secret is computed. This object further embeds a Cipher State.

Lastly, the Cipher State stores a symmetric encryption key k and a counter n used to encrypt and decrypt messages exchanged during the handshake (not only static keys, but also arbitrary payloads). This key and counter are refreshed every time the chaining key is updated.

While processing each handshake's message pattern token, all these objects are updated according to some specific processing rules which employ a combination of public-key primitives, hash and key-derivation functions and symmetric ciphers. It is important to note, however, that at the end of each processed message pattern, the two users will share the same Symmetric and Cipher State embedded in their respective Handshake States.

Once a handshake is complete, users derive two new Cipher States and can then discard the Handshake State object (and, thus, the embedded Symmetric State and Cipher State objects) employed during the handshake.

These two Cipher states are used to encrypt and decrypt all outbound and inbound after-handshake messages, respectively, and only to these will be granted the confidentiality, authenticity, integrity and identity-hiding properties we detailed above.

For more details on processing rules, we refer to Noise specifications.

Supported Noise Handshakes in Waku

The Noise handshakes we provided support to in Waku address four typical scenarios occurring when an encrypted communication channel between Alice and Bob is going to be created:

  • Alice and Bob know each others' static key.
  • Alice knows Bob's static key;
  • Alice and Bob share no key material and they don't know each others' static key.
  • Alice and Bob share some key material, but they don't know each others' static key.

The possibility to have handshakes based on the reciprocal knowledge parties have of each other, allows designing Noise handshakes that can quickly reach the desired level of security on exchanged encrypted messages while keeping the number of interactions between Alice and Bob minimum.

Nonetheless, due to the pure token-based nature of handshake processing rules, implementations can easily add support to any custom handshake pattern with minor modifications, in case more specific application use-cases need to be addressed.

On a side note, we already mentioned that identity-hiding properties can be guaranteed against a passive attacker that only reads the communication occurring between Alice and Bob. However, an active attacker who compromised one party's static key and actively interferes with the parties' exchanged messages, may lower the identity-hiding security guarantees provided by some handshake patterns. In our security model we exclude such adversary, but, for completeness, in the following we report a summary of possible de-anonymization attacks that can be performed by such an active attacker.

For more details on supported handshakes and on how these are implemented in Waku, we refer to 35/WAKU2-NOISE RFC.

The K1K1 Handshake

If Alice and Bob know each others' static key (e.g., these are public or were already exchanged in a previous handshake), they MAY execute a K1K1 handshake. In Noise notation (Alice is on the left) this can be sketched as:

 K1K1:
-> s
<- s
...
-> e
<- e, ee, es
-> se

We note that here only ephemeral keys are exchanged. This handshake is useful in case Alice needs to instantiate a new separate encrypted communication channel with Bob, e.g. opening multiple parallel connections, file transfers, etc.

Security considerations on identity-hiding (active attacker): no static key is transmitted, but an active attacker impersonating Alice can check candidates for Bob's static key.

The XK1 Handshake

Here, Alice knows how to initiate a communication with Bob and she knows his public static key: such discovery can be achieved, for example, through a publicly accessible register of users' static keys, smart contracts, or through a previous public/private advertisement of Bob's static key.

A Noise handshake pattern that suits this scenario is XK1:

 XK1:
<- s
...
-> e
<- e, ee, es
-> s, se

Within this handshake, Alice and Bob reciprocally authenticate their static keys s using ephemeral keys e. We note that while Bob's static key is assumed to be known to Alice (and hence is not transmitted), Alice's static key is sent to Bob encrypted with a key derived from both parties ephemeral keys and Bob's static key.

Security considerations on identity-hiding (active attacker): Alice's static key is encrypted with forward secrecy to an authenticated party. An active attacker initiating the handshake can check candidates for Bob's static key against recorded/accepted exchanged handshake messages.

The XX and XXpsk0 Handshakes

If Alice is not aware of any static key belonging to Bob (and neither Bob knows anything about Alice), she can execute an XX handshake, where each party transmits to the other its own static key.

The handshake goes as follows:

 XX:
-> e
<- e, ee, s, es
-> s, se

We note that the main difference with XK1 is that in second step Bob sends to Alice his own static key encrypted with a key obtained from an ephemeral-ephemeral Diffie-Hellman exchange.

This handshake can be slightly changed in case both Alice and Bob pre-share some secret psk which can be used to strengthen their mutual authentication during the handshake execution. One of the resulting protocols, called XXpsk0, goes as follows:

 XXpsk0:
-> psk, e
<- e, ee, s, es
-> s, se

The main difference with XX is that Alice's and Bob's static keys, when transmitted, would be encrypted with a key derived from psk as well.

Security considerations on identity-hiding (active attacker): Alice's static key is encrypted with forward secrecy to an authenticated party for both XX and XXpsk0 handshakes. In XX, Bob's static key is encrypted with forward secrecy but is transmitted to a non-authenticated user which can then be an active attacker. In XXpsk0, instead, Bob's secret key is protected by forward secrecy to a partially authenticated party (through the pre-shared secret psk but not through any static key), provided that psk was not previously compromised (in such case identity-hiding properties provided by the XX handshake applies).

Session Management and Multi-Device Support

When two users complete a Noise handshake, an encryption/decryption session - or Noise session - consisting of two Cipher States is instantiated.

By identifying Noise session with a session-id derived from the handshake's cryptographic material, we can take advantage of the PubSub/GossipSub protocols used by Waku for relaying messages in order to manage instantiated Noise sessions.

The core idea is to exchange after-handshake messages (encrypted with a Cipher State specific to the Noise session), over a content topic derived from the (secret) session-id the corresponding session refers to.

This allows to decouple the handshaking phase from the actual encrypted communication, thus improving users' identity-hiding capabilities.

Furthermore, by publicly revealing a value derived from session-id on the corresponding session content topic, a Noise session can be marked as stale, enabling peers to save resources by discarding any eventually stored message sent to such content topic.

One relevant aspect in today's applications is the possibility for users to employ different devices in their communications. In some cases, this is non-trivial to achieve since, for example, encrypted messages might be required to be synced on different devices which do not necessarily share the necessary key material for decryption and may be temporarily offline.

We address this by requiring each user's device to instantiate multiple Noise sessions either with all user's other devices which, in turn, all together share a Noise session with the other party, or by directly instantiating a Noise session with all other party's devices.

We named these two approaches N11M and NM, respectively, which are in turn loosely based on the paper “Multi-Device for Signal” and Signal’s Sesame Algorithm.

Informally, in the N11M session management scheme, once the first Noise session between any of Alice’s and Bob’s devices is instantiated, its session information is securely propagated to all other devices using previously instantiated Noise sessions. Hence, all devices are able to send and receive new messages on the content topic associated to such session.

In the NM session management scheme, instead, all pairs of Alice's and Bob's devices have a distinct Noise session: a message is then sent from the currently-in-use sender’s device to all recipient’s devices, by properly encrypting and sending it to the content topics of each corresponding Noise session. If sent messages should be available on all sender’s devices as well, we require each pair of sender’s devices to instantiate a Noise session used for syncing purposes.

For more technical details on how Noise sessions are instantiated and managed within these two mechanisms and the different trade-offs provided by the latter, we refer to 37/WAKU2-NOISE-SESSIONS.

Conclusions

In this post we provided an overview of Noise, a protocol framework for designing Diffie-Hellman based key-exchange mechanisms allowing systematic security and threat model analysis.

The flexibility provided by Noise components allows not only to fully replicate with same security guarantees well established key-exchange primitives such as X3DH, currently employed by Status 5/TRANSPORT-SECURITY, but enables also optimizations based on the reciprocal knowledge parties have of each other while allowing easier protocols' security analysis and (formal) verification.

Furthermore, different handshakes can be combined and executed one after each other, a particularly useful feature to authenticate multiple static keys employed by different applications but also to ease keys revocation.

The possibility to manage Noise sessions over multiple devices and the fact that handshakes can be concretely instantiated using modern, fast and secure cryptographic primitives such as ChaChaPoly and BLAKE2b, make Noise one of the best candidates for efficiently and securely address the many different needs of applications built on top of Waku requiring key-agreement.

Future steps

The available implementation of Noise in nwaku, although mostly complete, is still in its testing phase. As future steps we would like to:

  • have an extensively tested and robust Noise implementation;
  • formalize, implement and test the performance of the two proposed N11M and NM session management mechanisms and their suitability for common use-case scenarios;
  • provide Waku network nodes a native protocol to readily support key-exchanges, strongly-encrypted communication and multi-device session management mechanisms with none-to-little interaction besides applications' connection requests.

References

]]>
+
+ + <![CDATA[Waku v2 Ambient Peer Discovery]]> + https://vac.dev/rlog/wakuv2-apd + https://vac.dev/rlog/wakuv2-apd + Mon, 09 May 2022 10:00:00 GMT + + Introducing and discussing ambient peer discovery methods currently used by Waku v2, as well as future plans in this area.

Waku v2 comprises a set of modular protocols for secure, privacy preserving communication. Avoiding centralization, these protocols exchange messages over a P2P network layer. In order to build a P2P network, participating nodes first have to discover peers within this network. This is where ambient peer discovery comes into play: it allows nodes to find peers, making it an integral part of any decentralized application.

In this post the term node refers to our endpoint or the endpoint that takes action, while the term peer refers to other endpoints in the P2P network. These endpoints can be any device connected to the Internet: e.g. servers, PCs, notebooks, mobile devices, or applications like a browser. As such, nodes and peers are the same. We use these terms for ease of explanation without loss of generality.

In Waku's modular design, ambient peer discovery is an umbrella term for mechanisms that allow nodes to find peers. Various ambient peer discovery mechanisms are supported, and each is specified as a separate protocol. Where do these protocols fit into Waku's protocol stack? The P2P layer of Waku v2 builds on libp2p gossipsub. Nodes participating in a gossipsub protocol manage a mesh network that is used for routing messages. This mesh network is an unstructured P2P network offering high robustness and resilience against attacks. Gossipsub implements many improvements overcoming the shortcomings typically associated with unstructured P2P networks, e.g. inefficient flooding based routing. The gossipsub mesh network is managed in a decentralized way, which requires each node to know other participating peers. Waku v2 may use any combination of its ambient discovery protocols to find appropriate peers.

Summarizing, Waku v2 comprises a peer management layer based on libp2p gossipsub, which manages the peers of nodes, and an ambient peer discovery layer, which provides information about peers to the peer management layer.

We focus on ambient peer discovery methods that are in line with our goal of building a fully decentralized, generalized, privacy-preserving and censorship-resistant messaging protocol. Some of these protocols still need adjustments to adhere to our privacy and anonymity requirements. For now, we focus on operational stability and feasibility. However, when choosing techniques, we pay attention to selecting mechanisms that can feasibly be tweaked for privacy in future research efforts. Because of the modular design and the fact that Waku v2 has several discovery methods at its disposal, we could even remove a protocol in case future evaluation deems it not fitting our standards.

This post covers the current state and future considerations of ambient peer discovery for Waku v2, and gives reason for changes and modifications we made or plan to make. The ambient peer discovery protocols currently supported by Waku v2 are a modified version of Ethereum's Discovery v5 and DNS-based discovery. Waku v2 further supports gossipsub's peer exchange protocol. In addition, we plan to introduce protocols for general peer exchange and capability discovery, respectively. The former allows resource restricted nodes to outsource querying for peers to stronger peers, the latter allows querying peers for their supported capabilities. Besides these new protocols, we are working on integrating capability discovery in our existing ambient peer discovery protocols.

Static Node Lists

The simplest method of learning about peers in a P2P network is via static node lists. These can be given to nodes as start-up parameters or listed in a config-file. They can also be provided in a script-parseable format, e.g. in JSON. While this method of providing bootstrap nodes is very easy to implement, it requires static peers, which introduce centralized elements. Also, updating static peer information introduces significant administrative overhead: code and/or config files have to be updated and released. Typically, static node lists only hold a small number of bootstrap nodes, which may lead to high load on these nodes.

DNS-based Discovery

Compared to static node lists, DNS-based discovery (specified in EIP-1459) provides a more dynamic way of discovering bootstrap nodes. It is very efficient, can easily be handled by resource restricted devices and provides very good availability. In addition to a naive DNS approach, Ethereum's DNS-based discovery introduces efficient authentication leveraging Merkle trees.

A further advantage over static node lists is the separation of code/release management and bootstrap node management. However, changing and updating the list of bootstrap nodes still requires administrative privileges because DNS records have to be added or updated.

While this method of discovery still requires centralized elements, node list management can be delegated to various DNS zones managed by other entities, mitigating centralization.

Discovery V5

A much more dynamic method of ambient peer discovery is Discovery v5, which is Ethereum's peer discovery protocol. It is based on the Kademlia distributed hashtable (DHT). An introduction to discv5 and its history, and a discv5 Waku v2 feasibility study can be found in previous posts on this research log.

We use Discovery v5 as an ambient peer discovery method for Waku v2 because it is decentralized, efficient, actively researched, and has web3 as its main application area. Discv5 also offers mitigation techniques for various attacks, which we cover later in this post.

Using a DHT (structured P2P network) as a means for ambient peer discovery, while using the gossipsub mesh network (unstructured P2P network) for transmitting actual messages, Waku v2 leverages advantages from both worlds. One of the main benefits of DHTs is offering a global view over participating nodes. This, in turn, allows sampling random sets of nodes, which is important for equally distributing load. Gossipsub, on the other hand, offers great robustness and resilience against attacks. Even if discv5 discovery should not work in the event of a DoS attack, Waku v2 can still operate switching to different discovery methods.

Discovery methods that use separate P2P networks still depend on bootstrapping, which Waku v2 does via parameters on start-up or via DNS-based discovery. This might raise the question of why such discovery methods are beneficial. The answer lies in the aforementioned global view of DHTs. Without discv5 and similar methods, the bootstrap nodes are used as part of the gossipsub mesh. This might put heavy load on these nodes and further, might open pathways to inference attacks. Discv5, on the other hand, uses the bootstrap nodes merely as an entry to the discovery network and can provide random sets of nodes (sampled from a global view) for bootstrapping or expanding the mesh.

DHT Background

Distributed Hash Tables are a class of structured P2P overlay networks. A DHT can be seen as a distributed node set of which each node is responsible for a part of the hash space. In contrast to unstructured P2P networks, e.g. the mesh network maintained by gossipsub, DHTs have a global view over the node set and the hash space (assuming the participating nodes behave well).

DHTs are susceptible to various kinds of attacks, especially Sybil attacks and eclipse attacks. While security aspects have been addressed in various research papers, general practical solutions are not available. However, discv5 introduced various practical mitigation techniques.

Random Walk Discovery

While discv5 is based on the Kademlia DHT, it only uses the distributed node set aspect of DHTs. It does not map values (items) into the distributed hash space. This makes sense, because the main purpose of discv5 is discovering other nodes that support discv5, which are expected to be Ethereum nodes. Ethereum nodes that want to discover other Ethereum nodes simply query the discv5 network for a random set of peers. If Waku v2 would do the same, only a small subset of the retrieved nodes would support Waku v2.

A first naive solution for Waku v2 discv5 discovery is

  • retrieve a random node set, which is achieved by querying for a set of randomly chosen node IDs
  • filter the returned nodes on the query path based on Waku v2 capability via the Waku v2 ENR
  • repeat until enough Waku v2 capable nodes are found

This query process boils down to random walk discovery, which is very resilient against attacks, but also very inefficient if the number of nodes supporting the desired capability is small. We refer to this as the needle-in-the-haystack problem.

Random Walk Performance Estimation

This subsection provides a rough estimation of the overhead introduced by random walk discovery.

Given the following parameters:

  • $n$ the number of total nodes participating in discv5
  • $p$ the percentage of nodes supporting Waku
  • $W$ the event of having at least one Waku node in a random sample
  • $k$ the size of a random sample (default = 16)
  • $\alpha$ the number of parallel queries started
  • $b$ bits per hop
  • $q$ the number of queries

A query takes $\log_{2^b} n$ hops to retrieve a random sample of nodes.

$P(W) = 1 - (1-p/100)^k$ is the probability of having at least one Waku node in the sample.

$P(W^q) = 1 - (1-p/100)^{kq}$ is the probability of having at least one Waku node in the union of $q$ samples.

Expressing this in terms of $q$, we can write: +$P(W^q) = 1 - (1-p/100)^{kq} \iff q = \log_{(1-p/100)^k}\left(1-P(W^q)\right)$

Figure 1 shows a log-log plot for $P(W^q) = 90\%$.

Figure 1: log-log plot showing the number of queries necessary to retrieve a Waku v2 node with a probability of 90% in relation to the Waku v2 node concentration in the network.

Assuming $p=0.1$, we would need

$0.9 = 1 - (1-0.1/100)^{16q} \implies q \approx 144$

queries to get a Waku node with 90% probability, which leads to $\approx 144 \cdot 18 = 2592$ overlay hops. +Choosing $b=3$ would reduce the number to $\approx 144 \cdot 6 = 864$. +Even when choosing $\alpha = 10$ we would have to wait at least 80 RTTs. +This effort is just for retrieving a single Waku node. Ideally, we want at least 3 Waku nodes for bootstrapping a Waku relay.

The discv5 doc roughly estimates $p=1\%$ to be the threshold for acceptably efficient random walk discovery. +This is in line with our estimation:

$0.9 = 1 - (1-1/100)^{16q} \implies q \approx 14$

The number of necessary queries is linearly dependent on the percentage $p$ of Waku nodes. +The number of hops per query is logarithmically dependent on $n$. +Thus, random walk searching is inefficient for small percentages $p$. +Still, random walks are more resilient against attacks.

We can conclude that a Waku node concentration below 1% renders vanilla discv5 unfit for our needs. +Our current solution and future plans for solving this issue are covered in the next subsections.

Simple Solution: Separate Discovery Network

The simple solution we currently use for Waku v2 discv5 is a separate discv5 network. +All (well behaving) nodes in this network support Waku v2, resulting in a very high query efficiency. +However, this solution reduces resilience because the difficulty of attacking a DHT scales with the number of participating nodes.

Discv5 Topic Discovery

We did not base our solution on the current version of discv5 topic discovery, +because, similar to random walk discovery, it suffers from poor performance for relatively rare capabilities/topics.

However, there is ongoing research in discv5 topic discovery which is close to ideas we explored when pondering efficient and resilient Waku discv5 solutions. +We keep a close eye on this research, give feedback, and make suggestions, as we plan to switch to this version of topic discovery in the future.

In a nutshell, topic discovery will manage separate routing tables for each topic. +These topic specific tables are initialized with nodes from the discv5 routing table. +While the buckets of the discv5 routing table represent distance intervals from the node's node ID, the topic table buckets represent distance intervals from topic IDs.

Nodes that want to register a topic try to register that topic at one random peer per bucket. +This leads to registering the topic at peers in closer and closer neighbourhoods around the topic ID, which +yields a very efficient and resilient compromise between random walk discovery and DHT discovery. +Peers in larger neighbourhoods around the topic ID are less efficient to discover, however more resilient against eclipse attacks and vice versa.

Further, this works well with the overload and DoS protection discv5 employs. +Discv5 limits the number of nodes registered per topic on a single peer. Further, discv5 enforces a waiting time before nodes can register topics at peers. +So, for popular topics, a node might fail to register the topic in a close neighbourhood. +However, because the topic is popular (has a high occurrence percentage $p$), it can still be efficiently discovered.

In the future, we also plan to integrate Waku v2 capability discovery, which will not only allow asking for nodes that support Waku v2, +but asking for Waku v2 nodes supporting specific Waku v2 protocols like filter or store. +For the store protocol we envision sub-capabilities reflecting message topics and time frames of messages. +We will also investigate related security implications.

Attacks on DHTs

In this post, we only briefly describe common attacks on DHTs. +These attacks are mainly used for denial of service (DoS), +but can also be used as parts of more sophisticated attacks, e.g. deanonymization attacks. +A future post on this research log will cover security aspects of ambient peer discovery with a focus on privacy and anonymity.

Sybil Attack

The power of an attacker in a DHT is proportional to the number of controlled nodes. +Controlling nodes comes at a high resource cost and/or requires controlling a botnet via a preliminary attack.

In a Sybil attack, an attacker generates lots of virtual node identities. +This allows the attacker to control a large portion of the ID space in a DHT at a relatively low cost. +Sybil attacks are especially powerful when the attacker can freely choose the IDs of generated nodes, +because this allows positioning at chosen points in the DHT.

Because Sybil attacks amplify the power of many attacks against DHTs, +making Sybil attacks as difficult as possible is the basis for resilient DHT operation. +The typical abstract mitigation approach is binding node identities to physical network interfaces. +To some extent, this can be achieved by introducing IP address based limits. +Further, generating node IDs can be bound by proof of work (PoW), +which, however, comes with a set of shortcomings, e.g. relatively high costs on resource restricted devices. +The discv5 doc +describes both Sybil and eclipse attacks, as well as concrete mitigation techniques employed by discv5.

Eclipse Attack

In an eclipse attack, nodes controlled by the attacker poison the routing tables of other nodes in a way that parts of the DHT become eclipsed, i.e. invisible. +When a controlled node is asked for the next step in a path, +it provides another controlled node as the next step, +effectively navigating the querying node around or away from certain areas of the DHT. +While several mitigation techniques have been researched, there is no definitive protection against eclipse attacks available as of yet. +One mitigation technique is increasing $\alpha$, the number of parallel queries, and following each concurrent path independently for the lookup.

The eclipse attack becomes very powerful in combination with a successful Sybil attack; +especially when the attacker can freely choose the position of the Sybil nodes.

The aforementioned new topic discovery of discv5 provides a good balance between protection against eclipse attacks and query performance.

Peer Exchange Protocol

While discv5 based ambient peer discovery has many desirable properties, resource restricted nodes and nodes behind restrictive NAT setups cannot run discv5 satisfactorily. +With these nodes in mind, we started working on a simple peer exchange protocol based on ideas proposed here. +The peer exchange protocol will allow nodes to ask peers for additional peers. +Similar to discv5, the peer exchange protocol will also support capability discovery.

The new peer exchange protocol can be seen as a simple replacement for the Rendezvous protocol, which Waku v2 does not support. +While the rendezvous protocol involves nodes registering at rendezvous peers, the peer exchange protocol simply allows nodes to ask any peer for a list of peers (with a certain set of capabilities). +Rendezvous tends to introduce centralized elements as rendezvous peers have a super-peer role.

In the future, we will investigate resource usage of Waku v2 discv5 and provide suggestions for minimal resources nodes should have to run discv5 satisfactorily.

Waku v2 comprises further protocols related to ambient peer discovery. We shortly mention them for context, even though they are not strictly ambient peer discovery protocols.

Gossipsub Peer Exchange Protocol

Gossipsub provides an integrated peer exchange mechanism which is also supported by Waku v2. +Gossipsub peer exchange works in a push manner. Nodes send peer lists to peers they prune from the active mesh. +This pruning is part of the gossipsub peer management, blurring the boundaries of peer management and ambient peer discovery.

We will investigate anonymity implications of this protocol and might disable it in favour of more anonymity-preserving protocols. +Sending a list of peers discloses information about the sending node. +We consider restricting these peer lists to cached peers that are currently not used in the active gossipsub mesh.

Capability Negotiation

Some of the ambient peer discovery methods used by Waku v2 will support capability discovery. +This makes it possible to narrow down the set of retrieved peers to peers that support specific capabilities. +This is efficient because it avoids establishing connections to nodes that we are not interested in.

However, the ambient discovery interface does not require capability discovery, which will lead to nodes having peers with unknown capabilities in their peer lists. +We work on a capability negotiation protocol which allows nodes to ask peers

  • for their complete list of capabilities, and
  • whether they support a specific capability

We will investigate security implications, especially when sending full capability lists.

NAT traversal

For NAT traversal, Waku v2 currently supports the port mapping protocols UPnP and NAT-PMP / PCP.

In the future, we plan to add support for parts of ICE, e.g. STUN. +We do not plan to support TURN because TURN relays would introduce a centralized element. +A modified decentralized version of TURN featuring incentivization might be an option in the future; +strong peers could offer a relay service similar to TURN.

There are plans to integrate more NAT traversal into discv5, in which we might participate. +So far, the only traversal technique supported by discv5 is nodes receiving their external IP address in pong messages.

While NAT traversal is very important, adding more NAT traversal techniques is not a priority at the moment. +Nodes behind restrictive symmetric NAT setups cannot be discovered, but they can still discover peers in less restrictive setups. +While we wish to have as many nodes as possible to be discoverable via ambient peer discovery, two nodes behind a restrictive symmetric NAT can still exchange Waku v2 messages if they discovered a shared peer. +This is one of the nice resilience related properties of flooding based routing algorithms.

For mobile nodes, which suffer from changing IP addresses and double NAT setups, we plan to use the peer exchange protocol to ask peers for more peers. +Besides saving resources on resource restricted devices, this approach works as long as peers are in less restrictive environments.

Conclusion and Future Prospects

Ambient peer discovery is an integral part of decentralized applications. It allows nodes to learn about peers in the network. +As of yet, Waku v2 supports DNS-based discovery and a slightly modified version of discv5. +We are working on further protocols, including a peer exchange protocol that allows resource restricted nodes to ask stronger peers for peer lists. +Further, we are working on adding capability discovery to our ambient discovery protocols, allowing nodes to find peers with desired properties.

These protocols can be combined in a modular way and allow Waku v2 nodes to build a strong and resilient mesh network, +even if some discovery methods are not available in a given situation.

We will investigate security properties of these discovery mechanisms with a focus on privacy and anonymity in a future post on this research log. +As an outlook we can already state that DHT approaches typically allow inferring information about the querying node. +Further, sending peer lists allows inferring the position of a node within the mesh, and by extension information about the node. +Waku v2 already provides some mitigation, because the mesh for transmitting actual messages, and the peer discovery network are separate. +To mitigate information leakage by transmitting peer lists, we plan to only reply with lists of peers that nodes do not use in their active meshes.


References

]]>
+
+ + <![CDATA[Introducing nwaku]]> + https://vac.dev/rlog/introducing-nwaku + https://vac.dev/rlog/introducing-nwaku + Tue, 12 Apr 2022 10:00:00 GMT + + Introducing nwaku, a Nim-based Waku v2 client, including a summary of recent developments and preview of current and future focus areas.

Background

If you've been following our research log, +you'll know that many things have happened in the world of Waku v2 since our last general update. +In line with our long term goals, +we've introduced new protocols, +tweaked our existing protocols +and expanded our team. +We've also shown in a series of practical experiments that Waku v2 does indeed deliver on some of the theoretical advantages it was designed to have over its predecessor, Waku v1. +A sustainability and business workshop led to the formulation of a clearer vision for Vac as a team.

From the beginning, our protocol development has been complemented by various client implementations of these protocols, +first in Nim, +but later also in JavaScript +and Go. +A follow-up post will clarify the purposes, similarities and differences between these three clients. +The Nim client is our reference implementation, +developed by the research team in parallel with the specs +and building on a home-grown implementation of libp2p. +The Nim client is suitable to run as a standalone adaptive node, +managed by individual operators +or as an encapsulated service node in other applications. +This post looks at some recent developments within the Nim client.

1. nim-waku is now known as nwaku

Pronounced NWHA-koo. +You may already have seen us refer to "nwaku" on Vac communication channels, +but it is now official: +The nim-waku Waku v2 client has been named nwaku. +Why? Well, we needed a recognizable name for our client that could easily be referred to in everyday conversations +and nim-waku just didn't roll off the tongue. +We've followed the example of the closely related nimbus project to find a punchier name +that explicitly links the client to both the Waku set of protocols and the Nim language.

2. Improvements in stability and performance

The initial implementation of Waku v2 demonstrated how the suite of protocols can be applied +to form a generalized, peer-to-peer messaging network, +while addressing a wide range of adaptive requirements. +This allowed us to lift several protocol specifications from raw to draft status, +indicating that a reference implementation exists for each. +However, as internal dogfooding increased and more external applications started using nwaku, +we stepped up our focus on the client's stability and performance. +This is especially true where we want nwaku to run unsupervised in a production environment +without any degradation in the services it provides.

Some of the more significant productionization efforts over the last couple of months included:

  1. Reworking the store implementation to maintain stable memory usage +while storing historical messages +and serving multiple clients querying history simultaneously. +Previously, a store node would see gradual service degradation +due to inefficient memory usage when responding to history queries. +Queries that often took longer than 8 mins now complete in under 100 ms.

  2. Improved peer management. +For example, filter nodes will now remove unreachable clients after a number of connection failures, +whereas they would previously keep accumulating dead peers.

  3. Improved disk usage. +nwaku nodes that persist historical messages on disk now manage their own storage size based on the --store-capacity. +This can significantly improve node start-up times.

More stability issues may be addressed in future as nwaku matures, +but we've noticed a marked improvement in the reliability of running nwaku nodes. +These include environments where nwaku nodes are expected to run with a long uptime. +Vac currently operates two long-running fleets of nwaku nodes, wakuv2.prod and wakuv2.test, +for internal dogfooding and +to serve as experimental bootstrapping nodes. +Status has also recently deployed similar fleets for production and testing based on nwaku. +Our goal is to have nwaku be stable, performant and flexible enough +to be an attractive option for operators to run and maintain their own Waku v2 nodes. +See also the future work section below for more on our general goal of nwaku for operators.

3. Improvements in interoperability

We've implemented several features that improve nwaku's usability in different environments +and its interoperability with other Waku v2 clients. +One major step forward here was adding support for both secure and unsecured WebSocket connections as libp2p transports. +This allows direct connectivity with js-waku +and paves the way for native browser usage. +We've also added support for parsing and resolving DNS-type multiaddrs, +i.e. multiaddress protocol schemes dns, dns4, dns6 and dnsaddr. +A nwaku node can now also be configured with its own IPv4 DNS domain name +allowing dynamic IP address allocation without impacting a node's reachability by its peers.

4. Peer discovery

Peer discovery is the method by which nodes become aware of each other’s existence. +The question of peer discovery in a Waku v2 network has been a focus area since the protocol was first conceptualized. +Since then several different approaches to discovery have been proposed and investigated. +We've implemented three discovery mechanisms in nwaku so far:

DNS-based discovery

nwaku nodes can retrieve an authenticated, updateable list of peers via DNS to bootstrap connection to a Waku v2 network. +Our implementation is based on EIP-1459.

GossipSub peer exchange

GossipSub Peer Exchange (PX) is a GossipSub v1.1 mechanism +whereby a pruning peer may provide a pruned peer with a set of alternative peers +where it can connect to reform its mesh. +This is a very suitable mechanism to gradually discover more peers +from an initial connection to a small set of bootstrap peers. +It is enabled in a nwaku node by default.

Waku Node Discovery Protocol v5

This is a DHT-based discovery mechanism adapted to store and relay node records. +Our implementation is based on Ethereum's Discovery v5 protocol +with some minor modifications to isolate our discovery network from that of Ethereum. +The decision to separate the Waku Discovery v5 network from Ethereum's was made on considerations of lookup efficiency. +This comes at a possible tradeoff in network resilience. +We are considering merging with the Ethereum Discovery v5 network in future, +or even implement a hybrid solution. +This post explains the decision and future steps.

5. Spam protection using RLN

An early addition to our suite of protocols was an extension of 11/WAKU-RELAY +that provided spam protection using Rate Limiting Nullifiers (RLN). +The nwaku client now contains a working demonstration and integration of RLN relay. +Check out this tutorial to see the protocol in action using a toy chat application built on nwaku. +We'd love for people to join us in dogfooding RLN spam protection as part of our operator incentive testnet. +Feel free to join our Vac Discord server +and head to the #rln channel for more information.

Future work

As we continue working towards our goal of a fully decentralized, generalized and censorship-resistant messaging protocol, +these are some of the current and future focus areas for nwaku:

Reaching out to operators:

We are starting to push for operators to run and maintain their own Waku v2 nodes, +preferably contributing to the default Waku v2 network as described by the default pubsub topic (/waku/2/default-waku/proto). +Amongst other things, a large fleet of stable operator-run Waku v2 nodes will help secure the network, +provide valuable services to a variety of applications +and ensure the future sustainability of both Vac as a research organization and the Waku suite of protocols.

We are targeting nwaku as the main option for operator-run nodes.
+Specifically, we aim to provide through nwaku:

  1. a lightweight and robust Waku v2 client. +This client must be first in line to support innovative and new Waku v2 protocols, +but configurable enough to serve the adaptive needs of various operators.
  2. an easy-to-follow guide for operators to configure, +set up and maintain their own nodes
  3. a set of operator-focused tools to monitor and maintain a running node

Better conversational security layer guarantees

Conversational security guarantees in Waku v2 are currently designed around the Status application. +Developers building their own applications on top of Waku would therefore +either have to reimplement a set of tools similar to Status +or build their own security solutions on the application layer above Waku. +We are working on a set of features built into Waku +that will provide the general security properties Waku users may desire +and do so in a modern and simple way. +This is useful for applications outside of Status that want similar security guarantees. +As a first step, we've already made good progress toward integrating noise handshakes as a key exchange mechanism in Waku v2.

Protocol incentivization

We want to design incentivization around our protocols to encourage desired behaviors in the Waku network, +rewarding nodes providing costly services +and punishing adversarial actions. +This will increase the overall security of the network +and encourage operators to run their own Waku nodes. +In turn, the sustainability of Vac as an organization will be better guaranteed. +As such, protocol incentivization was a major focus in our recent Vac Sustainability and Business Workshop. +Our first step here is to finish integrating RLN relay into Waku +with blockchain interaction to manage members, +punish spammers +and reward spam detectors. +After this, we want to design monetary incentivization for providers of store, lightpush and filter services. +This may also tie into a reputation mechanism for service nodes based on a network-wide consensus on service quality. +A big challenge for protocol incentivization is doing it in a private fashion, +so we can keep similar metadata protection guarantees as the Waku base layer. +This ties into our focus on Zero Knowledge tech.

Improved store capacity

The nwaku store currently serves as an efficient in-memory store for historical messages, +dimensioned by the maximum number of messages the store node is willing to keep. +This makes the nwaku store appropriate for keeping history over a short term +without any time-based guarantees, +but with the advantage of providing fast responses to history queries. +Some applications, such as Status, require longer-term historical message storage +with time-based dimensioning +to guarantee that messages will be stored for a specified minimum period. +Because of the relatively high cost of memory compared to disk space, +a higher capacity store, with time guarantees, should operate as a disk-only database of historical messages. +This is an ongoing effort.

Multipurpose discovery

In addition to the three discovery methods already implemented in nwaku, +we are working on improving discovery on at least three fronts:

Capability discovery:

Waku v2 nodes may be interested in peers with specific capabilities, for example:

  1. peers within a specific pubsub topic mesh,
  2. peers with store capability,
  3. store peers with x days of history for a specific content topic, etc.

Capability discovery entails mechanisms by which such capabilities can be advertised and discovered/negotiated. +One major hurdle to overcome is the increased complexity of finding a node with specific capabilities within the larger network (a needle in a haystack). +See the original problem statement for more.

Improvements in Discovery v5

Of the implemented discovery methods, +Discovery v5 best addresses our need for a decentralized and scalable discovery mechanism. +With the basic implementation done, +there are some improvements planned for Discovery v5, +including methods to increase security such as merging with the Ethereum Discovery v5 network, +introducing explicit NAT traversal +and utilizing topic advertisement. +The Waku v2 Discovery v5 Roadmap contains more details.

Generalized peer exchange

nwaku already implements GossipSub peer exchange. +We now need a general request-response mechanism outside of GossipSub +by which a node may learn about other Waku v2 nodes +by requesting and receiving a list of peers from a neighbor. +This could, for example, be a suitable way for resource-restricted devices to request a stronger peer +to perform a random Discovery v5 lookup on their behalf +or simply to be informed of a subset of the peers known to that neighbor. +See this issue for more.


This concludes a general outline of some of the main recent developments in the nwaku client +and a summary of the current and future focus areas. +Much more is happening behind the scenes, of course, +so for more information, or to join the conversation, +feel free to join our Vac Discord server +or to check out the nwaku repo on Github. +You can also view the changelog for past releases here.

References

]]>
+
+ + <![CDATA[Opinion: Pseudo-ethics in the Surveillance Tech Industry]]> + https://vac.dev/rlog/ethics-surveillance-tech + https://vac.dev/rlog/ethics-surveillance-tech + Fri, 03 Dec 2021 10:00:00 GMT + + A look at typical ethical shortfalls in the global surveillance tech industry.

This is an opinion piece by pseudonymous contributor, circe.

Preface

The Vac team aims to provide a public good in the form of freely available, open source tools and protocols for decentralized communication. +As such, we value our independence and the usefulness of our protocols for a wide range of applications. +At the same time, we realize that all technical development, including ours, has a moral component. +As a diverse team we are guided by a shared devotion to the principles of human rights and liberty. +This explains why we place such a high premium on security, censorship-resistance and privacy - +a stance we share with the wider Status Network. +The post below takes a different approach from our usual more technical analyses, +by starting to peel back the curtain on the ethical shortfalls of the global surveillance tech industry.

Spotlight on an industry

Apple's announcement of their lawsuit against Israel's NSO Group +marks the latest in a series of recent setbacks for the surveillance tech company. +In early November, the United States blacklisted the firm, +citing concerns about the use of their spyware by foreign governments targeting civilians such as "journalists, businesspeople, activists" and more. +The company is already embroiled in a lawsuit with Whatsapp +over their exploit of the chat app's video calling service to install malware on target devices. +NSO Group's most infamous product, Pegasus, operates as a hidden exploit installed on victims' mobile phones, +sometimes without even requiring as much as an unguarded click on a malicious link. +It has the potential to lay bare, and report to its owners, everything within the reach of the infected device. +For most people this amounts to a significant portion of their private lives and thoughts. +Pegasus can read your private messages (even encrypted), collect your passwords, record calls, track your location and access your device's microphone and camera. +No activity or application on an infected phone would be hidden.

The latest controversies are perhaps less because of the novelty of the revelations - +the existence of Pegasus has been known to civil activists since at least 2016. +Rather, the public was reminded again of the potential scope of surveillance tech +in the indiscriminate use of Pegasus on private citizens. +This has far-reaching implications for human freedoms worldwide. +Earlier this year, a leaked list of over 50,000 targets, or possible targets, of Pegasus included +the phone numbers of human rights advocates, independent journalists, lawyers and political activists. +This should have come as no surprise. +The type of autocratically inclined agents, and governments, who would venture to buy and use such invasive cyber-arms often target those they find politically inconvenient. +Pegasus, and similar technologies, simply extend the reach and capacity of such individuals and governments - +no border or distance, no political rank or social advantage, no sanctity of profession or regard for dignity, +provide any indemnity from becoming a victim. +Your best hope is to remain uninteresting enough to escape consideration.

The NSO Group has, of course, denied allegations of culpability and questions the authenticity of the list. +At this stage, the latter is almost beside the point: +Amnesty International's cybersecurity team, Security Lab, did find forensic evidence of Pegasus on the phones of several volunteers whose numbers appeared on the original list, +including those of journalists and human rights activists. +(Security Lab has since opened up their infection finding tool to the public.) +French intelligence has similarly inspected and confirmed infection of at least three devices belonging to journalists. +The phones of several people who were close to the Saudi-American journalist, Jamal Khashoggi, were confirmed hacked +both before and after Khashoggi's brutal murder at the Saudi embassy in Istanbul in 2018. +More reports of confirmed Pegasus hacks are still published with some regularity. +It is now an open secret that many authoritarian governments have bought Pegasus. +It's not difficult to extrapolate from existing reports and such clients' track records +what the potential injuries to human freedoms are that they can inflict with access to such a powerful cyberweapon.

A typical response

NSO's response to the allegations follows a textbook approach +of avoiding earnest ethical introspection on the manufacturing, and selling, of cyber-arms. +Firstly, shift ethical responsibility to a predetermined process, a list of checkboxes of your own making. +The Group, for example, claims to sell only to "vetted governments", following a classification process +of which they have now published some procedural details but no tangible criteria. +The next step is to reaffirm continuously, and repetitively, your dedication to the legal combat against crime, +"legitimate law enforcement agencies" (note the almost tautological phrasing), +adherence to international arms trade laws, +compliance clauses in customer contracts, etc. +Thirdly, having been absolved of any moral suspicions that might exist about product and process, +from conception to engineering to trade, +distance yourself from the consequences of its use in the world. +"NSO does not operate its technology, does not collect, nor possesses, nor has any access to any kind of data of its customers." +It is interesting that directly after this statement they claim with contradictory confidence that +their "technology was not associated in any way with the heinous murder of Jamal Khashoggi". +The unapologetic tone seems hardly appropriate when the same document confirms that the Group had to +shut down customers' systems due to "confirmed misuse" and have had to do so "multiple times" in the past. +Given all this, the response manages to evade any serious interrogation of the "vetting" process itself, +which forced the company to reject "approximately 15% of potential new opportunities for Pegasus" in one year. +Courageous.

We have heard this all before. +There exists a multi-billion dollar industry of private companies and engineering firms thriving on proceeds from +selling surveillance tools and cyber-arms to dubious agencies and foreign governments. +In turn, the most power-hungry and oppressive regimes often rely on such technological innovations - +for which they lack the in-country engineering expertise - +to maintain control, suppress uprisings, intimidate opposing journalists, and track their citizens. +It's a lucrative business opportunity, and resourceful companies have sprung up everywhere to supply this demand, +often in countries where citizens, including employees of the company, would be horrified if they were similarly subject to the oppressions of their own products. +When, in 2014, Italy's HackingTeam were pulsed by the United Nations about their (then alleged) selling of spyware to Sudan, +which would have been a contravention of the UN's weapon export ban, +they simply replied that their product was not controlled as a weapon and therefore not subject to such scrutiny. +They remained within their legal bounds, technically. +Furthermore, they similarly shifted ethical responsibility to external standards of legitimacy, +claiming their "software is not sold to governments that are blacklisted by the EU, the US, NATO, and similar international organizations". +When the company themselves were hacked in 2015, +revelations (confirmations, that is) of widespread misuse by repressive governments were damaging enough to force them to disappear and rebrand as Memento Labs. +Their website boasts an impressive list of statutes, regulations, procedures, export controls and legal frameworks, +all of which the rebranded hackers proudly comply with. +Surely no further ethical scrutiny is necessary?

Ethics != the law

The law is trailing behind

Such recourse to the legality of your action as ethical justification is moot for several reasons. +The first is glaringly obvious - +our laws are ill-equipped to address the implications of modern technology. +Legal systems are a cumbersome inheritance built over generations. +This is especially true of the statutes and regulations governing international trade, behind which these companies so often hide. +Our best legal systems are trailing miles behind the technology for which we seek guidelines. +Legislators are still struggling to make sense of technologies like face recognition, +the repercussions of smart devices acting "on their own" and biases in algorithms. +To claim you are performing ethical due diligence by resorting to an outdated and incomplete system of legal codes is disingenuous.

The law depends on ethics

The second reason is more central to my argument, +and an important flaw in these sleight of hand justifications appearing from time to time in the media. +Ethics can in no way be confused as synonymous with legality or legitimacy. +These are incommensurable concepts. +In an ideal world, of course, the law is meant to track the minimum standards of ethical conduct in a society. +Laws are often drafted exactly from some ethical, and practical, impulse to minimize harmful conduct +and provide for corrective and punitive measures where transgressions do occur. +The law, however, has a much narrower scope than ethics. +It can be just or unjust. +In fact, it is in need of ethics to constantly reform. +Ethics and values are born out of collective self-reflection. +It develops in our conversation with ourselves and others about the type of society we strive for. +As such, an ethical worldview summarizes our deepest intuitions about how we should live and measure our impact on the world. +For this reason, ethics is primarily enforced by social and internal pressures, not legal boundaries - +our desire to do what ought to be done, however we define that. +Ethics is therefore a much grander scheme than global legal systems +and the diplomatic frameworks that grant legitimacy to governments. +These are but one limited outflow of the human aspiration to form societies in accordance with our ideologies and ethics.

International law is vague and exploitable

Of course, the cyber-arms trade has a favorite recourse, international law, which is even more limited. +Since such products are seldom sold to governments and agencies within the country of production, +it enables a further distancing from consequences. +Many private surveillance companies are based in fairly liberal societies with (seemingly) strict emphases on human rights in their domestic laws. +International laws are much more complicated - for opportunists a synonym for "more grey areas in which to hide". +Company conduct can now be governed, and excused, by a system that follows +the whims of autocrats with exploitative intent and vastly different ethical conceptions from the company's purported aims. +International law, and the ways it is most often enforced by way of, say, UN-backed sanctions, +have long been shaped by the compromises of international diplomacy. +To be blunt: these laws are weak and subject to exactly the sort of narrow interests behind which mercenaries have always hidden. +The surveillance tech industry is no exception.

Conclusion

My point is simple: +selling cyber-arms with the potential to become vast tools of oppression to governments and bodies with blatant histories of human rights violations, +and all but the publicly announced intention to continue operating in this way, +is categorically unconscionable. +This seems obvious no matter what ethics system you argue from, +provided it harbors any consideration for human dignity and freedom. +It is a sign of poor moral discourse that such recourses to law and legitimacy are often considered synonymous with ethical justification. +"I have acted within the bounds of law", "We supply only to legitimate law enforcement agencies", etc. are no substitutes. +Ethical conduct requires an honest evaluation of an action against some conception of "the good", +however you define that. +Too often the surveillance tech industry precisely sidesteps this question, +both in internal processes and external rationalisations to a concerned public.

John Locke, he of the life-liberty-and-property, articulated the idea that government exists solely through the consent of the governed. +Towards the end of the 17th century, he wrote in his Second Treatise on Civil Government, +"[w]henever legislators endeavor to take away, +and destroy the property of the people, or to reduce them to slavery under arbitrary power, +they put themselves in a state of war with the people, who are thereupon absolved from any further obedience". +The inference is straightforward and humanist in essence: +legitimacy is not something that is conferred by governments and institutions. +Rather, they derive their legitimacy from us, their citizens, holding them to standards of ethics and societal ideals. +This legitimacy only remains intact as long as this mandate is honored and continuously extended by a well-informed public. +This is the principle of informed consent on which all reciprocal ethics is based.

The surveillance tech industry may well have nothing more or less noble in mind than profit-making within legal bounds +when developing and selling their products. +However, when such companies are revealed again and again to have supplied tools of gross human rights violations to known human rights violators, +they will do well to remember that ethics always precedes requirements of legality and legitimacy. +It is a fallacy to take normative guidance from the concept of "legitimacy" +if the concept itself depends on such normative guidelines for definition. +Without examining the ethical standards by which institutions, governments, and laws, were created, +no value-judgements about their legitimacy can be made. +Hiding behind legal compliance as substitute for moral justification is not enough. +Targets of increasingly invasive governmental snooping are too often chosen precisely to suppress the mechanisms from which the legitimacy of such governments flow - +the consent of ordinary civilians. +Free and fair elections, free speech, free media, freedom of thought are all at risk.

References

]]>
+
+ + <![CDATA[Waku v1 vs Waku v2: Bandwidth Comparison]]> + https://vac.dev/rlog/waku-v1-v2-bandwidth-comparison + https://vac.dev/rlog/waku-v1-v2-bandwidth-comparison + Wed, 03 Nov 2021 10:00:00 GMT + + A local comparison of bandwidth profiles showing significantly improved scalability in Waku v2 over Waku v1.

Background

The original plan for Waku v2 suggested theoretical improvements in resource usage over Waku v1, +mainly as a result of the improved amplification factors provided by GossipSub. +In its turn, Waku v1 proposed improvements over its predecessor, Whisper.

Given that Waku v2 is aimed at resource restricted environments, +we are specifically interested in its scalability and resource usage characteristics. +However, the theoretical performance improvements of Waku v2 over Waku v1 +have never been properly benchmarked and tested.

Although we're working towards a full performance evaluation of Waku v2, +this would require significant planning and resources, +if it were to simulate "real world" conditions faithfully and measure bandwidth and resource usage across different network connections, +robustness against attacks/losses, message latencies, etc. +(There already exists a fairly comprehensive evaluation of GossipSub v1.1, +on which 11/WAKU2-RELAY is based.)

As a starting point, +this post contains a limited and local comparison of the bandwidth profile (only) between Waku v1 and Waku v2. +It reuses and adapts existing network simulations for Waku v1 and Waku v2 +and compares bandwidth usage for similar message propagation scenarios.

Theoretical improvements in Waku v2

Messages are propagated in Waku v1 using flood routing. +This means that every peer will forward every new incoming message to all its connected peers (except the one it received the message from). +This necessarily leads to unnecessary duplication (termed amplification factor), +wasting bandwidth and resources. +What's more, we expect this effect to worsen the larger the network becomes, +as each connection will receive a copy of each message, +rather than a single copy per peer.

Message routing in Waku v2 follows the libp2p GossipSub protocol, +which lowers amplification factors by only sending full message contents to a subset of connected peers. +As a Waku v2 network grows, each peer will limit its number of full-message ("mesh") peerings - +libp2p suggests a maximum of 12 such connections per peer. +This allows much better scalability than a flood-routed network. +From time to time, a Waku v2 peer will send metadata about the messages it has seen to other peers ("gossip" peers).

See this explainer for a more detailed discussion.

Methodology

The results below contain only some scenarios that provide an interesting contrast between Waku v1 and Waku v2. +For example, star network topologies do not show a substantial difference between Waku v1 and Waku v2. +This is because each peer relies on a single connection to the central node for every message, +which barely requires any routing: +each connection receives a copy of every message for both Waku v1 and Waku v2. +Hybrid topologies similarly show only a difference between Waku v1 and Waku v2 for network segments with mesh-like connections, +where routing decisions need to be made.

For this reason, the following approach applies to all iterations:

  1. Simulations are run locally. +This limits the size of possible scenarios due to local resource constraints, +but is a way to quickly get an approximate comparison.
  2. Nodes are treated as a blackbox for which we only measure bandwidth, +using an external bandwidth monitoring tool. +In other words, we do not consider differences in the size of the envelope (for v1) or the message (for v2).
  3. Messages are published at a rate of 50 new messages per second to each network, +except where explicitly stated otherwise.
  4. Each message propagated in the network carries 8 bytes of random payload, which is encrypted. +The same symmetric key cryptographic algorithm (with the same keys) is used in both Waku v1 and v2.
  5. Traffic in each network is generated from 10 nodes (randomly-selected) and published in a round-robin fashion to 10 topics (content topics for Waku v2). +In practice, we found no significant difference in average bandwidth usage when tweaking these two parameters (the number of traffic generating nodes and the number of topics).
  6. Peers are connected in a decentralized full mesh topology, +i.e. each peer is connected to every other peer in the network. +Waku v1 is expected to flood all messages across all existing connections. +Waku v2 gossipsub will GRAFT some of these connections for full-message peerings, +with the rest being gossip-only peerings.
  7. After running each iteration, we verify that messages propagated to all peers (comparing the number of published messages to the metrics logged by each peer).

For Waku v1, nodes are configured as "full" nodes (i.e. with full bloom filter), +while Waku v2 nodes are relay nodes, all subscribing and publishing to the same PubSub topic.

Network size comparison

Iteration 1: 10 nodes

Let's start with a small network of 10 nodes only and see how Waku v1 bandwidth usage compares to that of Waku v2. +At this small scale we don't expect to see improved bandwidth usage in Waku v2 over Waku v1, +since all connections, for both Waku v1 and Waku v2, will be full-message connections. +The number of connections is low enough that Waku v2 nodes will likely GRAFT all connections to full-message peerings, +essentially flooding every message on every connection in a similar fashion to Waku v1. +If our expectations are confirmed, it helps validate our methodology, +showing that it gives more or less equivalent results between Waku v1 and Waku v2 networks.

Sure enough, the figure shows that in this small-scale setup, +Waku v1 actually has a lower per-peer bandwidth usage than Waku v2. +One reason for this may be the larger overall proportion of control messages in a gossipsub-routed network such as Waku v2. +These play a larger role when the total network traffic is comparatively low, as in this iteration. +Also note that the average bandwidth remains more or less constant as long as the rate of published messages remains stable.

Iteration 2: 30 nodes

Now, let's run the same scenario for a larger network of highly-connected nodes, this time consisting of 30 nodes. +At this point, the Waku v2 nodes will start pruning some connections to limit the number of full-message peerings (to a maximum of 12), +while the Waku v1 nodes will continue flooding messages to all connected peers. +We therefore expect to see a somewhat improved bandwidth usage in Waku v2 over Waku v1.

Bandwidth usage in Waku v2 has increased only slightly from the smaller network of 10 nodes (hovering between 2000 and 3000 kbps). +This is because there are only a few more full-message peerings than before. +Compare this to the much higher increase in bandwidth usage for Waku v1, which now requires more than 4000 kbps on average.

Iteration 3: 50 nodes

For an even larger network of 50 highly connected nodes, +the divergence between Waku v1 and Waku v2 is even larger. +The following figure shows comparative average bandwidth usage for a throughput of 50 messages per second.

Average bandwidth usage (for the same message rate) has remained roughly the same for Waku v2 as it was for 30 nodes, +indicating that the number of full-message peerings per node has not increased.

Iteration 4: 85 nodes

We already see a clear trend in the bandwidth comparisons above, +so let's confirm by running the test once more for a network of 85 nodes. +Due to local resource constraints, the effective throughput for Waku v1 falls to below 50 messages per second, +so the v1 results below have been normalized and are therefore approximate. +The local Waku v2 simulation maintains the message throughput rate without any problems.

Iteration 5: 150 nodes

Finally, we simulate message propagation in a network of 150 nodes. +Due to local resource constraints, we run this simulation at a lower rate - +35 messages per second - +and for a shorter amount of time.

Notice how the Waku v1 bandwidth usage is now more than 10 times worse than that of Waku v2. +This is to be expected, as each Waku v1 node will try to flood each new message to 149 other peers, +while the Waku v2 nodes limit their full-message peerings to no more than 12.

Discussion

Let's summarize average bandwidth growth against network growth for a constant message propagation rate. +Since we are particularly interested in how Waku v1 compares to Waku v2 in terms of bandwidth usage, +the results are normalised to the Waku v2 average bandwidth usage for each network size.

Extrapolation is a dangerous game, +but it's safe to deduce that the divergence will only grow for even larger network topologies. +Although control signalling contributes more towards overall bandwidth for Waku v2 networks, +this effect becomes less noticeable for larger networks. +For network segments with more than ~18 densely connected nodes, +the advantage of using Waku v2 above Waku v1 becomes clear.

Network traffic comparison

The analysis above controls the average message rate while network size grows. +In reality, however, active users (and therefore message rates) are likely to grow in conjunction with the network. +This will have an effect on bandwidth for both Waku v1 and Waku v2, though not in equal measure. +Consider the impact of an increasing rate of messages in a network of constant size:

The rate of increase in bandwidth for Waku v2 is slower than that for Waku v1 for a corresponding increase in message propagation rate. +In fact, for a network of 30 densely-connected nodes, +if the message propagation rate increases by 1 per second, +Waku v1 requires an increased average bandwidth of almost 70kbps at each node. +A similar traffic increase in Waku v2 requires on average 40kbps more bandwidth per peer, just over half that of Waku v1.

Conclusions

  • Waku v2 scales significantly better than Waku v1 in terms of average bandwidth usage, +especially for densely connected networks.
  • E.g. for a network consisting of 150 or more densely connected nodes, +Waku v2 provides more than 10x better average bandwidth usage rates than Waku v1.
  • As the network continues to scale, both in absolute terms (number of nodes) and in network traffic (message rates) the disparity between Waku v2 and Waku v1 becomes even larger.

Future work

Now that we've confirmed that Waku v2's bandwidth improvements over its predecessor match theory, +we can proceed to a more in-depth characterisation of Waku v2's resource usage. +Some questions that we want to answer include:

  • What proportion of Waku v2's bandwidth usage is used to propagate payload versus bandwidth spent on control messaging to maintain the mesh?
  • To what extent is message latency (time until a message is delivered to its destination) affected by network size and message rate?
  • How reliable is message delivery in Waku v2 for different network sizes and message rates?
  • What are the resource usage profiles of other Waku v2 protocols (e.g. 12/WAKU2-FILTER and 19/WAKU2-LIGHTPUSH)?

Our aim is to get ever closer to a "real world" understanding of Waku v2's performance characteristics, +identify and fix vulnerabilities +and continually improve the efficiency of our suite of protocols.

References

]]>
+
+ + <![CDATA[[Talk at COSCUP] Vac, Waku v2 and Ethereum Messaging]]> + https://vac.dev/rlog/waku-v2-ethereum-coscup + https://vac.dev/rlog/waku-v2-ethereum-coscup + Fri, 06 Aug 2021 12:00:00 GMT + + Learn more about Waku v2, its origins, goals, protocols, implementation and ongoing research. Understand how it is used and how it can be useful for messaging in Ethereum.

This is the English version of a talk originally given in Chinese at COSCUP in Taipei.

video recording with Chinese and English subtitles.


Introduction

Hi everyone!

Today I'll talk to you about Waku v2. What it is, what problems it is solving, +and how it can be useful for things such as messaging in Ethereum. First, let me +start with some brief background.

Brief history and background

Back when Ethereum got started, there used to be this concept of the "holy +trinity". You had Ethereum for compute/consensus, Swarm for storage, and Whisper +for messaging. This is partly where the term Web3 comes from.

Status started out as an app with the goal of being a window onto Ethereum and +a secure messenger. As one of the few, if not the only, apps using Whisper in +production, not to mention on a mobile phone, we quickly realized there were +problems with the underlying protocols and infrastructure. Protocols such as +Whisper weren't quite ready for prime time yet when it came to things such as +scalability and working in the real world.

As we started addressing some of these challenges, and moved from app +development to focusing on protocols, research and infrastructure, we created +Vac. Vac is an r&d unit doing protocol research focused on creating modular p2p +messaging protocols for private, secure, censorship resistant communication.

I won't go into too much detail on the issues with Whisper, if you are +interested in this check out this talk +here or this +article.

In a nutshell, we forked Whisper to address immediate shortcomings and this +became Waku v1. Waku v2 is a completely re-thought implementation from scratch on top +of libp2p. This will be the subject of today's talk.

Waku v2

Overview

Waku v2 is a privacy-preserving peer-to-peer messaging protocol for resource +restricted devices. We can look at Waku v2 as several things:

  • Set of protocols
  • Set of implementations
  • Network of nodes

Let's first look at what the goals are.

Goals

Waku v2 provides a PubSub based messaging protocol with the following +characteristics:

  1. Generalized messaging. Applications that require a messaging protocol to +communicate human to human, machine to machine, or a mix.
  2. Peer-to-peer. For applications that require a p2p solution.
  3. Resource restricted. For example, running with limited bandwidth, being +mostly-offline, or in a browser.
  4. Privacy. Applications that have privacy requirements, such as pseudonymity, +metadata protection, etc.

And to provide these properties in a modular fashion, where applications can +choose their desired trade-offs.

Protocols

Waku v2 consists of several protocols. Here we highlight a few of the most +important ones:

  • 10/WAKU2 - main specification, details how all the pieces fit together
  • 11/RELAY - thin layer on top of GossipSub for message dissemination
  • 13/STORE - fetching of historical messages
  • 14/MESSAGE - message payload

This is the recommended subset for a minimal Waku v2 client.

In addition to this there are many other types of specifications at various +stages of maturity, such as: content based filtering, bridge mode to Waku v1, +JSON RPC API, zkSNARKS based spam protection with RLN, accounting and +settlements with SWAP, fault-tolerant store nodes, recommendations around topic +usage, and more.

See https://rfc.vac.dev/ for a full overview.

Implementations

Waku v2 consists of multiple implementations. This allows for client diversity, +makes it easier to strengthen the protocols, and allows people to use Waku v2 in +different contexts.

  • nim-waku - the reference client written in Nim, most full-featured.
  • js-waku - allow usage of Waku v2 from browsers, focus on interacting with dapps.
  • go-waku - subset of Waku v2 to ease integration into the Status app.

Testnet Huilong and dogfooding

In order to test the protocol we have set up a testnet across all implementations +called Huilong. Yes, that's the Taipei subway station!

Among us core devs we have disabled the main #waku Discord channel used for +development, and people run their own node connected to this toy chat application.

Feel free to join and say hi! Instructions can be found here:

Research

While Waku v2 is being used today, we are actively researching improvements. +Since the design is modular, we can gracefully introduce new capabilities. Some +of these research areas are:

  • Privacy-preserving spam protection using zkSNARKs and RLN
  • Accounting and settlement of resource usage to incentivize nodes to provide services with SWAP
  • State synchronization for store protocol to make it easier to run a store node without perfect uptime
  • Better node discovery
  • More rigorous privacy analysis
  • Improving interaction with wallets and dapp

Use cases

Let's look at where Waku v2 is and can be used.

Prelude: Topics in Waku v2

To give some context, there are two different types of topics in Waku v2. One is +a PubSub topic, for routing. The other is a content topic, which is used for +content based filtering. Here's an example of the default PubSub topic:

/waku/2/default-waku/proto

This is recommended as it increases privacy for participants and it is stored by +default, however this is up to the application.

The second type of topic is a content topic, which is application specific. For +example, here's the content topic used in our testnet:

/toychat/2/huilong/proto

For more on topics, see https://rfc.vac.dev/spec/23/

Status app

In the Status protocol, content topics - topics in Whisper/Waku v1 - are used for several things:

  • Contact code topic to discover X3DH bundles for perfect forward secrecy
    • Partitioned into N (currently 5000) content topics to balance privacy with efficiency
  • Public chats correspond to hash of the plaintext name
  • Negotiated topic for 1:1 chat with DHKE derived content topic

See more here https://specs.status.im/spec/10

Currently, Status app is in the process of migrating to and testing Waku v2.

DappConnect: Ethereum messaging

It is easy to think of Waku as being for human messaging, since that's how it is +primarily used in the Status app, but the goal is to be useful for generalized +messaging, which includes Machine-To-Machine (M2M) messaging.

Recall the concept of the holy trinity with Ethereum/Swarm/Whisper and Web3 that +we mentioned in the beginning. Messaging can be used as a building block for +dapps, wallets, and users to communicate with each other. It can be used for +things such as:

  • Multisig and DAO vote transactions only needing one on-chain operation
  • Giving dapps ability to send push notifications to users
  • Giving users ability to directly respond to requests from dapps
  • Decentralized WalletConnect
  • Etc

Basically anything that requires communication and doesn't have to be on-chain.

WalletConnect v2

WalletConnect is an open protocol for connecting dapps to wallets with a QR +code. Version 2 is using Waku v2 as a communication channel to do so in a +decentralized and private fashion.

See for more: https://docs.walletconnect.org/v/2.0/tech-spec

WalletConnect v2 is currently in late alpha using Waku v2.

More examples

  • Gasless voting and vote aggregation off-chain
  • Dapp games using Waku as player discovery mechanism
  • Send encrypted message to someone with an Ethereum key
  • <Your dapp here>

These are all things that are in progress / proof of concept stage.

Contribute

We'd love to see contributions of any form!

Conclusion

In this talk we've gone over the original vision for Web3 and how Waku came to +be. We've also looked at what Waku v2 aims to do. We looked at its protocols, +implementations, the current testnet as well as briefly on some ongoing +research for Vac.

We've also looked at some specific use cases for Waku. First we looked at how +Status uses it with different topics. Then we looked at how it can be useful for +messaging in Ethereum, including for things like WalletConnect.

I hope this talk gives you a better idea of what Waku is, why it exists, and +that it inspires you to contribute, either to Waku itself or by using it in your +own project!

]]>
+
+ + <![CDATA[Presenting JS-Waku: Waku v2 in the Browser]]> + https://vac.dev/rlog/presenting-js-waku + https://vac.dev/rlog/presenting-js-waku + Fri, 04 Jun 2021 12:00:00 GMT + + JS-Waku is bringing Waku v2 to the browser. Learn what we achieved so far and what is next in our pipeline!

For the past 3 months, we have been working on bringing Waku v2 to the browser. +Our aim is to empower dApps with Waku v2, and it led to the creation of a new library. +We believe now is a good time to introduce it!

Waku v2

First, let's review what Waku v2 is and what problem it is trying to solve.

Waku v2 comes from a need to have a more scalable, better optimised solution for the Status app to achieve decentralised +communications on resource restricted devices (i.e., mobile phones).

The Status chat feature was initially built over Whisper. +However, Whisper has a number of caveats which makes it inefficient for mobile phones. +For example, with Whisper, all devices are receiving all messages which is not ideal for limited data plans.

To remediate this, a Waku mode (then Waku v1), based on devp2p, was introduced. +To further enable web and restricted resource environments, Waku v2 was created based on libp2p. +The migration of the Status chat feature to Waku v2 is currently in progress.

We see the need for such a solution in the broader Ethereum ecosystem, beyond Status. +This is why we are building Waku v2 as a decentralised communication platform for all to use and build on. +If you want to read more about Waku v2 and what it aims to achieve, +check out What's the Plan for Waku v2?.

Since last year, we have been busy defining and implementing Waku v2 protocols in nim-waku, +from which you can build wakunode2. +Wakunode2 is an adaptive and modular Waku v2 node, +it allows users to run their own node and use the Waku v2 protocols they need. +The nim-waku project doubles as a library, that can be used to add Waku v2 support to native applications.

Waku v2 in the browser

We believe that dApps and wallets can benefit from the Waku network in several ways. +For some dApps, it makes sense to enable peer-to-peer communications. +For others, machine-to-machine communications would be a great asset. +For example, in the case of a DAO, +Waku could be used for gas-less voting. +Enabling the DAO to notify their users of a new vote, +and users to vote without interacting with the blockchain and spending gas.

Murmur was the first attempt to bring Whisper to the browser, +acting as a bridge between devp2p and libp2p. +Once Waku v2 was started and there was a native implementation on top of libp2p, +a chat POC was created to demonstrate the potential of Waku v2 +in a web environment. +It showed how using js-libp2p with a few modifications enabled access to the Waku v2 network. +There were still some unresolved challenges. +For example, nim-waku only supports TCP connections, which are not supported by browser applications. +Hence, to connect to other nodes, the POC was connecting to a NodeJS proxy application using websockets, +which in turn could connect to wakunode2 via TCP.

However, to enable dApp and Wallet developers to easily integrate Waku in their product, +we need to give them a library that is easy to use and works out of the box: +introducing JS-Waku.

JS-Waku is a JavaScript library that allows your dApp, wallet or other web app to interact with the Waku v2 network. +It is available right now on npm:

npm install js-waku.

As it is written in TypeScript, types are included in the npm package to allow easy integration with TypeScript, ClojureScript and other typed languages that compile to JavaScript.

Key Waku v2 protocols are already available: +message, store, relay and light push, +enabling your dApp to:

  • Send and receive near-instant messages on the Waku network (relay),
  • Query nodes for messages that may have been missed, e.g. due to poor cellular network (store),
  • Send messages with confirmations (light push).

JS-Waku needs to operate in the same context from which Waku v2 was born: +a restricted environment where connectivity or uptime are not guaranteed; +JS-Waku brings Waku v2 to the browser.

Achievements so far

We focused the past month on developing a ReactJS Chat App. +The aim was to create enough building blocks in JS-Waku to enable this showcase web app that +we now use for dogfooding purposes.

Most of the effort was on getting familiar with the js-libp2p library +that we heavily rely on. +JS-Waku is the second implementation of Waku v2 protocol, +so a lot of effort on interoperability was needed. +For example, to ensure compatibility with the nim-waku reference implementation, +we run our tests against wakunode2 as part of the CI.

This interoperability effort helped solidify the current Waku v2 specifications: +by clarifying the usage of topics +(#327, #383), +fixing discrepancies between specs and nim-waku +(#418, #419) +and fixing small nim-waku & nim-libp2p bugs +(#411, #439).

To fully access the waku network, JS-Waku needs to enable web apps to connect to nim-waku nodes. +A standard way to do so is using secure websockets as it is not possible to connect directly to a TCP port from the browser. +Unfortunately websocket support is not yet available in nim-libp2p so +we ended up deploying websockify alongside wakunode2 instances.

As we built the web chat app, +we were able to fine tune the API to provide a simple and succinct interface. +You can start a node, connect to other nodes and send a message in less than ten lines of code:

// Import everything the example uses: the original snippet imported only
// `Waku`, so `getStatusFleetNodes` and `WakuMessage` were undefined at runtime.
import { getStatusFleetNodes, Waku, WakuMessage } from 'js-waku'

// Start a local Waku node.
const waku = await Waku.create({})

// Discover Status fleet bootstrap nodes and dial them all in parallel.
const nodes = await getStatusFleetNodes()
await Promise.all(nodes.map((addr) => waku.dial(addr)))

// Build a message on an app-specific content topic and publish it via relay.
const msg = WakuMessage.fromUtf8String(
  'Here is a message!',
  '/my-cool-app/1/my-use-case/proto',
)
await waku.relay.send(msg)

We have also put a bounty at 0xHack for using JS-Waku +and running a workshop. +We were thrilled to have a couple of hackers create new software using our libraries. +One of the projects aimed to create a decentralised, end-to-end encrypted messenger app, +similar to what the ETH-DM protocol aims to achieve. +Another project was a decentralised Twitter platform. +Such projects allow us to prioritize the work on JS-Waku and understand how DevEx can be improved.

As more developers use JS-Waku, we will evolve the API to allow for more custom and fine-tune usage of the network +while preserving this out of the box experience.

What's next?

Next, we are directing our attention towards Developer Experience. +We already have documentation available but we want to provide more: +Tutorials, various examples +and showing how JS-Waku can be used with Web3.

By prioritizing DevEx we aim to enable JS-Waku integration in dApps and wallets. +We think JS-Waku builds a strong case for machine-to-machine (M2M) communications. +The first use cases we are looking into are dApp notifications: +Enabling dApp to notify their user directly in their wallets! +Leveraging Waku as a decentralised infrastructure and standard so that users do not have to open their dApp to be notified +of events such as DAO voting.

We already have some POC in the pipeline to enable voting and polling on the Waku network, +allowing users to save gas by not broadcasting each individual vote on the blockchain.

To facilitate said applications, we are looking at improving integration with Web3 providers by providing examples +of signing, validating, encrypting and decrypting messages using Web3. +Waku is privacy conscious, so we will also provide signature and encryption examples decoupled from users' Ethereum identity.

As you can read, we have grand plans for JS-Waku and Waku v2. +There is a lot to do, and we would love some help so feel free to +check out the new role in our team: +js-waku: Wallet & Dapp Integration Developer. +We also have a number of positions open to work on Waku protocol and nim-waku.

If you are as excited as us by JS-Waku, why not build a dApp with it? +You can find documentation on the npmjs page.

Whether you are a developer, you can come chat with us using WakuJS Web Chat +or chat2. +You can get support in #dappconnect-support on Vac Discord or Telegram. +If you have any ideas on how Waku could enable a specific dapp or use case, do share, we are always keen to hear it.

]]>
+
+ + <![CDATA[Privacy-preserving p2p economic spam protection in Waku v2]]> + https://vac.dev/rlog/rln-relay + https://vac.dev/rlog/rln-relay + Fri, 05 Mar 2021 12:00:00 GMT + + This post is going to give you an overview of how spam protection can be achieved in Waku Relay through rate-limiting nullifiers. We will cover a summary of spam-protection methods in centralized and p2p systems, and the solution overview and details of the economic spam-protection method. The open issues and future steps are discussed in the end.

Introduction

This post is going to give you an overview of how spam protection can be achieved in Waku Relay protocol2 through Rate-Limiting Nullifiers3 4 or RLN for short.

Let me give a little background about Waku(v2)1. Waku is a privacy-preserving peer-to-peer (p2p) messaging protocol for resource-restricted devices. Being p2p means that Waku relies on no central server. Instead, peers collaboratively deliver messages in the network. Waku uses GossipSub16 as the underlying routing protocol (as of the writeup of this post). At a high level, GossipSub is based on a publisher-subscriber architecture. That is, peers congregate around topics they are interested in and can send messages to topics. Each message gets delivered to all peers subscribed to the topic. In GossipSub, a peer has a constant number of direct connections/neighbors. In order to publish a message, the author forwards its message to a subset of neighbors. The neighbors proceed similarly till the message gets propagated in the network of the subscribed peers. The message publishing and routing procedures are part of the Waku Relay17 protocol.

Figure 1: An overview of privacy-preserving p2p economic spam protection in Waku v2 RLN-Relay protocol.

What do we mean by spamming?

In centralized messaging systems, a spammer usually indicates an entity that uses the messaging system to send an unsolicited message (spam) to large numbers of recipients. However, in Waku with a p2p architecture, spam messages not only affect the recipients but also all the other peers involved in the routing process as they have to spend their computational power/bandwidth/storage capacity on processing spam messages. As such, we define a spammer as an entity that uses the messaging system to publish a large number of messages in a short amount of time. The messages issued in this way are called spam. In this definition, we disregard the intention of the spammer as well as the content of the message and the number of recipients.

Possible Solutions

Has the spamming issue been addressed before? Of course yes! Here is an overview of the spam protection techniques with their trade-offs and use-cases. In this overview, we distinguish between protection techniques that are targeted for centralized messaging systems and those for p2p architectures.

Centralized Messaging Systems

In traditional centralized messaging systems, spam usually signifies unsolicited messages sent in bulk or messages with malicious content like malware. Protection mechanisms include

  • authentication through some piece of personally identifiable information e.g., phone number
  • checksum-based filtering to protect against messages sent in bulk
  • challenge-response systems
  • content filtering on the server or via a proxy application

These methods exploit the fact that the messaging system is centralized and a global view of the users' activities is available based on which spamming patterns can be extracted and defeated accordingly. Moreover, users are associated with an identifier e.g., a username which enables the server to profile each user e.g., to detect suspicious behavior like spamming. Such profiling possibility is against the user's anonymity and privacy.

Among the techniques enumerated above, authentication through phone numbers is a somewhat economic-incentive measure, as providing multiple valid phone numbers will be expensive for the attacker. Notice that while using an expensive authentication method can reduce the number of accounts owned by a single spammer, it cannot address the spam issue entirely. This is because the spammer can still send bulk messages through one single account. For this approach to be effective, a centralized mediator is essential. That is why such a solution would not fit the p2p environments where no centralized control exists.

P2P Systems

What about spam prevention in p2p messaging platforms? There are two techniques, namely Proof of Work8 deployed by Whisper9 and Peer scoring6 method (namely reputation-based approach) adopted by LibP2P. However, each of these solutions has its own shortcomings for real-life use-cases as explained below.

Proof of work

The idea behind the Proof Of Work i.e., POW8 is to make messaging a computationally costly operation hence lowering the messaging rate of all the peers including the spammers. In specific, the message publisher has to solve a puzzle and the puzzle is to find a nonce such that the hash of the message concatenated with the nonce has at least z leading zeros. z is known as the difficulty of the puzzle. Since the hash function is one-way, peers have to brute-force to find a nonce. Hashing is a computationally-heavy operation so is the brute-force. While solving the puzzle is computationally expensive, it is comparatively cheap to verify the solution.

POW is also used as the underlying mining algorithm in Ethereum and Bitcoin blockchain. There, the goal is to contain the mining speed and allow the decentralized network to come to a consensus, or agree on things like account balances and the order of transactions.

While the use of POW makes perfect sense in Ethereum / Bitcoin blockchain, it shows practical issues in heterogeneous p2p messaging systems with resource-restricted peers. Some peers won't be able to carry the designated computation and will be effectively excluded. Such exclusion showed to be practically an issue in applications like Status, which used to rely on POW for spam-protection, to the extent that the difficulty level had to be set close to zero.

Peer Scoring

The peer scoring method6 that is utilized by libp2p is to limit the number of messages issued by a peer in connection to another peer. That is, each peer monitors all the peers to which it is directly connected and adjusts their messaging quota i.e., to route or not route their messages depending on their past activities. For example, if a peer detects its neighbor is sending more than x messages per month, it can drop its quota to z.x where z is less than one. The shortcoming of this solution is that scoring is based on peers' local observations and the concept of the score is defined in relation to one single peer. This leaves room for an attack where a spammer can make connections to k peers in the system and publish k.(x-1) messages by exploiting all of its k connections. Another attack scenario is through botnets consisting of a large number of e.g., a million bots. The attacker rents a botnet and inserts each of them as a legitimate peer to the network and each can publish x-1 messages per month7.

Economic-Incentive Spam protection

Is this the end of our spam-protection journey? Shall we simply give up and leave spammers be? Certainly not! +Waku RLN-Relay gives us a p2p spam-protection method which:

  • suits p2p systems and does not rely on any central entity.
  • is efficient i.e., with no unreasonable computational, storage, memory, and bandwidth requirement! as such, it fits the network of heterogeneous peers.
  • respects users privacy unlike reputation-based and centralized methods.
  • deploys economic-incentives to contain spammers' activity. Namely, there is a financial sacrifice for those who want to spam the system. How? follow along ...

We devise a general rule to save everyone's life and that is

No one can publish more than M messages per epoch without being financially charged!

We set M to 1 for now, but this can be any arbitrary value. You may be thinking "This is too restrictive! Only one per epoch?". Don't worry, we set the epoch to a reasonable value so that it does not slow down the communication of innocent users but will make the life of spammers harder! Epoch here can be every second, as defined by UTC date-time +-20s.

The remainder of this post is all about the story of how to enforce this limit on each user's messaging rate as well as how to impose the financial cost when the limit gets violated. This brings us to the Rate Limiting Nullifiers and how we integrate this technique into Waku v2 (in specific the Waku Relay protocol) to protect our valuable users against spammers.

Technical Terms

Zero-knowledge proof: Zero-knowledge proof (ZKP)14 allows a prover to show a verifier that they know something, without revealing what that something is. This means you can do the trust-minimized computation that is also privacy-preserving. As a basic example, instead of showing your ID when going to a bar you simply give them proof that you are over 18, without showing the doorman your id. In this write-up, by ZKP we essentially mean zkSNARK15 which is one of the many types of ZKPs.

Threshold Secret Sharing Scheme: (m,n) Threshold secret-sharing is a method by which you can split a secret value s into n pieces in a way that the secret s can be reconstructed by having m pieces (m <= n). The economic-incentive spam protection utilizes a (2,n) secret sharing realized by Shamir Secret Sharing Scheme13.

Overview: Economic-Incentive Spam protection through Rate Limiting Nullifiers

Context: We started the idea of economic-incentive spam protection more than a year ago and conducted a feasibility study to identify blockers and unknowns. The results are published in our prior post. Since then major progress has been made and the prior identified blockers that are listed below are now addressed. Kudos to Barry WhiteHat, Onur Kilic, Koh Wei Jie for all of their hard work, research, and development which made this progress possible.

  • the proof time22 which was initially in the order of minutes ~10 mins and now is almost 0.5 seconds
  • the prover key size21 which was initially ~110MB and now is ~3.9MB
  • the lack of Shamir logic19 which is now implemented and part of the RLN repository4
  • the concern regarding the potential multi-party computation for the trusted setup of zkSNARKs which got resolved20
  • the lack of end-to-end integration that now we made it possible, have it implemented, and are going to present it in this post. New blockers are also sorted out during the e2e integration which we will discuss in the Feasibility and Open Issues section.

Now that you have more context, let's see how the final solution works. The fundamental point is to make it economically costly to send more than your share of messages and to do so in a privacy-preserving and e2e fashion. To do that we have the following components:

  • 1- Group: We manage all the peers inside a large group (later we can split peers into smaller groups, but for now consider only one). The group management is done via a smart contract which is devised for this purpose and is deployed on the Ethereum blockchain.
  • 2- Membership: To be able to send messages and in specific for the published messages to get routed by all the peers, publishing peers have to register to the group. Membership involves setting up public and private key pairs (think of it as the username and password). The private key remains at the user side but the public key becomes a part of the group information on the contract (publicly available) and everyone has access to it. Public keys are not human-generated (like usernames) and instead they are random numbers, as such, they do not reveal any information about the owner (think of public keys as pseudonyms). Registration is mandatory for the users who want to publish a message, however, users who only want to listen to the messages are more than welcome and do not have to register in the group.
  • Membership fee: Membership is not for free! each peer has to lock a certain amount of funds during the registration (this means peers have to have an Ethereum account with sufficient balance for this sake). This fund is safely stored on the contract and remains intact unless the peer attempts to break the rules and publish more than one message per epoch.
  • Zero-knowledge Proof of membership: Do you want your message to get routed to its destination, fine, but you have to prove that you are a member of the group (sorry, no one can escape the registration phase!). Now, you may be thinking that should I attach my public key to my message to prove my membership? Absolutely Not! we said that our solution respects privacy! membership proofs are done in a zero-knowledge manner that is each message will carry cryptographic proof asserting that "the message is generated by one of the current members of the group", so your identity remains private and your anonymity is preserved!
  • Slashing through secret sharing: Till now it does not seem like we can catch spammers, right? yes, you are right! now comes the exciting part, detecting spammers and slashing them. The core idea behind the slashing is that each publishing peer (not routing peers!) has to integrate a secret share of its private key inside the message. The secret share is deterministically computed over the private key and the current epoch. The content of this share is harmless for the peer's privacy (it looks random) unless the peer attempts to publish more than one message in the same epoch hence disclosing more than one secret share of its private key. Indeed two distinct shares of the private key under the same epoch are enough to reconstruct the entire private key. Then what should you do with the recovered private key? hurry up! go to the contract and withdraw the private key and claim its fund and get rich!! Are you thinking what if spammers attach junk values instead of valid secret shares? Of course, that wouldn't be cool! so, there is a zero-knowledge proof for this sake as well where the publishing peer has to prove that the secret shares are generated correctly.

A high-level overview of the economic spam protection is shown in Figure 1.

Flow

In this section, we describe the flow of the economic-incentive spam detection mechanism from the viewpoint of a single peer. An overview of this flow is provided in Figure 3.

Setup and Registration

A peer willing to publish a message is required to register. Registration is moderated through a smart contract deployed on the Ethereum blockchain. The state of the contract contains the list of registered members' public keys. An overview of registration is illustrated in Figure 2.

For the registration, a peer creates a transaction that sends x amount of Ether to the contract. The peer who has the "private key" sk associated with that deposit would be able to withdraw x Ether by providing valid proof. Note that sk is initially only known by the owning peer however it may get exposed to other peers in case the owner attempts spamming the system i.e., sending more than one message per epoch. +The following relation holds between the sk and pk i.e., pk = H(sk) where H denotes a hash function. +

Figure 2: Registration

Maintaining the membership Merkle Tree

The ZKP of membership that we mentioned before relies on the representation of the entire group as a Merkle Tree. The tree construction and maintenance is delegated to the peers (the initial idea was to keep the tree on the chain as part of the contract, however, the cost associated with member deletion and insertion was high and unreasonable, please see Feasibility and Open Issues for more details). As such, each peer needs to build the tree locally and sync itself with the contract updates (peer insertion and deletion) to mirror them on its tree. +Two pieces of information of the tree are important as they enable peers to generate zero-knowledge proofs. One is the root of the tree and the other is the membership proof (or the authentication path). The tree root is public information whereas the membership proof is private data (or more precisely the index of the peer in the tree).

Publishing

In order to publish at a given epoch, each message must carry a proof i.e., a zero-knowledge proof signifying that the publishing peer is a registered member, and has not exceeded the messaging rate at the given epoch.

Recall that the enforcement of the messaging rate was through associating a secret shared version of the peer's sk into the message together with a ZKP that the secret shares are constructed correctly. As for the secret sharing part, the peer generates the following data:

  1. shareX
  2. shareY
  3. nullifier

The pair (shareX, shareY) is the secret shared version of sk that are generated using Shamir secret sharing scheme. Having two such pairs for an identical nullifier results in full disclosure of peer's sk and hence burning the associated deposit. Note that the nullifier is a deterministic value derived from sk and epoch therefore any two messages issued by the same peer (i.e., using the same sk) for the same epoch are guaranteed to have identical nullifiers.

Finally, the peer generates a zero-knowledge proof zkProof asserting the membership of the peer in the group and the correctness of the attached secret share (shareX, shareY) and the nullifier. In order to generate a valid proof, the peer needs to have two private inputs i.e., its sk and its authentication path. Other inputs are the tree root, epoch, and the content of the message.

Privacy Hint: Note that the authentication path of each peer depends on the recent list of members (hence changes when new peers register or leave). As such, it is recommended (and necessary for privacy/anonymity) that the publisher updates her authentication path based on the latest status of the group and attempts the proof using the updated version.

An overview of the publishing procedure is provided in Figure 3.

Routing

Upon the receipt of a message, the routing peer needs to decide whether to route it or not. This decision relies on the following factors:

  1. If the epoch value attached to the message has a non-reasonable gap with the routing peer's current epoch then the message must be dropped (this is to prevent a newly registered peer spamming the system by messaging for all the past epochs).
  2. The message MUST contain valid proof that gets verified by the routing peer. +If the preceding checks are passed successfully, then the message is relayed. In case of an invalid proof, the message is dropped. If spamming is detected, the publishing peer gets slashed (see Spam Detection and Slashing).

An overview of the routing procedure is provided in Figure 3.

Spam Detection and Slashing

In order to enable local spam detection and slashing, routing peers MUST record the nullifier, shareX, and shareY of any incoming message conditioned that it is not spam and has valid proof. To do so, the peer should follow the following steps.

  1. The routing peer first verifies the zkProof and drops the message if not verified.
  2. Otherwise, it checks whether a message with an identical nullifier has already been relayed.
    • a) If such message exists and its shareX and shareY components are different from the incoming message, then slashing takes place (if the shareX and shareY fields of the previously relayed message is identical to the incoming message, then the message is a duplicate and shall be dropped).
    • b) If none found, then the message gets relayed.

An overview of the slashing procedure is provided in Figure 3. +

Figure 3: Publishing, Routing and Slashing workflow.

Feasibility and Open Issues

We've come a long way since a year ago: blockers have been resolved, and we now have an end-to-end implementation. We learned a lot and could identify further issues and unknowns, some of which are blocking getting to production. A summary of the identified issues is presented below.

Storage overhead per peer

Currently, peers are supposed to maintain the entire tree locally and it imposes storage overhead which is linear in the size of the group (see this issue11 for more details). One way to cope with this is to use the light-node and full-node paradigm in which only a subset of peers who are more resourceful retain the tree whereas the light nodes obtain the necessary information by interacting with the full nodes. Another way to approach this problem is through a more storage efficient method (as described in this research issue12) where peers store a partial view of the tree instead of the entire tree. Keeping the partial view lowers the storage complexity to O(log(N)) where N is the size of the group. There are still unknown unknowns to this solution, as such, it must be studied further to become fully functional.

Cost-effective way of member insertion and deletion

Currently, the cost associated with RLN-Relay membership is around 30 USD10. We aim at finding a more cost-effective approach. Please feel free to share with us your solution ideas in this regard in this issue.

Exceeding the messaging rate via multiple registrations

While the economic-incentive solution discourages spamming, we should note that there are still expensive attacks23 that a spammer can launch to break the messaging rate limit. That is, the attacker can pay for multiple legit registrations e.g., k, hence being able to publish k messages per epoch. We believe that the higher the membership fee is, the less probable such an attack would be, hence a stronger level of spam-protection can be achieved. Following this argument, the high fee associated with the membership (which we listed above as an open problem) can indeed contribute to a better protection level.

Conclusion and Future Steps

As discussed in this post, Waku RLN Relay can achieve privacy-preserving economic spam protection through rate-limiting nullifiers. The idea is to financially discourage peers from publishing more than one message per epoch. In specific, exceeding the messaging rate results in a financial charge. Those who violate this rule are called spammers and their messages are spam. The identification of spammers does not rely on any central entity. Also, the financial punishment of spammers is cryptographically guaranteed. In this solution, privacy is guaranteed since: 1) Peers do not have to disclose any piece of personally identifiable information in any phase i.e., neither in the registration nor in the messaging phase 2) Peers can prove that they have not exceeded the messaging rate in a zero-knowledge manner and without leaving any trace to their membership accounts. Furthermore, all the computations are light, hence this solution fits the heterogeneous p2p messaging system. Note that the zero-knowledge proof parts are handled through zkSNARKs and the benchmarking result can be found in the RLN benchmark report5.

Future steps:

We are still at the PoC level, and the development is in progress. As our future steps,

  • we would like to evaluate the running time associated with the Merkle tree operations. Indeed, the need to locally store Merkle tree on each peer was one of the unknowns discovered during this PoC and yet the concrete benchmarking result in this regard is not available.
  • We would also like to pursue our storage-efficient Merkle Tree maintenance solution in order to lower the storage overhead of peers.
  • In line with the storage optimization, the full-node light-node structure is another path to follow.
  • Another possible improvement is to replace the membership contract with a distributed group management scheme e.g., through distributed hash tables. This is to address possible performance issues that the interaction with the Ethereum blockchain may cause. For example, the registration transactions are subject to delay as they have to be mined before being visible in the state of the membership contract. This means peers have to wait for some time before being able to publish any message.

Acknowledgement

Thanks to Onur Kılıç for his explanation and pointers and for assisting with development and runtime issues. Also thanks to Barry Whitehat for his time and insightful comments. Special thanks to Oskar Thoren for his constructive comments and his guides during the development of this PoC and the writeup of this post.

References


  1. RLN-Relay specification: https://rfc.vac.dev/spec/17/
  2. RLN documentation: https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?both
  3. RLN repositories: https://github.com/kilic/RLN and https://github.com/kilic/rlnapp
  4. Waku v2: https://rfc.vac.dev/spec/10/
  5. GossipSub: https://docs.libp2p.io/concepts/publish-subscribe/
  6. Waku Relay: https://rfc.vac.dev/spec/11/
  7. Proof of work: http://www.infosecon.net/workshop/downloads/2004/pdf/clayton.pdf and https://link.springer.com/content/pdf/10.1007/3-540-48071-4_10.pdf
  8. EIP-627 Whisper: https://eips.ethereum.org/EIPS/eip-627
  9. Peer Scoring: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring
  10. Peer scoring security issues: https://github.com/vacp2p/research/issues/44
  11. Zero Knowledge Proof: https://dl.acm.org/doi/abs/10.1145/3335741.3335750 and https://en.wikipedia.org/wiki/Zero-knowledge_proof
  12. zkSNARKs: https://link.springer.com/chapter/10.1007/978-3-662-49896-5_11 and https://coinpare.io/whitepaper/zcash.pdf
  13. Shamir Secret Sharing Scheme: https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing
  14. zkSNARKs proof time: https://github.com/vacp2p/research/issues/7
  15. Prover key size: https://github.com/vacp2p/research/issues/8
  16. The lack of Shamir secret sharing in zkSNARKs: https://github.com/vacp2p/research/issues/10
  17. The MPC required for zkSNARKs trusted setup: https://github.com/vacp2p/research/issues/9
  18. Storage overhead per peer: https://github.com/vacp2p/research/issues/57
  19. Storage-efficient Merkle Tree maintenance: https://github.com/vacp2p/research/pull/54
  20. Cost-effective way of member insertion and deletion: https://github.com/vacp2p/research/issues/56
  21. Attack on the messaging rate: https://github.com/vacp2p/specs/issues/251
  22. RLN Benchmark: https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Benchmarks
]]>
+
+ + <![CDATA[[Talk] Vac, Waku v2 and Ethereum Messaging]]> + https://vac.dev/rlog/waku-v2-ethereum-messaging + https://vac.dev/rlog/waku-v2-ethereum-messaging + Tue, 10 Nov 2020 12:00:00 GMT + + Talk from Taipei Ethereum Meetup. Read on to find out about our journey from Whisper to Waku v2, as well as how Waku v2 can be useful for Etherum Messaging.

The following post is a transcript of the talk given at the Taipei Ethereum meetup, November 5. There is also a video recording.


0. Introduction

Hi! My name is Oskar and I'm the protocol research lead at Vac. This talk will be divided into two parts. First I'll talk about the journey from Whisper, to Waku v1 and now to Waku v2. Then I'll talk about messaging in Ethereum. After this talk, you should have an idea of what Waku v2 is, the problems it is trying to solve, as well as where it can be useful for messaging in Ethereum.

PART 1 - VAC AND THE JOURNEY FROM WHISPER TO WAKU V1 TO WAKU V2

1. Vac intro

First, what is Vac? Vac grew out of our efforts at Status to create a window onto Ethereum and a secure messenger. Vac is a modular protocol stack for p2p secure messaging, paying special attention to resource-restricted devices, privacy and censorship resistance.

Today we are going to talk mainly about Waku v2, which is the transport privacy / routing aspect of the Vac protocol stack. It sits "above" the p2p overlay, such as libp2p dealing with transports etc, and below a conversational security layer dealing with messaging encryption, such as using Double Ratchet etc.

2. Whisper to Waku v1

In the beginning, there was Whisper. Whisper was part of the holy trinity of Ethereum. You had Ethereum for consensus/computation, Whisper for messaging, and Swarm for storage.

However, for various reasons, Whisper didn't get the attention it deserved. Development dwindled, it promised too much and it suffered from many issues, such as being extremely inefficient and not being suitable for running on e.g. mobile phone. Despite this, Status used it in its app from around 2017 to 2019. As far as I know, it was one of very few, if not the only, production uses of Whisper.

In an effort to solve some of its immediate problems, we forked Whisper into Waku and formalized it with a proper specification. This solved immediate bandwidth issues for light nodes, introduced rate limiting for better spam protection, improved historical message support, etc.

If you are interested in this journey, checkout the EthCC talk Dean and I gave in Paris earlier this year.

Status upgraded to Waku v1 early 2020. What next?

3. Waku v1 to v2

We were far from done. The changes we had made were quite incremental and done in order to get tangible improvements as quickly as possible. This meant we couldn't address more fundamental issues related to full node routing scalability, running with libp2p for more transports, better security, better spam protection and incentivization.

This kickstarted Waku v2 efforts, which is what we've been working on since July. This work was and is initially centered around a few pieces:

(a) Moving to libp2p

(b) Better routing

(c) Accounting and user-run nodes

The general theme was: making the Waku network more scalable and robust.

We also did a scalability study to show at what point the network would run into issues, due to the inherent lack of routing that Whisper and Waku v1 provided.

You can read more about this here.

3.5 Waku v2 - Design goals

Taking a step back, what problem does Waku v2 attempt to solve compared to all the other solutions that exist out there? What type of applications should use it and why? We have the following design goals:

  1. Generalized messaging. Many applications require some form of messaging protocol to communicate between different subsystems or different nodes. This messaging can be human-to-human or machine-to-machine or a mix.

  2. Peer-to-peer. These applications sometimes have requirements that make them suitable for peer-to-peer solutions.

  3. Resource restricted. These applications often run in constrained environments, where resources or the environment is restricted in some fashion. E.g.:

    • limited bandwidth, CPU, memory, disk, battery, etc
    • not being publicly connectable
    • only being intermittently connected; mostly-offline
  4. Privacy. These applications have a desire for some privacy guarantees, such as pseudonymity, metadata protection in transit, etc.

As well as to do so in a modular fashion. Meaning you can find a reasonable trade-off depending on your exact requirements. For example, you usually have to trade off some bandwidth to get metadata protection, and vice versa.

The concept of designing for resource restricted devices also leads to the concept of adaptive nodes, where you have more of a continuum between full nodes and light nodes. For example, if you switch your phone from mobile data to WiFi you might be able to handle more bandwidth, and so on.

4. Waku v2 - Breakdown

Where is Waku v2 at now, and how is it structured?

It is running over libp2p and we had our second internal testnet last week or so. As a side note, we name our testnets after subway stations in Taipei, the first one being Nangang, and the most recent one being Dingpu.

The main implementation is written in Nim using nim-libp2p, which is also powering Nimbus, an Ethereum 2 client. There is also a PoC for running Waku v2 in the browser. On a spec level, we have the following specifications that correspond to the components that make up Waku v2:

  • Waku v2 - this is the main spec that explains the goals of providing generalized messaging, in a p2p context, with a focus on privacy and running on resources restricted devices.
  • Relay - this is the main PubSub spec that provides better routing. It builds on top of GossipSub, which is what Eth2 heavily relies on as well.
  • Store - this is a 1-1 protocol for light nodes to get historical messages, if they are mostly-offline.
  • Filter - this is a 1-1 protocol for light nodes that are bandwidth restricted to only (or mostly) get messages they care about.
  • Message - this explains the payload, to get some basic encryption and content topics. It corresponds roughly to envelopes in Whisper/Waku v1.
  • Bridge - this explains how to do bridging between Waku v1 and Waku v2 for compatibility.

Right now, all protocols, with the exception of bridge, are in draft mode, meaning they have been implemented but are not yet being relied upon in production.

You can read more about the breakdown in this update though some progress has been made since then, as well as in the main Waku v2 spec.

5. Waku v2 - Upcoming

What's coming up next? There are a few things.

For Status to use it in production, it needs to be integrated into the main app using the Nim Node API. The bridge also needs to be implemented and tested.

For other users, we are currently overhauling the API to allow usage from a browser, e.g. To make this experience great, there are also a few underlying infrastructure things that we need in nim-libp2p, such as a more secure HTTP server in Nim, Websockets and WebRTC support.

There are also some changes we made to at what level content encryption happens, and this needs to be made easier to use in the API. This means you can use a node without giving your keys to it, which is useful in some environments.

More generally, beyond getting to production-ready use, there are a few bigger pieces that we are working on or will work on soon. These are things like:

  • Better scaling, by using topic sharding.
  • Accounting and user-run nodes, to account for and incentivize full nodes.
  • Stronger and more rigorous privacy guarantees, e.g. through study of GossipSub, unlinkable packet formats, etc.
  • Rate Limit Nullifier for privacy preserving spam protection, a la what Barry Whitehat has presented before.

As well as better support for Ethereum M2M Messaging. Which is what I'll talk about next.

PART 2 - ETHEREUM MESSAGING

A lot of what follows is inspired by exploratory work that John Lea has done at Status, previously Head of UX Architecture at Ubuntu.

6. Ethereum Messaging - Why?

It is easy to think that Waku v2 is only for human to human messaging, since that's how Waku is currently primarily used in the Status app. However, the goal is to be useful for generalized messaging, which includes other types of information as well as machine to machine messaging.

What is Ethereum M2M messaging? Going back to the Holy Trinity of Ethereum/Whisper/Swarm, the messaging component was seen as something that could facilitate messages between dapps and act as a building block. This can help with things such as:

  • Reducing on-chain transactions
  • Reduce latency for operations
  • Decentralize centrally coordinated services (like WalletConnect)
  • Improve UX of dapps
  • Broadcast live information
  • A message transport layer for state channels

And so on.

7. Ethereum Messaging - Why? (Cont)

What are some examples of practical things Waku as used for Ethereum Messaging could solve?

  • Multisig transfers only needing one on chain transaction
  • DAO votes only needing one on-chain transaction
  • Giving dapps ability to direct push notifications to users
  • Giving users ability to directly respond to requests from dapps
  • Decentralized Wallet Connect

Etc.

8. What's needed to deliver this?

We can break it down into our actors:

  • Decentralized M2M messaging system (Waku)
  • Native wallets (Argent, Metamask, Status, etc)
  • Dapps that benefit from M2M messaging
  • Users whose problems are being solved

Each of these has a bunch of requirements in turn. The messaging system needs to be decentralized, scalable, robust, etc. Wallets need support for messaging layer, dapps need to integrate this, etc.

This is a lot! Growing adoption is a challenge. There is a catch 22 in terms of justifying development efforts for wallets, when no dapps need it, and likewise for dapps when no wallets support Waku. In addition to this, there must be proven usage of Waku before it can be relied on, etc. How can we break this up into smaller pieces of work?

9. Breaking up the problem and a high level roadmap

We can start small. It doesn't need to be used for critical features first. A more hybrid approach can be taken where it acts more as a nice-to-have.

  1. Forking Whisper and solving scalability, spam etc issues with it. This is a work in progress. What we talked about in part 1.
  2. Expose messaging API for Dapp developers.
  3. Implement decentralized version of WalletConnect. Currently wallets connect to dapps with a centralized service. Great UX.
  4. Solve DAO/Multi-Sig coordination problem. +E.g. send message to wallet-derived key when it is time to sign a transaction.
  5. Extend dapp-to-user and user-to-dapp communication to more dapps. Use lessons learned and examples to drive adoption for wallets/dapps.

And then build up from there.

10. We are hiring!

A lot of this will happen in Javascript and browsers, since that's the primary environment for a lot of wallets and dapps. We are currently hiring for a Waku JS Wallet integration lead to help push this effort further.

Come talk to me after or apply here.

That's it! You can find us on Status, Telegram, vac.dev. I'm on twitter here.

Questions?


]]>
+
+ + <![CDATA[Waku v2 Update]]> + https://vac.dev/rlog/waku-v2-update + https://vac.dev/rlog/waku-v2-update + Mon, 28 Sep 2020 12:00:00 GMT + + A research log. Read on to find out what is going on with Waku v2, a messaging protocol. What has been happening? What is coming up next?

It has been a while since the last post. It is time for an update on Waku v2. Aside from getting more familiar with libp2p (specifically nim-libp2p) and some vacation, what have we been up to? In this post we'll talk about what we've gotten done since last time, and briefly talk about immediate next steps and future. But first, a recap.

Recap

In the last post (Waku v2 plan) we explained the rationale of Waku v2 - the current Waku network is fragile and doesn't scale. To solve this, Waku v2 aims to reduce amplification factors and get more user run nodes. We broke the work down into three separate tracks.

  1. Track 1 - Move to libp2p
  2. Track 2 - Better routing
  3. Track 3 - Accounting and user-run nodes

As well as various rough components for each track. The primary initial focus is track 1. This means things like: moving to FloodSub, simplify the protocol, core integration, topic interest behavior, historical message caching, and Waku v1<>v2 bridge.

Current state

Let's talk about the state of specs and our main implementation nim-waku. Then we'll go over our recent testnet, Nangang, and finish off with a Web PoC.

Specs

After some back and forth on how to best structure things, we ended up breaking down the specs into a few pieces. While Waku v2 is best thought of as a cohesive whole in terms of its capabilities, it is made up of several protocols. Here's a list of the current specs and their status:

Raw means there is not yet an implementation that corresponds fully to the spec, and draft means there is an implementation that corresponds to the spec. In the interest of space, we won't go into too much detail on the specs here except to note a few things:

  • The relay spec is essentially a thin wrapper on top of PubSub/FloodSub/GossipSub
  • The filter protocol corresponds to previous light client mode in Waku v1
  • The store protocol corresponds to the previous mailserver construct in Waku v1

The filter and store protocol allow for adaptive nodes, i.e. nodes that have various capabilities. For example, a node being mostly offline, or having limited bandwidth capacity. The bridge spec outlines how to bridge the Waku v1 and v2 networks.

Implementation

The main implementation we are working on is nim-waku. This builds on top of libraries such as nim-libp2p and others that the Nimbus team have been working on as part of their Ethereum 2.0 client.

Currently nim-waku implements the relay protocol, and is close to implementing filter and store protocol. It also exposes a Nim Node API that allows libraries such as nim-status to use it. Additionally, there is also a rudimentary JSON RPC API for command line scripting.

Nangang testnet

Last week we launched a very rudimentary internal testnet called Nangang. The goal was to test basic connectivity and make sure things work end to end. It didn't have things like: client integration, encryption, bridging, multiple clients, store/filter protocol, or even a real interface. What it did do is allow Waku developers to "chat" via RPC calls and looking in the log output. Doing this meant we exposed and fixed a few blockers, such as connection issues, deployment, topic subscription management, protocol and node integration, and basic scripting/API usage. After this, we felt confident enough to upgrade the main and relay spec to "draft" status.

Waku Web PoC

As a bonus, we wanted to see what it'd take to get Waku running in a browser. This is a very powerful capability that enables a lot of use cases, and something that libp2p enables with its multiple transport support.

Using the current stack, with nim-waku, would require quite a lot of ground work with WASM, WebRTC, Websockets support etc. Instead, we decided to take a shortcut and hack together a JS implementation called Waku Web Chat. This quick hack wouldn't be possible without the people behind js-libp2p-examples and js-libp2p and all its libraries. These are people like Jacob Heun, Vasco Santos, and Cayman Nava. Thanks!

It consists of a browser implementation, a NodeJS implementation and a bootstrap server that acts as a signaling server for WebRTC. It is largely a bastardized version of GossipSub, and while it isn't completely to spec, it does allow messages originating from a browser to eventually end up at a nim-waku node, and vice versa. Which is pretty cool.

Coming up

Now that we know what the current state is, what is still missing? What are the next steps?

Things that are missing

While we are getting closer to closing out work for track 1, there are still a few things missing from the initial scope:

  1. Store and filter protocols need to be finished. This means basic spec, implementation, API integration and proven to work in a testnet. All of these are work in progress and expected to be done very soon. Once the store protocol is done in a basic form, it needs further improvements to make it production ready, at least on a spec/basic implementation level.

  2. Core integration was mentioned in scope for track 1 initially. This work has stalled a bit, largely due to organizational bandwidth and priorities. While there is a Nim Node API that in theory is ready to be used, having it be used in e.g. Status desktop or mobile app is a different matter. The team responsible for this at Status (status-nim) has been making progress on getting nim-waku v1 integrated, and is expected to look into nim-waku v2 integration soon. One thing that makes this especially tricky is the difference in interface between Waku v1 and v2, which brings us to...

  3. Companion spec for encryption. As part of simplifying the protocol, the routing is decoupled from the encryption in v2 (1, 2). There are multiple layers of encryption at play here, and we need to figure out a design that makes sense for various use cases (dapps using Waku on their own, Status app, etc).

  4. Bridge implementation. The spec is done and we know how it should work, but it needs to be implemented.

  5. General tightening up of specs and implementation.

While this might seem like a lot, a lot has been done already, and the majority of the remaining tasks are more amenable to being pursued in parallel with other efforts. It is also worth mentioning that part of track 2 and 3 have been started, in the form of moving to GossipSub (amplification factors) and basics of adaptive nodes (multiple protocols). This is in addition to things like Waku Web which were not part of the initial scope.

Upcoming

Aside from the things mentioned above, what is coming up next? There are a few areas of interest, mentioned in no particular order. For track 2 and 3, see previous post for more details.

  1. Better routing (track 2). While we are already building on top of GossipSub, we still need to explore things like topic sharding in more detail to further reduce amplification factors.

  2. Accounting and user-run nodes (track 3). With store and filter protocol getting ready, we can start to implement accounting and light connection game for incentivization in a bottom up and iterative manner.

  3. Privacy research. Study better and more rigorous privacy guarantees. E.g. how FloodSub/GossipSub behaves for common threat models, and how custom packet format can improve things like unlinkability.

  4. zkSnarks RLN for spam protection and incentivization. We studied this last year and recent developments have made this relevant to study again. Create an experimental spec/PoC as an extension to the relay protocol. Kudos to Barry Whitehat and others like Kobi Gurkan and Koh Wei Jie for pushing this!

  5. Ethereum M2M messaging. Being able to run in the browser opens up a lot of doors, and there is an opportunity here to enable things like a decentralized WalletConnect, multi-sig transactions, voting and similar use cases. This was the original goal of Whisper, and we'd like to deliver on that.

As you can tell, quite a lot of things! Luckily, we have two people joining as protocol engineers soon, which will bring much needed support for the current team of ~2-2.5 people. More details to come in further updates.


If you are feeling adventurous and want to use early stage alpha software, check out the docs. If you want to read the specs, head over to Waku spec. If you want to talk with us, join us on Status or on Telegram (they are bridged).

]]>
+
+ + <![CDATA[What's the Plan for Waku v2?]]> + https://vac.dev/rlog/waku-v2-plan + https://vac.dev/rlog/waku-v2-plan + Wed, 01 Jul 2020 12:00:00 GMT + + Read about our plans for Waku v2, moving to libp2p, better routing, adaptive nodes and accounting!

tldr: The Waku network is fragile and doesn't scale. Here's how to solve it.

NOTE: This post was originally written with Status as a primary use case in mind, which reflects how we talk about some problems here. However, Waku v2 is a general-purpose private p2p messaging protocol, especially for people running in resource restricted environments.

Problem

The Waku network is fragile and doesn't scale.

As Status is moving into a user-acquisition phase and is improving retention rates for users they need the infrastructure to keep up, specifically when it comes to messaging.

Based on user acquisition models, the initial goal is to support 100k DAU in September, with demand growing from there.

With the Status Scaling Model we have studied the current bottlenecks as a function of concurrent users (CCU) and daily active users (DAU). Here are the conclusions.

**1. Connection limits**. With 100 full nodes we reach ~10k CCU based on connection limits. This can primarily be addressed by increasing the number of nodes (cluster or user operated). This assumes node discovery works. It is also worth investigating the limitations of max number of connections, though this is likely to be less relevant for user-operated nodes. For a user-operated network, this means 1% of users have to run a full node. See Fig 1-2.

**2. Bandwidth as a bottleneck**. We notice that memory usage appears to not be the primary bottleneck for full nodes, and the bottleneck is still bandwidth. To support 10k DAU, and full nodes with an amplification factor of 25, the required Internet speed is ~50 Mbps, which is a fast home Internet connection. For ~100k DAU only cloud-operated nodes can keep up (500 Mbps). See Fig 3-5.

**3. Amplification factors**. Reducing amplification factors with better routing, would have a high impact, but it is likely we'd need additional measures as well, such as topic sharding or similar. See Fig 8-13.

Figure 1-5:

+
+
+
+

See https://colab.research.google.com/drive/1Fz-oxRxxAFPpM1Cowpnb0nT52V1-yeRu#scrollTo=Yc3417FUJJ_0 for the full report.

What we need to do is:

  1. Reduce amplification factors
  2. Get more user-run full nodes

Doing this means the Waku network will be able to scale, and doing so in the right way, in a robust fashion. What would a fragile way of scaling be? Increasing our reliance on a Status Pte Ltd operated cluster which would paint us in a corner where we:

  • keep increasing requirements for Internet speed for full nodes
  • are vulnerable to censorship and attacks
  • have to control the topology in an artificial manner to keep up with load
  • basically re-invent a traditional centralized client-server app with extra steps
  • deliberately ignore most of our principles
  • risk the network being shut down when we run out of cash

Appetite

Our initial risk appetite for this is 6 weeks for a small team.

The idea is that we want to make tangible progress towards the goal in a limited period of time, as opposed to getting bogged down in trying to find a theoretically perfect generalized solution. Fixed time, variable scope.

It is likely some elements of a complete solution will be done separately. See later sections for that.

Solution

There are two main parts of the solution. One is to reduce amplification factors, and the other is incentivization to get more user run full nodes with desktop, etc.

What does a full node provide? It provides connectivity to the network, can act as a bandwidth "barrier" and be high or reasonably high availability. What this means right now is essentially topic interest and storing historical messages.

The goal is here to improve the status quo, not get a perfect solution from the get go. All of this can be iterated on further, for stronger guarantees, as well as replaced by other new modules.

Let's first look at the baseline, and then go into some of the tracks and their phases. Track 1 is best done first, after which track 2 and 3 can be executed in parallel. Track 1 gives us more options for track 2 and 3. The work in track 1 is currently more well-defined, so it is likely the specifics of track 2 and 3 will get refined at a later stage.

Baseline

Here's where we are at now. In reality, the amplification factors are likely even worse than this (15 in the graph below), up to 20-30. Especially with an open network, where we can't easily control connectivity and availability of nodes. Left unchecked, with a full mesh, it could even go as high as x100, though this is likely excessive and can be dialed down. See scaling model for more details.

Track 1 - Move to libp2p

Moving to PubSub over libp2p wouldn't improve amplification per se, but it would be a stepping stone. Why? It paves the way for GossipSub, and would be a checkpoint on this journey. Additionally, FloodSub and GossipSub are compatible, and very likely other future forms of PubSub such as GossipSub 1.1 (hardened/more secure), EpiSub, forwarding Kademlia / PubSub over Kademlia, etc. Not to mention security. This would also give us access to the larger libp2p ecosystem (multiple protocols, better encryption, quic, running in the browser, security audits, etc, etc), as well as be a joint piece of infrastructure used for Eth2 in Nimbus. More wood behind fewer arrows.

See more on libp2p PubSub here: https://docs.libp2p.io/concepts/publish-subscribe/

As part of this move, there are a few individual pieces that are needed.

1. FloodSub

This is essentially what Waku over libp2p would look like in its most basic form.

One difference that is worth noting is that the app topics would not be the same as Waku topics. Why? In Waku we currently don't use topics for routing between full nodes, but only for edge/light nodes in the form of topic interest. In FloodSub, these topics are used for routing.

Why can't we use Waku topics for routing directly? PubSub over libp2p isn't built for rare and ephemeral topics, and nodes have to explicitly subscribe to a topic. See topic sharding section for more on this.

Moving to FloodSub over libp2p would also be an opportunity to clean up and simplify some components that are no longer needed in the Waku v1 protocol, see point below.

Very experimental and incomplete libp2p support can be found in the nim-waku repo under v2: https://github.com/status-im/nim-waku

2. Simplify the protocol

Due to Waku's origins in Whisper, devp2p and as a standalone protocol, there is a lot of stuff that has accumulated (https://rfc.vac.dev/spec/6/). Not all of it serves its purpose anymore. For example, do we still need RLP here when we have Protobuf messages? What about extremely low PoW when we have peer scoring? What about key management / encryption when we have encryption at libp2p and Status protocol level?

Not everything has to be done in one go, but being minimalist at this stage will keep the protocol lean and make us more adaptable.

The essential characteristic that has to be maintained is that we don't need to change the upper layers, i.e. we still deal with (Waku) topics and some envelope like data unit.

3. Core integration

As early as possible we want to integrate with Core via Stimbus in order to mitigate risk and catch integration issues early in the process. What this looks like in practice is some set of APIs, similar to how Whisper and Waku were working in parallel, and experimental feature behind a toggle in core/desktop.

4. Topic interest behavior

While we target full node traffic here, we want to make sure we maintain the existing bandwidth requirements for light nodes that Waku v1 addressed (https://vac.dev/fixing-whisper-with-waku). This means implementing topic-interest in the form of Waku topics. Note that this would be separate from app topics notes above.

5. Historical message caching

Basically what mailservers are currently doing. This likely looks slightly different in a libp2p world. This is another opportunity to simplify things with a basic REQ-RESP architecture, as opposed to the roundabout way things are now. Again, not everything has to be done in one go but there's no reason to reimplement a poor API if we don't have to.

Also see section below on adaptive nodes and capabilities.

6. Waku v1 <> Libp2p bridge

To make the transition complete, there has to be a bridge mode between current Waku and libp2p. This is similar to what was done for Whisper and Waku, and allows any nodes in the network to upgrade to Waku v2 at their leisure. For example, this would likely look different for Core, Desktop, Research and developers.

Track 2 - Better routing

This is where we improve the amplification factors.

1. GossipSub

This is a subprotocol of FloodSub in the libp2p world. Moving to GossipSub would allow traffic between full nodes to go from an amplification factor of ~25 to ~6. This basically creates a mesh of stable bidirectional connections, together with some gossiping capabilities outside of this view.

Explaining how GossipSub works is out of scope of this document. It is implemented in nim-libp2p and used by Nimbus as part of Eth2. You can read the specs here in more detail if you are interested: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md and https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md

+
+
+

While we technically could implement this over existing Waku, we'd have to re-implement it, and we'd lose out on all the other benefits libp2p would provide, as well as the ecosystem of people and projects working on improving the scalability and security of these protocols.

2. Topic sharding

This one is slightly more speculative in terms of its ultimate impact. The basic idea is to split the application topic into N shards, say 10, and then each full node can choose which shards to listen to. This can reduce amplification factors by another factor of 10.

+

Note that this means a light node that listens to several topics would have to be connected to more full nodes to get connectivity. For a more exotic version of this, see https://forum.vac.dev/t/rfc-topic-propagation-extension-to-libp2p-pubsub/47

This is orthogonal from the choice of FloodSub or GossipSub, but due to GossipSub's more dynamic nature it is likely best combined with it.

3. Other factors

Not a primary focus, but worth a look. Looking at the scaling model, there might be other easy wins to improve overall bandwidth consumption between full nodes. For example, can we reduce envelope size by a significant factor?

Track 3 - Accounting and user-run nodes

This is where we make sure the network isn't fragile, become a true p2p app, get our users excited and engaged, and allow us to scale the network without creating an even bigger cluster.

To work in practice, this has a soft dependency on node discovery such as DNS based discovery (https://eips.ethereum.org/EIPS/eip-1459) or Discovery v5 (https://vac.dev/feasibility-discv5).

1. Adaptive nodes and capabilities

We want to make the gradation between light nodes, full nodes, storing (partial set of) historical messages, only acting for a specific shard, etc more flexible and explicit. This is required to identify and discover the nodes you want. See https://github.com/vacp2p/specs/issues/87

Depending on how the other tracks come together, this design should allow for a desktop node to identify as a full relaying node for some app topic shard, but also express waku topic interest and retrieve historical messages itself.

E.g. Disc v5 can be used to supply node properties through ENR.

2. Accounting

This is based on a few principles:

  1. Some nodes contribute a lot more than other nodes in the network
  2. We can account for the difference in contribution in some fashion
  3. We want to incentivize nodes to tell the truth, and be incentivized not to lie

Accounting here is a stepping stone, where accounting is the raw data upon which some settlement later occurs. It can have various forms of granularity. See https://forum.vac.dev/t/accounting-for-resources-in-waku-and-beyond/31 for discussion.

We also note that in GossipSub, the mesh is bidirectional. Additionally, it doesn't appear to be a high priority issue in terms of nodes misreporting. What is an issue is having people run full nodes in the first place. There are a few points to that. It has to be possible in the end-user UX, nodes have to be discovered, and it has to be profitable/visible that you are contributing. UX and discovery are out of scope for this work, whereas visibility/accounting is part of this scope. Settlement is a stretch goal here.

The general shape of the solution is inspired by the Swarm model, where we do accounting separate from settlement. It doesn't require any specific proofs, but nodes are incentivized to tell the truth in the following way:

  1. Both full node and light node do accounting in a pairwise, local fashion
  2. If a light node doesn't ultimately pay or lie about reporting, they get disconnected (e.g.)
  3. If a full node doesn't provide its service the light node may pick another full node (e.g.)

While accounting for individual resource usage is useful, for the ultimate end user experience we can ideally account for other things such as:

  • end to end delivery
  • online time
  • completeness of storage

This can be gradually enhanced and strengthened, for example with proofs, consistency checks, Quality of Service, reputation systems. See https://discuss.status.im/t/network-incentivisation-first-draft/1037 for one attempt to provide stronger guarantees with periodic consistency checks and a shared fund mechanism. And https://forum.vac.dev/t/incentivized-messaging-using-validity-proofs/51 for using validity proofs and removing liveness requirement for settlement.

All of this is optional at this stage, because our goal here is to improve the status quo for user run nodes. Accounting at this stage should be visible and correspond to the net benefit a node provides to another.

As a concrete example: a light node has some topic interest and cares about historical messages on some topic. A full node communicates envelopes as they come in, communicates their high availability (online time) and stores/forward stored messages. Both nodes have this information, and if they agree settlement (initially just a mock message) can be sending a payment to an address at some time interval / over some defined volume. See future sections for how this can be improved upon.

Also see below in section 4, using constructs such as eigentrust as a local reputation mechanism.

3. Relax high availability requirement

If we want desktop nodes to participate in the storing of historical messages, high availability is a problem. It is a problem for any node, especially if they lie about it, but assuming they are honest it is still an issue.

By being connected to multiple nodes, we can get an overlapping online window. Then these can be combined together to get consistency. This is obviously experimental and would need to be tested before being deployed, but if it works it'd be very useful.

Additionally or alternatively, instead of putting a high requirement on message availability, focus on detection of missing information. This likely requires re-thinking how we do data sync / replication.

4. Incentivize light and full nodes to tell the truth (policy, etc)

In accounting phase it is largely assumed nodes are honest. What happens when they lie, and how do we incentivize them to be honest? In the case of Bittorrent this is done with tit-for-tat, however this is a different kind of relationship. What follows are some examples of how this can be done.

For light nodes:

  • if they don't, they get disconnected
  • prepayment (especially to "high value" nodes)

For full nodes:

  • multiple nodes reporting to agree, where truth becomes a shelling point
  • use eigentrust
  • staking for discovery visibility with slashing

5. Settlement PoC

Can be done after phase 2 if so desired. Basically integrate payments based on accounting and policy.

Out of scope

  1. We assume the Status Base model requirements are accurate.
  2. We assume Core will improve retention rates.
  3. We assume the Stimbus production team will enable integration of nim-waku.
  4. We assume Discovery mechanisms such as DNS and Discovery v5 will be worked on separately.
  5. We assume Core will, at some point, provide an UX for integrating payment of services.
  6. We assume the desktop client is sufficiently usable.
  7. We assume Core and Infra will investigate ways of improving MaxPeers.
]]>
+
+ + <![CDATA[Feasibility Study: Discv5]]> + https://vac.dev/rlog/feasibility-discv5 + https://vac.dev/rlog/feasibility-discv5 + Mon, 27 Apr 2020 12:00:00 GMT + + Looking at discv5 and the theoretical numbers behind finding peers.

Disclaimer: some of the numbers found in this write-up could be inaccurate. They are based on the current understanding of theoretical parts of the protocol itself by the author and are meant to provide a rough overview rather than bindable numbers.

This post serves as a more authoritative overview of the discv5 study, for a discussionary post providing more context make sure to check out the corresponding discuss post. Additionally, if you are unfamiliar with discv5, check out my previous write-up: "From Kademlia to Discv5".

Motivating Problem

The discovery method currently used by Status, is made up of various components and grew over time to solve a mix of problems. We want to simplify this while maintaining some of the properties we currently have.

Namely, we want to ensure censorship resistance to state-level adversaries. One of the issues Status had, which caused them to add to their discovery method, was the fact that addresses from providers like AWS and GCP were blocked both in Russia and China. Additionally, one of the main factors required is the ability to function on resource restricted devices.

Considering we are talking about resource restricted devices, let's look at the implications and what we need to consider:

  • Battery consumption - constant connections like websockets consume a lot of battery life.
  • CPU usage - certain discovery methods may be CPU intensive, slowing an app down and making it unusable.
  • Bandwidth consumption - a lot of users will be using data plans, the discovery method needs to be efficient in order to accommodate those users without using up significant portions of their data plans.
  • Short connection windows - the discovery algorithm needs to be low latency, that means it needs to return results fast. This is because many users will only have the app open for a short amount of time.
  • Not publicly connectable - There is a good chance that most resource restricted devices are not publicly connectable.

For a node to be able to participate as both a provider and a consumer in the discovery method - meaning a node both reads from other nodes' stored DHTs and hosts the DHT for other nodes to read from - it needs to be publicly connectable. This means another node must be able to connect to some public IP of the given node.

With devices that are behind a NAT, this is easier said than done. Especially mobile devices, that when connected to 4G LTE networks are often stuck behind a symmetric NAT, drastically reducing the success rate of NAT traversal. Keeping this in mind, it becomes obvious that most resource restricted devices will be consumers rather than providers due to this technical limitation.

In order to answer our questions, we formulated the problem with a simple method for testing. The "needle in a haystack" problem was formulated to figure out how easily a specific node can be found within a given network. This issue was fully formulated in vacp2p/research#15.

Overview

The main things we wanted to investigate was the overhead on finding a peer. This means we wanted to look at both the bandwidth, latency and effectiveness of this. There are 2 methods which we can use to find a peer:

  • We can find a peer with a specific ID, using normal lookup methods as documented by Kademlia.
  • We can find a peer that advertises a capability, this is possible using either capabilities advertised in the ENR or through topic tables.

Feasibility

To be able to investigate the feasibility of discv5, we used various methods including rough calculations which can be found in the notebook, and a simulation isolated in vacp2p/research#19.

CPU & Memory Usage

The experimental discv5 has already been used within Status, however what was noticed was that the CPU and memory usage was rather high. It therefore should be investigated if this is still the case, and if it is, it should be isolated where this stems from. Additionally it is worth looking at whether or not this is the case with both the go and nim implementation.

See details: vacp2p/research#31

NAT on Cellular Data

If a peer is not publicly connectable it can not participate in the DHT both ways. A lot of mobile phones are behind symmetric NATs, which make UDP hole-punching close to impossible. It should be investigated whether or not mobile phones will be able to participate both ways and if there are good methods for doing hole-punching.

See details: vacp2p/research#29

Topic Tables

Topic Tables allow us the ability to efficiently find nodes given a specific topic. However, they are not implemented in the status-im/nim-eth implementation nor are they fully finalized in the spec. These are important if the network grows past a size where the concentration of specific nodes is relatively low making them hard to find.

See details: vacp2p/research#26

Finding a node

It is important to note, that given a network is relatively small sized, eg 100-500 nodes, then finding a node given a specific address is relatively manageable. Additionally, if the concentration of a specific capability in a network is reasonable, then finding a node advertising its capabilities using an ENR rather than the topic table is also manageable. A reasonable concentration for example would be 10%, which would give us an 80% chance of getting a node with that capability in the first lookup request. This can be explored more using our discv5 notebook.

Results

Research has shown that finding a node in the DHT has a relatively low effect on bandwidth, both inbound and outbound. For example when trying to find a node in a network of 100 nodes, it would take roughly 5668 bytes total. Additionally if we assume 100ms latency per request it would range at ≈ 300ms latency, translating to 3 requests to find a specific node.

General Thoughts

One of the main blockers right now is figuring out what the CPU and memory usage of discv5 is on mobile phones, this is a large blocker as it affects one of the core problems for us. We need to consider whether discv5 is an upgrade as it allows us to simplify our current discovery process or if it is too much of an overhead for resource restricted devices. The topic table feature could largely enhance discovery however it is not yet implemented. Given that CPU and memory isn't too high, discv5 could probably be used as the other issues are more "features" than large scale issues. Implementing it would already reduce the ability for state level adversaries to censor our nodes.

Acknowledgements

  • Oskar Thoren
  • Dmitry Shmatko
  • Kim De Mey
  • Corey Petty
]]>
+
+ + <![CDATA[What Would a WeChat Replacement Need?]]> + https://vac.dev/rlog/wechat-replacement-need + https://vac.dev/rlog/wechat-replacement-need + Thu, 16 Apr 2020 12:00:00 GMT + + What would a self-sovereign, private, censorship-resistant and open alternative to WeChat look like?

What would it take to replace WeChat? More specifically, what would a self-sovereign, private, censorship-resistant and open alternative look like? One that allows people to communicate, coordinate and transact freely.

Background

What WeChat provides to the end-user

Let's first look at some of the things that WeChat provides. It is a lot:

  • Messaging: 1:1 and group chat. Text, as well as voice and video. Post gifs. Share location.
  • Group chat: Limited to 500 people; above 100 people, members need to verify with a bank account. Also has group video chat and QR code to join a group.
  • Timeline/Moments: Post comments with attachments and have people like/comment on it.
  • Location Discovery: See WeChat users that are nearby.
  • Profile: Nickname and profile picture; can alias people.
  • "Broadcast" messages: Send one message to many contacts, up to 200 people (spam limited).
  • Contacts: Max 5000 contacts (people get around it with multiple accounts and sim cards).
  • App reach: Many different web apps, extensions, native apps, etc. Scan QR code to access web app from phone.
  • Selective posting: Decide who can view your posts and who can view your comments on other people's post.
  • Transact: Send money gifts through red envelopes.
  • Transact: Use WeChat pay to transfer money to friends and businesses; linked account with Alipay that is connected to your bank account.
  • Services: Find taxis and get notifications; book flights, train tickets, hotels etc.
  • Mini apps: API for all kinds of apps that allow you to provide services etc.
  • Picture in picture: allowing you to have a video call while using the app.

And much more. Not going to go through it all in detail, and there are probably many things I don't know about WeChat since I'm not a heavy user living in mainland China.

How WeChat works - a toy model

This is an overly simplistic model of how WeChat works, but it is sufficient for our purposes. This general design applies to most traditional client-server apps today.

To sign up for account you need a phone number or equivalent. To get access to some features you need to verify your identity further, for example with official ID and/or bank account.

When you sign up, this creates an entry in the WeChat server, from now on treated as a black box. You authenticate with that box, and that's where you get your messages from. If you go online the app asks that box for messages you have received while you were offline. If you login from a different app your contacts and conversations are synced from that box.

The box gives you an account, it deals with routing to your contacts, it stores messages and attachments and gives access to mini apps that people have uploaded. For transacting money, there is a partnership with a different company that has a different box which talks to your bank account.

This is done in a such a way that they can support a billion users with the features above, no sweat.

Whoever controls that box can see who you are talking with and what the content of those messages are. There is no end to end encryption. If WeChat/Tencent disagrees with you for some reason they can ban you. This means you can't interact with the box under that name anymore.

What do we want?

We want something that is self-sovereign, private, censorship-resistant and open that allows individuals and groups of people to communicate and transact freely. To explore what this means in more detail, without getting lost in the weeds, we provide the following list of properties. A lot of these are tied together, and some fall out of the other requirements. Some of them stand in slight opposition to each other.

Self-sovereign identity. Exercises authority within your own sphere. If you aren't harming anyone, you should be able to have an account and communicate with other people.

Pseudonymity, and ideally total anonymity. Not having your identity tied to your real name (e.g. through phone number, bank account, ID, etc). This allows people to act more freely without being overly worried about censorship and coercion in the real world. While total anonymity is even more desirable - especially to break multiple hops to a true-name action - real-world constraints sometimes makes this more challenging.

Private and secure communication. Your communication and who you transact with should be for your eyes only. This includes transactions (transfer of value) as a form of communication.

Censorship-resistance. Not being able to easily censor individuals on the platform. Both at an individual, group and collective level. Not having single points of failure that allow service to be disrupted.

Decentralization. Partly falls out of censorship-resistance and other properties. If infrastructure isn't decentralized it means there's a single point of failure that can be disrupted. This is more of a tool than a goal on its own, but it is an important tool.

Built for mass adoption. Includes scalability, UX (latency, reliability, bandwidth consumption, UI etc), and allowing for people to stick around. One way of doing this is to allow users to discover people they want to talk to.

Scalability. Infrastructure needs to support a lot of users to be a viable alternative. Like, a billion of them (eventually).

Fundamentals in place to support great user experience. To be a viable alternative, aside from good UI and distribution, fundamentals such as latency, bandwidth usage, consistency etc must support great UX to be a viable alternative.

Works for resource restricted devices, including smartphones. Most people will use a smartphone to use this. This means it has to work well on them and similar devices, without becoming a second-class citizen where we ignore properties such as censorship-resistance and privacy. Some concession to reality will be necessary due to additional constraints, which leads us to...

Adaptive nodes. Nodes will have different capabilities, and perhaps at different times. To maintain a lot of the properties described here it is desirable if as many participants as possible are first-class citizens. If a phone is switching from a limited data plan to a WiFi network or from battery to AC power it can do more useful work, and so on. Likewise for a laptop with a lot of free disk space and spare compute power, etc.

Sustainable. If there's no centralized, top down ad-driven model, this means all the infrastructure has to be sustainable somehow. Since these are individual entities, this means it has to be paid for. While altruistic modes and similar can be used, this likely requires some form of incentivization scheme for useful services provided in the network. Related: free rider problem.

Spam resistant. Relates to sustainability, scalability and built for mass adoption. Made more difficult by pseudonymous identity due to whitewashing attacks.

Trust-minimized. To know that properties are provided for and aren't compromised, various ways of minimizing trust requirements are useful. This also relates to mass adoption and social cohesion. Examples include: open and audited protocols, open source, reproducible builds, etc. This also relates to how mini apps are provided for, since we may not know their source but want to be able to use them anyway.

Open source. Related to above, where we must be able to inspect the software to know that it functions as advertised and hasn't been compromised, e.g. by uploading private data to a third party.

Some of these are graded and a bit subtle, i.e.:

  • Censorship resistance would ideally be able to absorb Internet shutdowns. This would require an extensive MANET/meshnet infrastructure, which while desirable, requires a lot of challenges to be overcome to be feasible.
  • Privacy would ideally make all actions (optionally) totally anonymous, though this may incur undue costs on bandwidth and latency, which impacts user experience.
  • Decentralization, certain topologies, such as DHTs, are efficient and quite decentralized but still have some centralized aspects, which makes it attackable in various ways. Ditto for blockchains, which require some coordinating infrastructure, compared with bearer instruments and naturally occurring assets such as precious metals.
  • "Discover people" and striving for "total anonymity" might initially seem incompatible. The idea is to provide for sane defaults, and then allow people to decide how much information they want to disclose. This is the essence of privacy.
  • Users often want some form of moderation to get a good user experience, which can be seen as a form of censorship. The idea is to raise the bar on the basics, the fundamental infrastructure. If individuals or specific communities want certain moderation mechanisms, that is still a compatible requirement.

Counterpoint 1

We could refute the above by saying that the design goals are undesirable. We want a system where people can censor others, and where everyone is tied to their real identity. Or we could say something like, freedom of speech is a general concept, and it doesn't apply to Internet companies, even if they provide a vital service. You can survive without it and you should've read the terms of service. This roughly characterizes the mainstream view.

An additional factor here is the idea that a group of people know more about what's good for you than you do, so they are protecting you.

Counterpoint 2

We could agree with all these design goals, but think they are too extreme in terms of their requirements. For example, we could operate as a non profit, take donations and volunteers, and then host the whole infrastructure ourselves. We could say we are in a friendly legislation, so we won't be a single point of failure. Since we are working on this and maybe even our designs are open, you can trust us and we'll provide service and infrastructure that gives you what you want without having to pay for it or solve all these complex decentralized computation and so on problems. If you don't trust us for some reason, you shouldn't use us regardless. Also, this is better than status quo. And we are more likely to survive by doing this, either by taking shortcuts or by being less ambitious in terms of scope.

Principal components

There are many ways to skin a cat, but this is one way of breaking down the problem. We have a general direction with the properties listed above, together with some understanding of how WeChat works for the everyday user. Now the question is, what infrastructure do we need to support this? How do we achieve the above properties, or at least get closer to them? We want to figure out the necessary building blocks, and one way of doing this is to map out likely necessary components.

Background: Ethereum and Web3 stack

It is worth noting that a lot of the required infrastructure has been developed, at least as concepts, in the original Ethereum / Web3 vision. In it there is Ethereum for consensus/compute/transact, storage through Swarm, and communication through Whisper. That said, the main focus has been on the Ethereum blockchain itself, and a lot of things have happened in the last 5y+ with respect to technology around privacy and scalability. It is worth revisiting things from a fresh point of view, with the WeChat alternative in mind as a clear use case.

Account - self-sovereign identity and the perils of phone numbers

Starting from the most basic: what is an account and how do you get one? With most internet services today, WeChat and almost all popular messaging apps included, you need to sign up with some centralized authority. Usually you also have to verify this with some data that ties this account to you as an individual. E.g. by requiring a phone number, which in most jurisdictions 1 means giving out your real ID. This also means you can be banned from using the service by a somewhat arbitrary process, with no due process.

Now, we could argue these app providers can do what they want. And they are right, in a very narrow sense. As apps like WeChat (and Google) become general-purpose platforms, they become more and more ingrained in our everyday lives. They start to provide utilities that we absolutely require to work to go about our day, such as paying for food or transportation. This means we need a higher standard than this.

Justifications for requiring phone numbers are usually centered around three claims:

  1. Avoiding spam
  2. Tying your account to your real name, for various reasons
  3. Using as a commonly shared identifier as a social network discovery mechanism

Of course, many services require more than phone numbers. E.g. email, other forms of personal data such as voice recording, linking a bank account, and so on.

In contrast, a self-sovereign system would allow you to "create an account" completely on your own. This can easily be done with public key cryptography, and it also paves the way for end-to-end encryption to make your messages private.

The main issue with this is that you need to get more creative about avoiding spam (e.g. through white washing attacks), and ideally there is some other form of social discovery mechanism.

Just having a public key as an account isn't enough though. If it goes through a central server, then nothing is stopping that server from arbitrarily blocking requests related to that public key. Of course, this also depends on how transparent such requests are. Fundamentally, lest we rely completely on goodwill, there needs to be multiple actors by which you can use the service. This naturally points to decentralization as a requirement. See counterpoint.

Even so, if the system is closed source we don't know what it is doing. Perhaps the app communicating is also uploading data to another place, or somehow making it possible to see who is who and act accordingly.

You might notice that just one simple property, self-sovereign identity, leads to a slew of other requirements and properties. You might also notice that WeChat is far from alone in this, even if their identity requirements might be a bit more stringent than, say, Telegram. Their control aspects are also a bit more extreme, at least for someone with western sensibilities 2.

Most user facing applications have similar issues, Google Apps/FB/Twitter etc. For popular tools that have this built in, we can look at git - which is truly decentralized and have keypair at the bottom. It is for a very specific technical domain, and even then people rely on Github. Key management is fairly difficult even for technical people, and for normal people even more so. Banks are generally far behind on this tech, relying on arcane procedures and special purpose hardware for 2FA. That's another big issue.

Let's shift gears a bit and talk about some other functional requirements.

Routing - packets from A to B

In order to get a lot of the features WeChat provides, we need the ability to do three things: communicate, store data, and transact with people. We need a bit more than that, but let's focus on this for now.

To communicate with people, in the base case, we need to go from one phone to another phone that is separated by a large distance. This requires some form of routing. The most natural platform to build this on is the existing Internet, though not the only one. Most phones are resource restricted, and are only "on" for brief periods of time. This is needed to preserve battery and bandwidth. Additionally, Internet uses IPs as endpoints, which change as phones move through space. NAT punching etc isn't always perfect either. This means we need a way to get a message from one public key to another, and through some intermediate nodes. We can think of these nodes as a form of service network. Similar to how a power grid works, or phone lines, or collection of ISPs.

One important property here is to ensure we don't end up in a situation like the centralized capture scenario above, something we've seen with centralized ISPs 3 4 where they can choose which traffic is good and which is bad. We want to allow the use of different service nodes, just like if a restaurant gives you food poisoning you can go to the one next door and then the first one goes out of business after a while. And the circle of life continues.

We shouldn't be naive though, and think that this is something nodes are likely to do for free. They need to be adequately compensated for their services, in some form of incentivization scheme. That can either be monetary, or as in the case of Bittorrent, more of a barter situation where you use game theory to coordinate with strangers 5, and some form of reputation attached to it (for private trackers).

There are many ways of doing routing, and we won't go into too much technical detail here. Suffice it to say that you likely want both a structured and unstructured alternative, and that these come with several trade-offs when it comes to efficiency, metadata protection, ability to incentivize, compatibility with existing topologies, and suitability for mobile phones (mostly offline, bandwidth restricted, not directly connectable). Expect more on this in a future article.

Some of these considerations naturally leads us into the storage and transaction components.

Storage - available and persistent for later

If mobile phones are mostly offline, we need some way to store these messages so they can be retrieved when online again. The same goes for various kinds of attachments as well, and for when people are switching devices. A user might control their timeline, but in the WeChat case that timeline is stored on Tencent's servers, and queried from there as well. This naturally needs to happen by some other service nodes. In the WeChat case, and for most IMs, the way these servers are paid for is through some indirect ad mechanism. The entity controlling these ads and so on is the same one as the one operating the servers for storage. A more direct model with different entities would see these services being compensated for their work.

We also need storage for attachments, mini-apps, as well as a way of understanding the current state of consensus when it comes to the compute/transact module. In the WeChat case, this state is completely handled by the bank institution or one of their partners, such as Alibaba. When it comes to bearer instruments like cash, no state needs to be kept as that's a direct exchange in the physical world. This isn't directly compatible with transferring value over a distance.

All of this state requires availability and persistence. It should be done in a trust minimized fashion and decentralized, which requires some form of incentivization for keeping data around. If it isn't, you are relying on social cohesion which breaks down at very large scales.

Since data will be spread out across multiple nodes, you need a way to sync data and transfer it in the network. As well as being able to add and query data from it. All of this requires a routing component.

To make it more censorship resistant it might be better to keep it as a general-purpose store, i.e. individuals don't need to know what they are storing. Otherwise, you naturally end up in a situation where individual nodes can be pressured to not store certain content.

Messaging - from me to you to all of us (not them)

This builds on top of routing, but it has a slightly different focus. The goal is to allow for individuals and groups to communicate in a private, secure and censorship-resistant manner.

It also needs to provide a decent interface to the end user, in terms of dealing seamlessly with offline messages, providing reliable and timely messaging.

In order to get closer to the ideal of total anonymity, it is useful to be able to hide metadata of who is talking to whom. This applies to both normal communication as well as for transactions. Ideally, no one but the parties involved can see who is taking part in a conversation. This can be achieved through various techniques such as mixnets, anonymous credentials, private information retrieval, and so on. Many of these techniques have a fundamental trade-off with latency and bandwidth, something that is a big concern for mobile phones. Being able to do some form of tuning, in an adaptive node manner, depending on your threat model and current capabilities is useful here.

The baseline here is pseudonymity, and having tools to allow individuals to "cut off" ties to their real world identity and transactions. People act different in different circles in the real world, and this should be mimicked online as well. Your company, family or government shouldn't be able to know what exactly you use your paycheck for, and who you are talking to.

Compute - transact, contract and settle

The most immediate need here is transaction from A to B. Direct exchange. There is also a more indirect need for private lawmaking and contracting.

We talked about routing and storage and how they likely need to be incentivized to work properly. How are they going to be compensated? While this could in theory work via existing banking system and so on, this would be rather heavy. It'd also very likely require tying your identifier to your legal name, something that goes against what we want to achieve. What we want is something that acts more as right-to-access, similar to the way cash functions in a society 6. I pay for a fruit with something that is valuable to you and then I'm on my way.

While there might be other candidates, such as pre-paid debit cards and so on, this transaction mode pretty much requires a cryptocurrency component. The alternative is to do it on a reputation basis, which might work for small communities, due to social cohesion, but quickly deteriorates for large ones 7. Ad hoc models like private Bittorrent trackers are centralized and easy to censor.

Now, none of the existing cryptocurrency models are ideal. They also all suffer from lack of widespread use, and it is difficult to get onboarded to them in the first place. Transactions in Bitcoin are slow. Ethereum is faster and has more capabilities, but it still suffers from linking payments over time, which makes the privacy part of this more difficult. Zcash, Monero and similar are interesting, but also require more use. For Zcash, shielded transactions appear to only account for less than 2% of all transactions in 2019 8 9.

Another dimension is what sets general purpose cryptocurrencies like Ethereum apart. Aside from just paying from A to B, you can encode rules about when something should be paid out and not. This is very useful for doing a form of private lawmaking, contracting, for setting up service agreements with these nodes. If there's no trivial recourse as in the meatspace world, where you know someone's name and you can sue them, you need a different kind of model.

What makes something like Zcash interesting is that it works more like digital cash. Instead of leaving a public trail for everyone, where someone can see where you got the initial money from and then trace you across various usage, for Zcash every hop is privacy preserving.

To fulfill the general goals of being censorship resistant and secure, it is also vital that the system being used stays online and can't be easily disrupted. That points to disintermediation, as opposed to using gateways and exchanges. This is a case where something like cash, or gold, is more direct, since no one can censor this transaction without being physically present where this direct exchange is taking place. However, like before, this doesn't work over distance.

Secure chat - just our business

Similar to the messaging module above. The distinction here is that we assume the network part has already taken place. Here we are interested in keeping the contents of messages private, so that means confidentiality/end-to-end encryption, integrity, authentication, as well as forward secrecy and plausible deniability. This means that even if there's some actor that gets some private key material, or confiscated your phone, there is some level of...ephemerality to your conversations. Another issue here is scalable private group chat.

Extensible mini apps

This relates to the compute and storage module above. Essentially we want to provide mini apps as in WeChat, but to do so in a way that is compatible with what we want to achieve more generally. This allows individuals and small businesses to create small tools for various purposes, and coordinate with strangers. E.g. booking a cab or getting an insurance, and so on.

This has a higher dependency on the contracting/general computation aspect. I.e. often it isn't only a transaction, but you might want to encode some specific rules here that strangers can abide by without having too high trust requirements. As a simple example: escrows.

This also needs an open API that anyone can use. It should be properly secured, so using one doesn't compromise the rest of the system it is operating in. To be censorship resistant it requires the routing and storage component to work properly.

Where are we now?

Let's look back at some of desirable properties we set out in the beginning and see how close we are to building out the necessary components. Is it realistic at all or just a pipe dream? We'll see that there are many building blocks in place, and there's reason for hope.

Self-sovereign identity. Public key crypto and web-of-trust-like constructs make this possible.

Pseudonymity, and ideally total anonymity. Pseudonymity can largely be achieved with public key crypto and open systems that allow for permissionless participation. For transactions, pseudonymity exists in most cryptocurrencies. The challenge is linkage across time, especially when interfacing with other "legacy" system. There are stronger constructs that are actively being worked on and are promising here, such as mixnets (Nym), mixers (Wasabi Wallet, Tornado.Cash) and zero knowledge proofs (Zcash, Ethereum, Starkware). This area of applied research has exploded over the last few years.

Private and secure communication. Signal has pioneered a lot of this, following OTR. Double Ratchet, X3DH. E2EE is minimum these days, and properties like PFS and PD are getting better. For metadata protection, you have Tor, with its faults, and more active research on mixnets and private information retrieval, etc.

Censorship-resistance. This covers a lot of ground across the spectrum. Technologies like Bittorrent, Bitcoin/Ethereum, Tor obfuscated transports, E2EE by default, partial mesh networks in production, and the ability to move/replicate host machines more quickly have all made this more of a reality than it used to be. Of course, techniques such as deep packet inspection and internet shutdowns have increased as well.

Decentralization. Cryptocurrencies, projects like libp2p and IPFS. Need to be mindful here of many projects that claim decentralization but are still vulnerable to single points of failures, such as relying on gateways.

Built for mass adoption. This one is more subjective. There's definitely a lot of work to be done here, both when it comes to fundamental performance, key management and things like social discoverability. Directionally these things are improving and becoming easier for the average person, but there is a lot to be done here.

Scalability. With projects like Ethereum 2.0 and IPFS more and more resources are being put into this, both at the consensus/compute layer as well as the networking (gossip, scalable Kademlia) layer. Also various layer 2 solutions for transactions.

Fundamentals in place to support great user experience. Similar to built for mass adoption. As scalability becomes more important, more applied research is being done in the p2p area to improve things like latency, bandwidth.

Works for resource restricted devices, including smartphones. Work in progress and not enough focus here; generally an afterthought. Also have stateless clients etc.

Adaptive nodes. See above. With subprotocols and capabilities in Ethereum and libp2p, this is getting easier.

Sustainable. Token economics is a thing. While a lot of it won't stay around, there are many more projects working on making themselves dispensable. Being open source, having an engaged community and enabling users run their own infrastructure. Users as stakeholders.

Spam resistant. Tricky problem if you want to be pseudonymous, but some signs of hope with incentivization mechanisms, zero knowledge based signaling, etc. Together with various forms of rate limiting and better controlling of topology and network amplification. And just generally being battle-tested by real world attacks, such as historical Ethereum DDoS attacks.

Trust minimized. Bitcoin. Zero knowledge provable computation. Open source. Reproducible builds. Signed binaries. Incentive compatible structures. Independent audits. Still a lot of work, but getting better.

Open source. Big and only getting bigger. Including mainstream companies.

What's next?

We've looked at what WeChat provides and what we'd like an alternative to look like. We've also seen a few principal modules that are necessary to achieve those goals. To achieve all of this is a daunting task, and one might call it overly ambitious. We've also seen how far we've come with some of the goals, and how a lot of the pieces are there, in one form or another. Then it is a question of putting them all together in the right mix.

The good news is that a lot of people are working on all these building blocks and thinking about these problems. Compared to a few years ago we've come quite far when it comes to p2p infrastructure, privacy, security, scalability, and general developer mass and mindshare. If you want to join us in building some of these building blocks, and assembling them, check out our forum.

PS. We are hiring protocol engineers. DS

Acknowledgements

Corey, Dean, Jacek.

References

]]>
+
+ + <![CDATA[From Kademlia to Discv5]]> + https://vac.dev/rlog/kademlia-to-discv5 + https://vac.dev/rlog/kademlia-to-discv5 + Thu, 09 Apr 2020 16:00:00 GMT + + A quick history of discovery in peer-to-peer networks, along with a look into discv4 and discv5, detailing what they are, how they work and where they differ.

If you've been working on Ethereum or adjacent technologies you've probably heard of discv4 or discv5. But what are they actually? How do they work and what makes them different? To answer these questions, we need to start at the beginning, so this post will assume that there is little knowledge on the subject so the post should be accessible for anyone.

The Beginning

Let's start right at the beginning: the problem of discovery and organization of nodes in peer-to-peer networks.

Early P2P file sharing technologies, such as Napster, would share information about who holds what file using a single server. A node would connect to the central server and give it a list of the files it owns. Another node would then connect to that central server, find a node that has the file it is looking for and contact that node. This however was a flawed system -- it was vulnerable to attacks and left a single party open to lawsuits.

It became clear that another solution was needed, and after years of research and experimentation, we were given the distributed hash table or DHT.

Distributed Hash Tables

In 2001 4 new protocols for such DHTs were conceived, Tapestry, Chord, CAN and Pastry, all of which made various trade-offs and changes in their core functionality, giving them unique characteristics.

But as said, they're all DHTs. So what is a DHT?

A distributed hash table (DHT) is essentially a distributed key-value list. Nodes participating in the DHT can easily retrieve the value for a key.

If we have a network with 9 key-value pairs and 3 nodes, ideally each node would store 3 (optimally 6 for redundancy) of those key-value pairs, meaning that if a key-value pair were to be updated, only part of the network would be responsible for ensuring that it is. The idea is that any node in the network would know where to find the specific key-value pair it is looking for based on how things are distributed amongst the nodes.

Kademlia

So now that we know what DHTs are, let's get to Kademlia, the predecessor of discv4. Kademlia was created by Petar Maymounkov and David Mazières in 2002. I will naively say that this is probably one of the most popular and most used DHT protocols. It's quite simple in how it works, so let's look at it.

In Kademlia, nodes and values are arranged by distance (in a very mathematical definition). This distance is not a geographical one, but rather based on identifiers. How far apart 2 identifiers are from each other is calculated using some distance function.

Kademlia uses an XOR as its distance function. An XOR is a function that outputs true only when inputs differ. Here is an example with some binary identifiers:

XOR 10011001
00110010
--------
10101011

In decimal numbers, this means that the distance between 153 and 50 is 171.

There are several reasons why XOR was taken:

  1. The distance from one ID to itself will be 0.
  2. Distance is symmetric, A to B is the same as B to A.
  3. Follows triangle inequality: if A, B and C are points on a triangle, then the distance from A to B is less than or equal to the distance from A to C plus the distance from C to B.

In summary, this distance function allows a node to decide what is "close" to it and make decisions based on that "closeness".

Kademlia nodes store a routing table. This table contains multiple lists. Each subsequent list contains nodes which are a little further distanced than the ones included in the previous list. Nodes maintain detailed knowledge about nodes closest to them, and the further away a node is, the less knowledge the node maintains about it.

So let's say I want to find a specific node. What I would do is go to any node which I already know and ask them for all their neighbours closest to my target. I repeat this process for the returned neighbours until I find my target.

The same thing happens for values. Values have a certain distance from nodes and their IDs are structured the same way so we can calculate this distance. If I want to find a value, I simply look for the neighbours closest to that value's key until I find the one storing said value.

For Kademlia nodes to support these functions, there are several messages with which the protocol communicates.

  • PING - Used to check whether a node is still running.
  • STORE - Stores a value with a given key on a node.
  • FINDNODE - Returns the closest nodes requested to a given ID.
  • FINDVALUE - The same as FINDNODE, except if a node stores the specific value it will return it directly.

This is a very simplified explanation of Kademlia and skips various important details. For the full description, make sure to check out the paper or a more in-depth design specification.

Discv4

Now after that history lesson, we finally get to discv4 (which stands for discovery v4), Ethereum's current node discovery protocol. The protocol itself is essentially based off of Kademlia, however it does away with certain aspects of it. For example, it does away with any usage of the value part of the DHT.

Kademlia is mainly used for the organisation of the network, so we only use the routing table to locate other nodes. Due to the fact that discv4 doesn't use the value portion of the DHT at all, we can throw away the FINDVALUE and STORE commands described by Kademlia.

The lookup method previously described by Kademlia describes how a node gets its peers. A node contacts some node and asks it for the nodes closest to itself. It does so until it can no longer find any new nodes.

Additionally, discv4 adds mutual endpoint verification. This is meant to ensure that a peer calling FINDNODE also participates in the discovery protocol.

Finally, all discv4 nodes are expected to maintain up-to-date ENR records. These contain information about a node. They can be requested from any node using a discv4-specific packet called ENRRequest.

If you want some more details on ENRs, check out one of my posts "Network Addresses in Ethereum"

Discv4 comes with its own range of problems however. Let's look at a few of them.

Firstly, the way discv4 works right now, there is no way to differentiate between node sub-protocols. This means for example that an Ethereum node could add an Ethereum Classic Node, Swarm or Whisper node to its DHT without realizing that it is invalid until more communication has happened. This inability to differentiate sub-protocols makes it harder to find specific nodes, such as Ethereum nodes with light-client support.

Next, in order to prevent replay attacks, discv4 uses timestamps. This however can lead to various issues when a host's clock is wrong. For more details, see the "Known Issues" section of the discv4 specification.

Finally, we have an issue with the way mutual endpoint verification works. Messages can get dropped and there is no way to tell if both peers have verified each other. This means that we could consider our peer verified while it does not consider us verified, making them drop the FINDNODE packet.

Discv5

Finally, let's look at discv5. The next iteration of discv4 and the discovery protocol which will be used by Eth 2.0. It aims at fixing various issues present in discv4.

The first change is the way FINDNODE works. In traditional Kademlia as well as in discv5, we pass an identifier. However, in discv5 we instead pass the logarithmic distance, meaning that a FINDNODE request gets a response containing all nodes at the specified logarithmic distance from the called node.

Logarithmic distance means we first calculate the distance and then run it through our log base 2 function. See:

log2(A xor B)

And the second, more important change, is that discv5 aims at solving one of the biggest issues of discv4: the differentiation of sub-protocols. It does this by adding topic tables. Topic tables are first in first out lists that contain nodes which have advertised that they provide a specific service. Nodes get themselves added to this list by registering ads on their peers.

As of writing, there is still an issue with this proposal. There is currently no efficient way for a node to place ads on multiple peers, since it would require separate requests for every peer which is inefficient in a large-scale network.

Additionally, it is unclear how many peers a node should place these ads on and exactly which peers to place them on. For more details, check out the issue devp2p#136.

There are a bunch more smaller changes to the protocol, but they are less important, hence they were omitted from this summary.

Nevertheless, discv5 still does not resolve a couple issues present in discv4, such as unreliable endpoint verification. As of writing this post, there is currently no new method in discv5 to improve the endpoint verification process.

As you can see, discv5 is still a work in progress and has a few large challenges to overcome. However, if it does, it will most likely be a large improvement over more naive Kademlia implementations.


Hopefully this article helped explain what these discovery protocols are and how they work. If you're interested in their full specifications you can find them on github.

]]>
+
+ + <![CDATA[Waku Update]]> + https://vac.dev/rlog/waku-update + https://vac.dev/rlog/waku-update + Fri, 14 Feb 2020 12:00:00 GMT + + A research log. What's the current state of Waku? How many users does it support? What are the bottlenecks? What's next?

Waku is our fork of Whisper where we address the shortcomings of Whisper in an iterative manner. We've seen in a previous post that Whisper doesn't scale, and why. In this post we'll talk about what the current state of Waku is, how many users it can support, and future plans.

Current state

Specs:

We released Waku spec v0.3 this week! You can see the full changelog here.

The main change from 0.2 is making the handshake more flexible. This enables us to communicate topic interest immediately without ambiguity. We also did the following:

  • added recommendation for DNS based discovery
  • added an upgradability and compatibility policy
  • cut the spec up into several components

We cut the spec up in several components to make Vac as modular as possible. The components right now are:

We can probably factor these out further as the main spec is getting quite big, but this is good enough for now.

Clients:

There are currently two clients that implement Waku v0.3, these are Nimbus (Update: now nim-waku) in Nim and status-go in Go.

For more details on what each client supports and doesn't, you can follow the work in progress checklist.

Work is currently in progress to integrate it into the Status core app. Waku is expected to be part of their upcoming 1.1 release (see Status app roadmap (link deprecated)).

Simulation:

We have a simulation that verifies - or rather, fails to falsify - our scalability model. More on the simulation and what it shows below.

How many users does Waku support?

This is our current understanding of how many users a network running Waku can support. Specifically in the context of the Status chat app, since that's the most immediate consumer of Waku. It should generalize fairly well to most deployments.

tl;dr (for Status app):

  • beta: 100 DAU
  • v1: 1k DAU
  • v1.1 (waku only): 10k DAU (up to x10 with deployment hotfixes)
  • v1.2 (waku+dns): 100k DAU (can optionally be folded into v1.1)

Assuming 10 concurrent users = 100 DAU. Estimate uncertainty increases for each order of magnitude until real-world data is observed.

As far as we know right now, these are the bottlenecks we have:

  • Immediate bottleneck - Receive bandwidth for end user clients (aka ‘Fixing Whisper with Waku’)
  • Very likely bottleneck - Nodes and cluster capacity (aka ‘DNS based node discovery’)
  • Conjecture but not unlikely to appear- Full node traffic (aka ‘the routing / partition problem’)

We've already seen the first bottleneck being discussed in the initial post. Dean wrote a post on DNS based discovery which explains how we will address the likely second bottleneck. More on the third one in future posts.

For more details on these bottlenecks, see Scalability estimate: How many users can Waku and the Status app support?.

Simulation

The ultimate test is real-world usage. Until then, we have a simulation thanks to Kim De Mey from the Nimbus team!

We have two network topologies, Star and full mesh. Both networks have 6 full nodes, one traditional light node with bloom filter, and one Waku light node.

One of the full nodes sends 1 envelope over 1 of the 100 topics that the two light nodes subscribe to. After that, it sends 10000 envelopes over random topics.

For the light node, the bloom filter is set to almost 10% false positive (bloom filter: n=100, k=3, m=512). The table shows the number of valid and invalid envelopes received for the different nodes.

Star network:

DescriptionPeersValidInvalid
Master node7100010
Full node 13100010
Full node 21100010
Full node 31100010
Full node 41100010
Full node 51100010
Light node28150
Waku light node210

Full mesh:

DescriptionPeersValidInvalid
Full node 071000120676
Full node 17100019554
Full node 251000123304
Full node 351000111983
Full node 451000124425
Full node 551000123472
Light node2803803
Waku light node211

Things to note:

  • Whisper light node with ~10% false positive gets ~10% of total traffic
  • Waku light node gets ~1000x less envelopes than Whisper light node
  • Full mesh results in a lot more duplicate messages, except for the Waku light node

Run the simulation yourself here. The parameters are configurable, and it is integrated with Prometheus and Grafana.

Difference between Waku and Whisper

Summary of main differences between Waku v0 spec and Whisper v6, as described in EIP-627:

  • Handshake/Status message not compatible with shh/6 nodes; specifying options as association list
  • Include topic-interest in Status handshake
  • Upgradability policy
  • topic-interest packet code
  • RLPx subprotocol is changed from shh/6 to waku/0.
  • Light node capability is added.
  • Optional rate limiting is added.
  • Status packet has following additional parameters: light-node, confirmations-enabled and rate-limits
  • Mail Server and Mail Client functionality is now part of the specification.
  • P2P Message packet contains a list of envelopes instead of a single envelope.

Next steps and future plans

Several challenges remain to make Waku a robust and suitable base communication protocol. Here we outline a few challenges that we are addressing and will continue to work on:

  • scalability of the network
  • incentivized infrastructure and spam-resistance
  • build with resource restricted devices in mind, including nodes being mostly offline

For the third bottleneck, a likely candidate for fixing this is Kademlia routing. This is similar to what is done in Swarm's PSS. We are in the early stages of experimenting with this over libp2p in nim-libp2p. More on this in a future post!

Acknowledgements

Image from "caged sky" by mh.xbhd.org is licensed under CC BY 2.0 (https://ccsearch.creativecommons.org/photos/a9168311-78de-4cb7-a6ad-f92be8361d0e)

]]>
+
+ + <![CDATA[DNS Based Discovery]]> + https://vac.dev/rlog/dns-based-discovery + https://vac.dev/rlog/dns-based-discovery + Fri, 07 Feb 2020 12:00:00 GMT + + A look at EIP-1459 and the benefits of DNS based discovery.

Discovery in p2p networks is the process of how nodes find each other and specific resources they are looking for. Popular discovery protocols, such as Kademlia which utilizes a distributed hash table or DHT, are highly inefficient for resource restricted devices. These methods use short connection windows, and it is quite battery intensive to keep establishing connections. Additionally, we cannot expect a mobile phone for example to synchronize an entire DHT using cellular data.

Another issue is how we do the initial bootstrapping. In other words, how does a client find its first node to then discover the rest of the network? In most applications, including Status right now, this is done with a static list of nodes that a client can connect to.

In summary, we have a static list that provides us with nodes we can connect to which then allows us to discover the rest of the network using something like Kademlia. But what we need is something that can easily be mutated, guarantees a certain amount of security, and is efficient for resource restricted devices. Ideally our solution would also be robust and scalable.

How do we do this?

EIP 1459: Node Discovery via DNS, which is one of the strategies we are using for discovering waku nodes. EIP-1459 is a DNS-based discovery protocol that stores merkle trees in DNS records which contain connection information for nodes.

Waku is our fork of Whisper. Oskar recently wrote an entire post explaining it. In short, Waku is our method of fixing the shortcomings of Whisper in a more iterative fashion. You can find the specification here

DNS-based methods for bootstrapping p2p networks are quite popular. Even Bitcoin uses it, but it uses a concept called DNS seeds, which are just DNS servers that are configured to return a list of randomly selected nodes from the network upon being queried. This means that although these seeds are hardcoded in the client, the IP addresses of actual nodes do not have to be.

> dig dnsseed.bluematt.me +short
129.226.73.12
107.180.78.111
169.255.56.123
91.216.149.28
85.209.240.91
66.232.124.232
207.55.53.96
86.149.241.168
193.219.38.57
190.198.210.139
74.213.232.234
158.181.226.33
176.99.2.207
202.55.87.45
37.205.10.3
90.133.4.73
176.191.182.3
109.207.166.232
45.5.117.59
178.211.170.2
160.16.0.30

The above displays the result of querying one of these DNS seeds. All the nodes are stored as A records for the given domain name. This is quite a simple solution which Bitcoin almost solely relies on since removing the IRC bootstrapping method in v0.8.2.

What makes this DNS based discovery useful? It allows us to have a mutable list of bootstrap nodes without needing to ship a new version of the client every time a list is mutated. It also allows for a more lightweight method of discovering nodes, something very important for resource restricted devices.

Additionally, DNS provides us with a robust and scalable infrastructure. This is due to its hierarchical architecture. This hierarchical architecture also already makes it distributed such that the failure of one DNS server does not result in us no longer being able to resolve our name.

As with every solution though, there is a trade-off. Because the list is stored under a DNS name, an adversary would simply need to censor the DNS records for that specific name. This would prevent any new client trying to join the network from being able to do so.

One thing you notice when looking at EIP-1459 is that it is a lot more technically complex than Bitcoin's way of doing this. So if Bitcoin uses this simple method and has proven that it works, why did we need a new method?

There are multiple reasons, but the main one is security. In the Bitcoin example, an attacker could create a new list and no one querying would be able to tell. This is however mitigated in EIP-1459 where we can verify the integrity of the entire returned list by storing an entire merkle tree in the DNS records.

Let's dive into this. Firstly, a client that is using these DNS records for discovery must know the public key corresponding to the private key controlled by the entity creating the list. This is because the entire list is signed using a secp256k1 private key, giving the client the ability to authenticate the list and know that it has not been tampered with by some external party.

So that already makes this a lot safer than the method Bitcoin uses. But how are these lists even stored? As previously stated they are stored using merkle trees as follows:

  • The root of the tree is stored in a TXT record, this record contains the tree's root hash, a sequence number which is incremented every time the tree is updated and a signature as stated above.

    Additionally, there is also a root hash to a second tree called a link tree, it contains the information to different lists. This link tree allows us to delegate trust and build a graph of multiple merkle trees stored across multiple DNS names.

    The sequence number ensures that an attacker cannot replace a tree with an older version because when a client reads the tree, they should ensure that the sequence number is greater than the last synchronized version.

  • Using the root hash for the tree, we can find the merkle tree's first branch, the branch is also stored in a TXT record. The branch record contains all the hashes of the branch's leafs.

  • Once a client starts reading all the leafs, they can find one of two things: either a new branch record leading them further down the tree or an Ethereum Name Records (ENR) which means they now have the address of a node to connect to! To learn more about ethereum node records you can have a look at EIP-778, or read a short blog post I wrote explaining them here.

Below is the zone file taken from the EIP-1459, displaying how this looks in practice.

; name                        ttl     class type  content
@ 60 IN TXT enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA
C7HRFPF3BLGF3YR4DY5KX3SMBE 86900 IN TXT enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org
JWXYDBPXYWG6FX3GMDIBFA6CJ4 86900 IN TXT enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24
2XS2367YHAXJFGLZHVAWLQD4ZY 86900 IN TXT enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA
H4FHT4B454P6UXFD7JCYQ5PWDY 86900 IN TXT enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI
MHTDO6TMUBRIA2XWG5LUDACK24 86900 IN TXT enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o

All of this has already been introduced into go-ethereum with the pull request #20094, created by Felix Lange. There's a lot of tooling around it that already exists too which is really cool. So if your project is written in Golang and wants to use this, it's relatively simple! Additionally, here's a proof of concept that shows what this might look like with libp2p on github.

I hope this was a helpful explainer into DNS based discovery, and shows EIP-1459's benefits over more traditional DNS-based discovery schemes.

]]>
+
+ + <![CDATA[Fixing Whisper with Waku]]> + https://vac.dev/rlog/fixing-whisper-with-waku + https://vac.dev/rlog/fixing-whisper-with-waku + Tue, 03 Dec 2019 12:00:00 GMT + + A research log. Why Whisper doesn't scale and how to fix it.

This post will introduce Waku. Waku is a fork of Whisper that attempts to +address some of Whisper's shortcomings in an iterative fashion. We will also +introduce a theoretical scaling model for Whisper that shows why it doesn't +scale, and what can be done about it.

Introduction

Whisper is a gossip-based communication protocol or an ephemeral key-value store +depending on which way you look at it. Historically speaking, it is the +messaging pillar of Web3, together with +Ethereum for consensus and Swarm for storage.

Whisper, being a somewhat esoteric protocol and with some fundamental issues, +hasn't seen a lot of usage. However, applications such as Status are using it, +and have been making minor ad hoc modifications to it to make it run on mobile +devices.

What are these fundamental issues? In short:

  1. scalability, most immediately when it comes to bandwidth usage
  2. spam-resistance, proof of work is a poor mechanism for heterogeneous nodes
  3. no incentivized infrastructure, leading to centralized choke points
  4. lack of formal and unambiguous specification makes it hard to analyze and implement
  5. running over devp2p, which limits where it can run and how

In this post, we'll focus on the first problem, which is scalability through bandwidth usage.

Whisper theoretical scalability model

(Feel free to skip this section if you want to get right to the results).

There's widespread implicit knowledge that Whisper "doesn't scale", but it is less understood exactly why. This theoretical model attempts to encode some characteristics of it. Specifically for use cases such as the one by Status (see Status Whisper usage +spec).

Caveats

First, some caveats: this model likely contains bugs, has wrong assumptions, or completely misses certain dimensions. However, it acts as a form of existence proof for unscalability, with clear reasons.

If certain assumptions are wrong, then we can challenge them and reason about them in isolation. It doesn’t mean things will definitely work as the model predicts, and that there aren’t unknown unknowns.

The model also only deals with receiving bandwidth for end nodes, uses mostly static assumptions of averages, and doesn’t deal with spam resistance, privacy guarantees, accounting, intermediate node or network wide failures.

Goals

  1. Ensure network scales by being user or usage bound, as opposed to bandwidth growing in proportion to network size.
  2. Staying within a reasonable bandwidth limit for limited data plans.
  3. Do the above without materially impacting existing nodes.

It proceeds through various cases with clear assumptions behind them, starting from the most naive assumptions. It shows results for 100 users, 10k users and 1m users.

Model

Case 1. Only receiving messages meant for you [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A4. Only receiving messages meant for you.

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1000.0KB/day
For 1m users, receiving bandwidth is 1000.0KB/day

------------------------------------------------------------

Case 2. Receiving messages for everyone [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A5. Received messages for everyone.

For 100 users, receiving bandwidth is 97.7MB/day
For 10k users, receiving bandwidth is 9.5GB/day
For 1m users, receiving bandwidth is 953.7GB/day

------------------------------------------------------------

Case 3. All private messages go over one discovery topic [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A8. All private messages are received by everyone (same topic) (static).

For 100 users, receiving bandwidth is 49.3MB/day
For 10k users, receiving bandwidth is 4.8GB/day
For 1m users, receiving bandwidth is 476.8GB/day

------------------------------------------------------------

Case 4. All private messages are partitioned into shards [naive case]

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1.5MB/day
For 1m users, receiving bandwidth is 98.1MB/day

------------------------------------------------------------

Case 5. 4 + Bloom filter with false positive rate

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1

For 100 users, receiving bandwidth is 10.7MB/day
For 10k users, receiving bandwidth is 978.0MB/day
For 1m users, receiving bandwidth is 95.5GB/day

NOTE: Traffic extremely sensitive to bloom false positives
This completely dominates network traffic at scale.
With p=1% we get 10k users ~100MB/day and 1m users ~10gb/day)

------------------------------------------------------------

Case 6. Case 5 + Benign duplicate receives

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1
- A15. Benign duplicate receives factor (static): 2
- A16. No bad envelopes, bad PoW, expired, etc (static).

For 100 users, receiving bandwidth is 21.5MB/day
For 10k users, receiving bandwidth is 1.9GB/day
For 1m users, receiving bandwidth is 190.9GB/day

------------------------------------------------------------

Case 7. 6 + Mailserver under good conditions; small bloom fp; mostly offline

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000
- A10. Bloom filter size (m) (static): 512
- A11. Bloom filter hash functions (k) (static): 3
- A12. Bloom filter elements, i.e. topics, (n) (static): 100
- A13. Bloom filter assuming optimal k choice (sensitive to m, n).
- A14. Bloom filter false positive proportion of full traffic, p=0.1
- A15. Benign duplicate receives factor (static): 2
- A16. No bad envelopes, bad PoW, expired, etc (static).
- A17. User is offline p% of the time (static) p=0.9
- A18. No bad request, dup messages for mailservers; overlap perfect (static).
- A19. Mailserver requests can change false positive rate to be p=0.01

For 100 users, receiving bandwidth is 3.9MB/day
For 10k users, receiving bandwidth is 284.8MB/day
For 1m users, receiving bandwidth is 27.8GB/day

------------------------------------------------------------

Case 8. No metadata protection w bloom filter; 1 node connected; static shard

Aka waku mode.

Next step up is to either only use contact code, or shard more aggressively.
Note that this requires change of other nodes behavior, not just local node.

Assumptions:
- A1. Envelope size (static): 1024kb
- A2. Envelopes / message (static): 10
- A3. Received messages / day (static): 100
- A6. Proportion of private messages (static): 0.5
- A7. Public messages only received by relevant recipients (static).
- A9. Private messages partitioned across partition shards (static), n=5000

For 100 users, receiving bandwidth is 1000.0KB/day
For 10k users, receiving bandwidth is 1.5MB/day
For 1m users, receiving bandwidth is 98.1MB/day

------------------------------------------------------------

See source +for more detail on the model and its assumptions.

Takeaways

  1. Whisper as it currently works doesn’t scale, and we quickly run into unacceptable bandwidth usage.
  2. There are a few factors of this, but largely it boils down to noisy topics usage and use of bloom filters. Duplicate (e.g. see Whisper vs PSS) and bad envelopes are also factors, but this depends a bit more on specific deployment configurations.
  3. Waku mode (case 8) is an additional capability that doesn’t require other nodes to change, for nodes that put a premium on performance.
  4. The next bottleneck after this is the partitioned topics (app/network specific), which either needs to gracefully (and potentially quickly) grow, or an alternative way of consuming those messages needs to be devised.

The results are summarized in the graph above. Notice the log-log scale. The +colored backgrounds correspond to the following bandwidth usage:

  • Blue: <10mb/d (<~300mb/month)
  • Green: <30mb/d (<~1gb/month)
  • Yellow: <100mb/d (<~3gb/month)
  • Red: >100mb/d (>3gb/month)

These ranges are somewhat arbitrary, but are based on user +requirements for users +on a limited data plan, with comparable usage for other messaging apps.

Introducing Waku

Motivation for a new protocol

Apps such as Status will likely use something like Whisper for the foreseeable +future, and we want to enable them to use it with more users on mobile devices +without bandwidth exploding with minimal changes.

Additionally, there's not a clear cut alternative that maps cleanly to the +desired use cases (p2p, multicast, privacy-preserving, open, etc).

We are actively researching, developing and collaborating with more greenfield +approaches. It is likely that Waku will either converge to those, or Waku will +lay the groundwork (clear specs, common issues/components) necessary to make +switching to another protocol easier. In this project we want to emphasize +iterative work with results on the order of weeks.

Briefly on Waku mode

  • Doesn’t impact existing clients, it’s just a separate node and capability.
  • Other nodes can still use Whisper as is, like a full node.
  • Sacrifices metadata protection and incurs higher connectivity/availability requirements for scalability

Requirements:

  • Exposes API to get messages from a set of list of topics (no bloom filter)
  • Way of being identified as a Waku node (e.g. through version string)
  • Option to statically encode this node in app, e.g. similar to custom bootnodes/mailserver
  • Only node that needs to be connected to, possibly as Whisper relay / mailserver hybrid

Provides:

  • likely provides scalability of up to 10k users and beyond
  • with some enhancements to partition topic logic, can possibly scale up to 1m users (app/network specific)

Caveats:

  • hasn’t been tested in a large-scale simulation
  • other network and intermediate node bottlenecks might become apparent (e.g. full bloom filter and private cluster capacity; can likely be dealt with in isolation using known techniques, e.g. load balancing) (deployment specific)

Progress so far

In short, we have a Waku version 0 spec up as well as a PoC for backwards compatibility. In the coming weeks, we are going to solidify the specs, get a more fully featured PoC for Waku mode. See rough roadmap, project board [link deprecated] and progress thread on the Vac forum.

The spec has been rewritten for clarity, with ABNF grammar and less ambiguous language. The spec also incorporates several previously ad hoc implemented features, such as light nodes and mailserver/client support. This has already caught a few incompatibilities between the geth (Go), status/whisper (Go) and nim-eth (Nim) versions, specifically around light node usage and the handshake.

If you are interested in this effort, please check out our forum for questions, comments and proposals. We already have some discussion for better spam protection (see previous post for a more complex but privacy-preserving proposal), something that is likely going to be addressed in future versions of Waku, along with many other fixes and enhancement.

]]>
+
+ + <![CDATA[Feasibility Study: Semaphore rate limiting through zkSNARKs]]> + https://vac.dev/rlog/feasibility-semaphore-rate-limiting-zksnarks + https://vac.dev/rlog/feasibility-semaphore-rate-limiting-zksnarks + Fri, 08 Nov 2019 12:00:00 GMT + + A research log. Zero knowledge signaling as a rate limiting mechanism to prevent spam in p2p networks.

tldr: Moon math promising for solving spam in Whisper, but to get there we need to invest more in performance work and technical upskilling.

Motivating problem

In open p2p networks for messaging, one big problem is spam-resistance. Existing solutions, such as Whisper's proof of work, are insufficient, especially for heterogeneous nodes. Other reputation-based approaches might not be desirable, due to issues around arbitrary exclusion and privacy.

One possible solution is to use a right-to-access staking-based method, where a node is only able to send a message, signal, at a certain rate, and otherwise they can be slashed. One problem with this is in terms of privacy-preservation, where we specifically don't want a user to be tied to a specific payment or unique fingerprint.

In addition to above, there are a lot of related problems that share similarities in terms of their structure and proposed solution.

  • Private transactions (Zcash, AZTEC)
  • Private voting (Semaphore)
  • Private group membership (Semaphore)
  • Layer 2 scaling, poss layer 1 (ZK Rollup; StarkWare/Eth2-3)

Overview

Basic terminology

A zero-knowledge proof allows a prover to show a verifier that they know something, without revealing what that something is. This means you can do trust-minimized computation that is also privacy preserving. As a basic example, instead of showing your ID when going to a bar you simply give them a proof that you are over 18, without showing the doorman your id.

zkSNARKs is a form of zero-knowledge proofs. There are many types of zero-knowledge proofs, and the field is evolving rapidly. They come with various trade-offs in terms of things such as: trusted setup, cryptographic assumptions, proof/verification key size, proof/verification time, proof size, etc. See section below for more.

Semaphore is a framework/library/construct on top of zkSNARks. It allows for zero-knowledge signaling, specifically on top of Ethereum. This means an approved user can broadcast some arbitrary string without revealing their identity, given some specific constraints. An approved user is someone who has been added to a certain merkle tree. See current Github home for more.

Circom is a DSL for writing arithmetic circuits that can be used in zkSNARKs, similar to how you might write a NAND gate. See Github for more.

Basic flow

We start with a private voting example, and then extend it to the slashable rate limiting example.

  1. A user registers an identity (arbitrary keypair), along with a small fee, to a smart contract. This adds them to a merkle tree and allows them to prove that they are member of that group, without revealing who they are.

  2. When a user wants to send a message, they compute a zero-knowledge proof. This ensures certain invariants, have some public outputs, and can be verified by anyone (including a smart contract).

  3. Any node can verify the proof, including smart contracts on chain (as of Byzantinum HF). Additionally, a node can have rules for the public output. In the case of voting, one such rule is that a specific output hash has to be equal to some predefined value, such as "2020-01-01 vote on Foo Bar for president".

  4. Because of how the proof is constructed, and the rules around output values, this ensures that: a user is part of the approved set of voters and that a user can only vote once.

  5. As a consequence of above, we have a system where registered users can only vote once, no one can see who voted for what, and this can all be proven and verified.

Rate limiting example

In the case of rate limiting, we do want nodes to send multiple messages. This changes step 3-5 above somewhat.

NOTE: It is a bit more involved than this, and if we precompute proofs the flow might look a bit different. But the general idea is the same.

  1. Instead of having a rule that you can only vote once, we have a rule that you can only send a message per epoch. Epoch here can be every second, as defined by UTC date time +-20s.

  2. Additionally, if a users sends more than one message per epoch, one of the public outputs is a random share of a private key. Using Shamir's Secret Sharing (similar to a multisig) and 2/3 key share as an example threshold: in the normal case only 1/3 private keys is revealed, which is insufficient to have access. In the case where two messages are sent in an epoch, probabilistically 2/3 shares is sufficient to have access to the key (unless you get the same random share of the key).

  3. This means any untrusted user who detects a spamming user, can use it to access their private key corresponding to funds in the contract, and thus slash them.

  4. As a consequence of above, we have a system where registered users can only messages X times per epoch, and no one can see who is sending what messages. Additionally, if a user is violating the above rate limit, they can be punished and any user can profit from it.

Briefly on scope of 'approved users'

In the case of an application like Status, this construct can either be a global StatusNetwork group, or one per chat, or network, etc. It can be applied both at the network and user level. There are no specific limitations on where or who deploys this, and it is thus more of a UX consideration.

Technical details

For a fairly self-contained set of examples above, see exploration in Vac research repo. Note that the Shamir secret sharing is not inside the SNARK, but out-of-band for now.

The current version of Semaphore is using NodeJS and Circom from Iden3 for Snarks.

For more on rate limiting idea, see ethresearch post.

Feasibility

The above repo was used to exercise the basic paths and to gain intuition of feasibility. Based on it and related reading we outline a few blockers and things that require further study.

Technical feasibility

Proof time

Proof generation time for Semaphore (https://github.com/kobigurk/semaphore) zkSNARKs using circom, groth and snarkjs is currently way too long. It takes on the order of ~10m to generate a proof. With Websnark, it is likely to take 30s, which might still be too long. We should experiment with native code on mobile here.

See details.

Proving key size

Prover key size is ~110mb for Semaphore. Assuming this is embedded on mobile device, it bloats the APK a lot. Current APK size is ~30mb and even that might be high for people with limited bandwidth.

See details.

Trusted setup

Using zkSNARKs a trusted setup is required to generate prover and verifier keys. As part of this setup, a toxic parameter lambda is generated. If a party gets access to this lambda, they can prove anything. This means people using zKSNARKs usually have an elaborate MPC ceremony to ensure this parameter doesn't get discovered.

See details.

Shamir logic in SNARK

For Semaphore RLN we need to embed the Shamir logic inside the SNARK in order to do slashing for spam. Currently the implementation is trusted and very hacky.

See details.

End to end integration

Currently is standalone and doesn't touch multiple users, deployed contract with merkle tree and verification, actual transactions, a mocked network, add/remove members, etc. There are bound to be edge cases and unknown unknowns here.

See details.

Licensing issues

Currently Circom uses a GPL license, which can get tricky when it comes to the App Store etc.

See details.

Alternative ZKPs?

Some of the isolated blockers for zKSNARKs (#7, #8, #9) might be mitigated by the use of other ZKP technology. However, they likely have their own issues.

See details.

Social feasibility

Technical skill

zkSNARKs and related technologies are quite new. To learn how they work and get an intuition for them requires individuals to dedicate a lot of time to studying them. This means we must prioritize gaining competence in these technologies if we wish to use them to our advantage.

Time and resources

In order for this and related projects (such as private transactions) to get anywhere, it must be made an explicit area of focus for an extended period of time.

General thoughts

Similar to Whisper, and in line with moving towards protocol and infrastructure, we need to upskill and invest resources into this. This doesn't mean developing all of the technologies ourselves, but gaining enough competence to leverage and extend existing solutions by the growing ZKP community.

For example, this might also include leveraging largely ready made solutions such as AZTEC for private transactions; more fundamental research into ZK rollup and similar; using Semaphore for private group membership and private voting; Nim based wrapper around Bellman, etc.

Acknowledgement

Thanks to Barry Whitehat for patient explanation and pointers. Thanks to WJ for helping with runtime issues.

Peacock header image from [Tonos](<https://en.wikipedia.org/wiki/File:Flickr-lo.tangelini-Tonos(1).jpg>)._

]]>
+
+ + <![CDATA[P2P Data Sync with a Remote Log]]> + https://vac.dev/rlog/remote-log + https://vac.dev/rlog/remote-log + Fri, 04 Oct 2019 12:00:00 GMT + + A research log. Asynchronous P2P messaging? Remote logs to the rescue!

A big problem when doing end-to-end data sync between mobile nodes is that most devices are offline most of the time. With a naive approach, you quickly run into issues of 'ping-pong' behavior, where messages have to be constantly retransmitted. We saw some basic calculations of what this bandwidth multiplier looks like in a previous post.

While you could do some background processing, this is really battery-draining, and on iOS these capabilities are limited. A better approach instead is to loosen the constraint that two nodes need to be online at the same time. How do we do this? There are two main approaches, one is the store and forward model, and the other is a remote log.

In the store and forward model, we use an intermediate node that forward messages on behalf of the recipient. In the remote log model, you instead replicate the data onto some decentralized storage, and have a mutable reference to the latest state, similar to DNS. While both work, the latter is somewhat more elegant and "pure", as it has less strict requirements of an individual node's uptime. Both act as a highly-available cache to smoothen over non-overlapping connection windows between endpoints.

In this post we are going to describe how such a remote log schema could work. Specifically, how it enhances p2p data sync and takes care of the following requirements:

  1. MUST allow for mobile-friendly usage. By mobile-friendly we mean devices +that are resource restricted, mostly-offline and often changing network.
  1. MAY use helper services in order to be more mobile-friendly. Examples of +helper services are decentralized file storage solutions such as IPFS and +Swarm. These help with availability and latency of data for mostly-offline +devices.

Remote log

A remote log is a replication of a local log. This means a node can read data from a node that is offline.

The spec is in an early draft stage and can be found here. A very basic spike / proof-of-concept can be found here.

Definitions

TermDefinition
CASContent-addressed storage. Stores data that can be addressed by its hash.
NSName system. Associates mutable data to a name.
Remote logReplication of a local log at a different location.

Roles

There are four fundamental roles:

  1. Alice
  2. Bob
  3. Name system (NS)
  4. Content-addressed storage (CAS)

The remote log is the data format of what is stored in the name system.

"Bob" can represent anything from 0 to N participants. Unlike Alice, Bob only needs read-only access to NS and CAS.

Flow

Figure 1: Remote log data synchronization.

Data format

The remote log lets receiving nodes know what data they are missing. Depending on the specific requirements and capabilities of the nodes and name system, the information can be referred to differently. We distinguish between three rough modes:

  1. Fully replicated log
  2. Normal sized page with CAS mapping
  3. "Linked list" mode - minimally sized page with CAS mapping

A remote log is simply a mapping from message identifiers to their corresponding address in a CAS:

Message Identifier (H1)CAS Hash (H2)
H1_3H2_3
H1_2H2_2
H1_1H2_1
address to next page

The numbers here corresponds to messages. Optionally, the content itself can be included, just like it normally would be sent over the wire. This bypasses the need for a dedicated CAS and additional round-trips, with a trade-off in bandwidth usage.

Message Identifier (H1)Content
H1_3C3
H1_2C2
H1_1C1
address to next page

Both patterns can be used in parallel, e.g. by storing the last k messages directly and using CAS pointers for the rest. Together with the next_page page semantics, this gives users flexibility in terms of bandwidth and latency/indirection, all the way from a simple linked list to a fully replicated log. The latter is useful for things like backups on durable storage.

Interaction with MVDS

vac.mvds.Message payloads are the only payloads that MUST be uploaded. Other messages types MAY be uploaded, depending on the implementation.

Future work

The spec is still in an early draft stage, so it is expected to change. Same with the proof of concept. More work is needed on getting a fully featured proof of concept with specific CAS and NAS instances. E.g. Swarm and Swarm Feeds, or IPFS and IPNS, or something else.

For data sync in general:

  • Make consistency guarantees more explicit for app developers with support for sequence numbers and DAGs, as well as the ability to send non-synced messages. E.g. ephemeral typing notifications, linear/sequential history and casual consistency/DAG history
  • Better semantics and scalability for multi-user sync contexts, e.g. CRDTs and joining multiple logs together
  • Better usability in terms of application layer usage (data sync clients) and supporting more transports

PS1. Thanks everyone who submitted great logo proposals for Vac!

PPS2. Next week on October 10th decanus and I will be presenting Vac at Devcon, come say hi :)

]]>
+
+ + <![CDATA[Vac - A Rough Overview]]> + https://vac.dev/rlog/vac-overview + https://vac.dev/rlog/vac-overview + Fri, 02 Aug 2019 12:00:00 GMT + + Vac is a modular peer-to-peer messaging stack, with a focus on secure messaging. Overview of terms, stack and open problems.

Vac is a modular peer-to-peer messaging stack, with a focus on secure messaging. What does that mean? Let's unpack it a bit.

Basic terms

messaging stack. While the initial focus is on data sync, we are concerned with all layers in the stack. That means all the way from underlying transports, p2p overlays and routing, to initial trust establishment and semantics for things like group chat. The ultimate goal is to give application developers the tools they need to provide secure messaging for their users, so they can focus on their domain expertise.

modular. Unlike many other secure messaging applications, our goal is not to have a tightly coupled set of protocols, nor is it to reinvent the wheel. Instead, we aim to provide options at each layer in the stack, and build on the shoulders of giants, putting a premium on interoperability. It's similar in philosophy to projects such as libp2p or Substrate in that regard. Each choice comes with different trade-offs, and these look different for different applications.

peer-to-peer. The protocols we work on are pure p2p, and aim to minimize centralization. This too is in opposition to many initiatives in the secure messaging space.

messaging. By messaging we mean messaging in a generalized sense. This includes both human to human communication, as well as machine to machine communication. By messaging we also mean something more fundamental than text messages, we also include things like transactions (state channels, etc) under this moniker.

secure messaging. Outside of traditional notions of secure messaging, such as ensuring end to end encryption, forward secrecy, avoiding MITM-attacks, etc, we are also concerned with two other forms of secure messaging. We call these private messaging and censorship-resistance. Private messaging means viewing privacy as a security property, with all that entails. Censorship resistance ties into being p2p, but also in terms of allowing for transports and overlays that can't easily be censored by port blocking, traffic analysis, and similar.

Vāc. Is a Vedic goddess of speech. It also hints at being a vaccine.

Protocol stack

What does this stack look like? We take inspiration from core internet architecture, existing survey work and other efforts that have been done to decompose the problem into orthogonal pieces. Each layer provides their own set of properties and only interact with the layers it is adjacent to. Note that this is a rough sketch.

Layer / ProtocolPurposeExamples
Application layerEnd user semantics1:1 chat, group chat
Data SyncData consistencyMVDS, BSP
Secure TransportConfidentiality, PFS, etcDouble Ratchet, MLS
Transport PrivacyTransport and metadata protectionWhisper, Tor, Mixnet
P2P OverlayOverlay routing, NAT traversaldevp2p, libp2p
Trust EstablishmentEstablishing end-to-end trustTOFU, web of trust

As an example, end user semantics such as group chat or moderation capabilities can largely work regardless of specific choices further down the stack. Similarly, using a mesh network or Tor doesn't impact the use of Double Ratchet at the Secure Transport layer.

Data Sync plays a similar role to what TCP does at the transport layer in a traditional Internet architecture, and for some applications something more like UDP is likely to be desirable.

In terms of specific properties and trade-offs at each layer, we'll go deeper down into them as we study them. For now, this is best treated as a rough sketch or mental map.

Problems and rough priorities

With all the pieces involved, this is quite an undertaking. Luckily, a lot of pieces are already in place and can be either incorporated as-is or iterated on. In terms of medium and long term, here's a rough sketch of priorities and open problems.

  1. Better data sync. While the current MVDS works, it is lacking in a few areas:
  • Lack of remote log for mostly-offline devices
  • Better scalability for multi-user chat contexts
  • Better usability in terms of application layer usage and supporting more transports
  1. Better transport layer support. Currently MVDS runs primarily over Whisper, which has a few issues:
  • scalability, being able to run with many nodes
  • spam-resistance, proof of work is a poor mechanism for heterogeneous devices
  • no incentivized infrastructure, leading to centralized choke points

In addition to these most immediate concerns, there are other open problems. Some of these are overlapping with the above.

  1. Adaptive nodes. Better support for resource restricted devices and nodes of varying capabilities. Light connection strategy for resources and guarantees. Security games to outsource processing with guarantees.

  2. Incentivized and spam-resistant messaging. Reasons to run infrastructure and not relying on altruistic nodes. For spam resistance, in p2p multicast spam is a big attack vector due to amplification. There are a few interesting directions here, such as EigenTrust, proof of burn with micropayments, and leveraging zero-knowledge proofs.

  3. Strong privacy guarantees at transport privacy layer. More rigorous privacy guarantees and explicit trade-offs for metadata protection. Includes Mixnet.

  4. Censorship-resistant and robust P2P overlay. NAT traversal; running in the browser; mesh networks; pluggable transports for traffic obfuscation.

  5. Scalable and decentralized secure conversational security. Strong security guarantees such as forward secrecy, post compromise security, for large group chats. Includes projects such MLS and extending Double Ratchet.

  6. Better trust establishment and key handling. Avoiding MITM attacks while still enabling a good user experience. Protecting against ghost users in group chat and providing better ways to do key handling.

There is also a set of more general problems, that touch multiple layers:

  1. Ensuring modularity and interoperability. Providing interfaces that allow for existing and new protocols to be at each layer of the stack.

  2. Better specifications. Machine-readable and formally verified specifications. More rigorous analysis of exact guarantees and behaviors. Exposing work in such a way that it can be analyzed by academics.

  3. Better simulations. Providing infrastructure and tooling to be able to test protocols in adverse environments and at scale.

  4. Enabling excellent user experience. A big reason for the lack of widespread adoption of secure messaging is the fact that more centralized, insecure methods provide a better user experience. Given that incentives can align better for users interested in secure messaging, providing an even better user experience should be doable.


We got some work to do. Come help us if you want. See you in the next update!

]]>
+
+ + <![CDATA[P2P Data Sync for Mobile]]> + https://vac.dev/rlog/p2p-data-sync-for-mobile + https://vac.dev/rlog/p2p-data-sync-for-mobile + Fri, 19 Jul 2019 12:00:00 GMT + + A research log. Reliable and decentralized, pick two.

Together with decanus, I've been working on the problem of data sync lately.

In building p2p messaging systems, one problem you quickly come across is the problem of reliably transmitting data. If there's no central server with high availability guarantees, you can't meaningfully guarantee that data has been transmitted. One way of solving this problem is through a synchronization protocol.

There are many synchronization protocols out there and I won't go into detail of how they differ with our approach here. Some common examples are Git and Bittorrent, but there are also projects like IPFS, Swarm, Dispersy, Matrix, Briar, SSB, etc.

Problem motivation

Why do we want to do p2p sync for mobile phones in the first place? There are three components to that question. One is on the value of decentralization and peer-to-peer, the second is on why we'd want to reliably sync data at all, and finally why mobile phones and other resource restricted devices.

Why p2p?

For decentralization and p2p, there are both technical and social/philosophical reasons. Technically, having a user-run network means it can scale with the number of users. Data locality is also improved if you query data that's close to you, similar to distributed CDNs. The throughput is also improved if there are more places to get data from.

Socially and philosophically, there are several ways to think about it. Open and decentralized networks also relate to the idea of open standards, i.e. compare the longevity of AOL with IRC or Bittorrent. One is run by a company and is shut down as soon as it stops being profitable, the others live on. Additionally, control of data and infrastructure is increasingly becoming a liability. By having a network with no one in control, everyone is. It's ultimately a form of democratization, more similar to organic social structures pre Big Internet companies. This leads to properties such as censorship resistance and coercion resistance, where we limit the impact a 3rd party might have on a voluntary interaction between individuals or a group of people. Examples of this are plentiful in the world of Facebook, Youtube, Twitter and WeChat.

Why reliably sync data?

At risk of stating the obvious, reliably syncing data is a requirement for many problem domains. You don't get this by default in a p2p world, as it is unreliable, with nodes permissionlessly joining and leaving the network. In some cases you can get away with only ephemeral data, but usually you want some kind of guarantees. This is a must for a reliable group chat experience, for example, where messages are expected to arrive in a timely fashion and in some reasonable order. The same is true for messages that represent financial transactions, and so on.

Why mobile phones?

Most devices people use daily are mobile phones. It's important to provide the same or at least similar guarantees to more traditional p2p nodes that might run on a desktop computer or server. The alternative is to rely on gateways, which share many of the drawbacks of centralized control and are prone to censorship, control and surveillance.

More generally, resource restricted devices can differ in their capabilities. One example is smartphones, but others are: desktop, routers, Raspberry PIs, POS systems, and so on. The number and diversity of devices are exploding, and it's useful to be able to leverage this for various types of infrastructure. The alternative is to centralize on big cloud providers, which also lends itself to lack of democratization and censorship, etc.

Minimal Requirements

For requirements or design goals for a solution, here's what we came up with.

  1. MUST sync data reliably between devices. By reliably we mean having the ability to deal with messages being out of order, dropped, duplicated, or delayed.

  2. MUST NOT rely on any centralized services for reliability. By centralized services we mean any single point of failure that isn’t one of the endpoint devices.

  3. MUST allow for mobile-friendly usage. By mobile-friendly we mean devices that are resource restricted, mostly-offline and often changing network.

  4. MAY use helper services in order to be more mobile-friendly. Examples of helper services are decentralized file storage solutions such as IPFS and Swarm. These help with availability and latency of data for mostly-offline devices.

  5. MUST have the ability to provide causal consistency. By causal consistency we mean the commonly accepted definition in distributed systems literature. This means messages that are causally related can achieve a partial ordering.

  6. MUST support ephemeral messages that don’t need replication. That is, allow for messages that don’t need to be reliably transmitted but still need to be transmitted between devices.

  7. MUST allow for privacy-preserving messages and extreme data loss. By privacy-preserving we mean things such as exploding messages (self-destructing messages). By extreme data loss we mean the ability for two trusted devices to recover from a, deliberate or accidental, removal of data.

  8. MUST be agnostic to whatever transport it is running on. It should not rely on specific semantics of the transport it is running on, nor be tightly coupled with it. This means a transport can be swapped out without loss of reliability between devices.

MVDS - a minimum viable version

The first minimum viable version is in an alpha stage, and it has a specification, implementation and we have deployed it in a console client for end to end functionality. It's heavily inspired by Bramble Sync Protocol.

The spec is fairly minimal. You have nodes that exchange records over some secure transport. These records are of different types, such as OFFER, MESSAGE, REQUEST, and ACK. A peer keeps track of the state of messages for each node it is interacting with. There's also logic for message retransmission with exponential delay. The positive ACK and retransmission model is quite similar to how TCP is designed.

There are two different modes of syncing, interactive and batch mode. See sequence diagrams below.

Interactive mode: +

Interactive mode

Batch mode: +

Batch mode

Which mode should you choose? It's a tradeoff of latency and bandwidth. If you want to minimize latency, batch mode is better. If you care about preserving bandwidth, interactive mode is better. The choice is up to each node.

Basic simulation

Initial ad hoc bandwidth and latency testing shows some issues with a naive approach. Running with the default simulation settings:

  • communicating nodes: 2
  • nodes using interactive mode: 2
  • interval between messages: 5s
  • time node is offline: 90%
  • nodes each node is sharing with: 2

we notice a huge overhead. More specifically, we see a ~5 minute latency overhead and a bandwidth multiplier of x100-1000, i.e. 2-3 orders of magnitude just for receiving a message with interactive mode, without acks.

Now, that seems terrible. A moment of reflection will reveal why that is. If each node is offline uniformly 90% of the time, that means that each record will be lost 90% of the time. Since interactive mode requires offer, request, payload (and then ack), that's three links just for Bob to receive the actual message.

Each failed attempt implies another retransmission. That means we have (1/0.1)^3 = 1000 expected overhead to receive a message in interactive mode. The latency follows naturally from that, with the retransmission logic.

Mostly-offline devices

The problem above hints at the requirements 3 and 4 above. While we did get reliable syncing (requirement 1), it came at a big cost.

There are a few ways of getting around this issue. One is having a store and forward model, where some intermediary node picks up (encrypted) messages and forwards them to the recipient. This is what we have in production right now at Status.

Another, arguably more pure and robust, way is having a remote log, where the actual data is spread over some decentralized storage layer, and you have a mutable reference to find the latest messages, similar to DNS.

What they both have in common is that they act as a sort of highly-available cache to smooth over the non-overlapping connection windows between two endpoints. Neither of them are required to get reliable data transmission.

Basic calculations for bandwidth multiplier

While we do want better simulations, and this is a work in progress, we can also look at the above scenarios using some basic calculations. This allows us to build a better intuition and reason about the problem without having to write code. Let's start with some assumptions:

  • two nodes exchanging a single message in batch mode
  • 10% uniformly random uptime for each node
  • in HA cache case, 100% uptime of a piece of infrastructure C
  • retransmission every epoch (with constant or exponential backoff)
  • only looking at average (p50) case

First case, no helper services

A sends a message to B, and B acks it.

A message -> B (10% chance of arrival)
A <- ack B (10% chance of arrival)

With a constant backoff, A will send messages at epoch 1, 2, 3, .... With exponential backoff and a multiplier of 2, this would be 1, 2, 4, 8, .... Let's assume constant backoff for now, as this is what will influence the success rate and thus the bandwidth multiplier.

There's a difference between time to receive and time to stop sending. Assuming each send attempt is independent, it takes on average 10 epochs for A's message to arrive with B. Furthermore:

  1. A will send messages until it receives an ACK.
  2. B will send ACK if it receives a message.

To get an average of one ack through, A needs to send 100 messages, and B send on average 10 acks. That's a multiplier of roughly a 100. That's roughly what we saw with the simulation above for receiving a message in interactive mode.

Second case, high-availability caching layer

Let's introduce a helper node or piece of infrastructure, C. Whenever A or B sends a message, it also sends it to C. Whenever A or B comes online, it queries for messages with C.

A message    -> B (10% chance of arrival)
A message -> C (100% chance of arrival)
B <- req/res -> C (100% chance of arrival)
A <- ack B (10% chance of arrival)
C <- ack B (100% chance of arrival)
A <- req/res -> C (100% chance of arrival)

What's the probability that A's messages will arrive at B? Directly, it's still 10%. But we can assume it's 100% that C picks up the message. (Giving C a 90% chance success rate doesn't materially change the numbers).

B will pick up A's message from C after an average of 10 epochs. Then B will send ack to A, which will also be picked up by C 100% of the time. Once A comes online again, it'll query C and receive B's ack.

Assuming we use exponential backoff with a multiplier of 2, A will send a message directly to B at epoch 1, 2, 4, 8 (assuming it is online). At this point, epoch 10, B will be online in the average case. These direct sends will likely fail, but B will pick the message up from C and send one ack, both directly to A and to be picked up by C. Once A comes online, it'll query C and receive the ack from B, which means it won't do any more retransmits.

How many messages have been sent? Not counting interactions with C, A sends 4 (at most) and B 1. Depending on if the interaction with C is direct or indirect (i.e. multicast), the factor for interaction with C will be ~2. This means the total bandwidth multiplier is likely to be <10, which is a lot more acceptable.

Since the syncing semantics are end-to-end, this is without relying on the reliability of C.

Caveat

Note that both of these are probabilistic arguments. They are also based on heuristics. More formal analysis would be desirable, as well as better simulations to experimentally verify them. In fact, the calculations could very well be wrong!

Future work

There are many enhancements that can be made and are desirable. Let's outline a few.

  1. Data sync clients. Examples of actual usage of data sync, with more interesting domain semantics. This also includes usage of sequence numbers and DAGs to know what content is missing and ought to be synced.

  2. Remote log. As alluded to above, this is necessary. It needs a more clear specification and solid proof of concepts.

  3. More efficient ways of syncing with large number of nodes. When the number of nodes goes up, the algorithmic complexity doesn't look great. This also touches on things such as ambient content discovery.

  4. More robust simulations and real-world deployments. Existing simulation is ad hoc, and there are many improvements that can be made to gain more confidence and identify issues. Additionally, better formal analysis.

  5. Example usage over multiple transports. Including things like sneakernet and meshnets. The described protocol is designed to work over unstructured, structured and private p2p networks. In some cases it can leverage differences in topology, such as multicast, or direct connections.

]]>
+
+
+
\ No newline at end of file diff --git a/rlog/vac-overview/index.html b/rlog/vac-overview/index.html new file mode 100644 index 00000000..39394e91 --- /dev/null +++ b/rlog/vac-overview/index.html @@ -0,0 +1,26 @@ + + + + + +Vac - A Rough Overview | Vac Research + + + + + + + + + + +
+

Vac - A Rough Overview

by
6 min read

Vac is a modular peer-to-peer messaging stack, with a focus on secure messaging. Overview of terms, stack and open problems.

Vac is a modular peer-to-peer messaging stack, with a focus on secure messaging. What does that mean? Let's unpack it a bit.

Basic terms

messaging stack. While the initial focus is on data sync, we are concerned with all layers in the stack. That means all the way from underlying transports, p2p overlays and routing, to initial trust establishment and semantics for things like group chat. The ultimate goal is to give application developers the tools they need to provide secure messaging for their users, so they can focus on their domain expertise.

modular. Unlike many other secure messaging applications, our goal is not to have a tightly coupled set of protocols, nor is it to reinvent the wheel. Instead, we aim to provide options at each layer in the stack, and build on the shoulders of giants, putting a premium on interoperability. It's similar in philosophy to projects such as libp2p or Substrate in that regard. Each choice comes with different trade-offs, and these look different for different applications.

peer-to-peer. The protocols we work on are pure p2p, and aim to minimize centralization. This too is in opposition to many initiatives in the secure messaging space.

messaging. By messaging we mean messaging in a generalized sense. This includes both human to human communication, as well as machine to machine communication. By messaging we also mean something more fundamental than text messages, we also include things like transactions (state channels, etc) under this moniker.

secure messaging. Outside of traditional notions of secure messaging, such as ensuring end to end encryption, forward secrecy, avoiding MITM-attacks, etc, we are also concerned with two other forms of secure messaging. We call these private messaging and censorship-resistance. Private messaging means viewing privacy as a security property, with all that entails. Censorship resistance ties into being p2p, but also in terms of allowing for transports and overlays that can't easily be censored by port blocking, traffic analysis, and similar.

Vāc. Is a Vedic goddess of speech. It also hints at being a vaccine.

Protocol stack

What does this stack look like? We take inspiration from core internet architecture, existing survey work and other efforts that have been done to decompose the problem into orthogonal pieces. Each layer provides their own set of properties and only interact with the layers it is adjacent to. Note that this is a rough sketch.

Layer / ProtocolPurposeExamples
Application layerEnd user semantics1:1 chat, group chat
Data SyncData consistencyMVDS, BSP
Secure TransportConfidentiality, PFS, etcDouble Ratchet, MLS
Transport PrivacyTransport and metadata protectionWhisper, Tor, Mixnet
P2P OverlayOverlay routing, NAT traversaldevp2p, libp2p
Trust EstablishmentEstablishing end-to-end trustTOFU, web of trust

As an example, end user semantics such as group chat or moderation capabilities can largely work regardless of specific choices further down the stack. Similarly, using a mesh network or Tor doesn't impact the use of Double Ratchet at the Secure Transport layer.

Data Sync plays a similar role to what TCP does at the transport layer in a traditional Internet architecture, and for some applications something more like UDP is likely to be desirable.

In terms of specific properties and trade-offs at each layer, we'll go deeper down into them as we study them. For now, this is best treated as a rough sketch or mental map.

Problems and rough priorities

With all the pieces involved, this is quite an undertaking. Luckily, a lot of pieces are already in place and can be either incorporated as-is or iterated on. In terms of medium and long term, here's a rough sketch of priorities and open problems.

  1. Better data sync. While the current MVDS works, it is lacking in a few areas:
  • Lack of remote log for mostly-offline devices
  • Better scalability for multi-user chat contexts
  • Better usability in terms of application layer usage and supporting more transports
  1. Better transport layer support. Currently MVDS runs primarily over Whisper, which has a few issues:
  • scalability, being able to run with many nodes
  • spam-resistance, proof of work is a poor mechanism for heterogeneous devices
  • no incentivized infrastructure, leading to centralized choke points

In addition to these most immediate concerns, there are other open problems. Some of these are overlapping with the above.

  1. Adaptive nodes. Better support for resource restricted devices and nodes of varying capabilities. Light connection strategy for resources and guarantees. Security games to outsource processing with guarantees.

  2. Incentivized and spam-resistant messaging. Reasons to run infrastructure and not relying on altruistic nodes. For spam resistance, in p2p multicast spam is a big attack vector due to amplification. There are a few interesting directions here, such as EigenTrust, proof of burn with micropayments, and leveraging zero-knowledge proofs.

  3. Strong privacy guarantees at transport privacy layer. More rigorous privacy guarantees and explicit trade-offs for metadata protection. Includes Mixnet.

  4. Censorship-resistant and robust P2P overlay. NAT traversal; running in the browser; mesh networks; pluggable transports for traffic obfuscation.

  5. Scalable and decentralized secure conversational security. Strong security guarantees such as forward secrecy, post compromise security, for large group chats. Includes projects such MLS and extending Double Ratchet.

  6. Better trust establishment and key handling. Avoiding MITM attacks while still enabling a good user experience. Protecting against ghost users in group chat and providing better ways to do key handling.

There is also a set of more general problems, that touch multiple layers:

  1. Ensuring modularity and interoperability. Providing interfaces that allow for existing and new protocols to be at each layer of the stack.

  2. Better specifications. Machine-readable and formally verified specifications. More rigorous analysis of exact guarantees and behaviors. Exposing work in such a way that it can be analyzed by academics.

  3. Better simulations. Providing infrastructure and tooling to be able to test protocols in adverse environments and at scale.

  4. Enabling excellent user experience. A big reason for the lack of widespread adoption of secure messaging is the fact that more centralized, insecure methods provide a better user experience. Given that incentives can align better for users interested in secure messaging, providing an even better user experience should be doable.


We got some work to do. Come help us if you want. See you in the next update!

+ + + + \ No newline at end of file diff --git a/rlog/waku-for-all/index.html b/rlog/waku-for-all/index.html new file mode 100644 index 00000000..50b47079 --- /dev/null +++ b/rlog/waku-for-all/index.html @@ -0,0 +1,72 @@ + + + + + +Waku for All Decentralized Applications and Infrastructures | Vac Research + + + + + + + + + + +
+

Waku for All Decentralized Applications and Infrastructures

by
7 min read

Waku is an open communication protocol and network. Decentralized apps and infrastructure can use Waku for their +communication needs. It is designed to enable dApps and decentralized infrastructure projects to have secure, private, +scalable communication. Waku is available in several languages and platforms, from Web to mobile to desktop to cloud. +Initially, we pushed Waku adoption in the Web ecosystem; we learned that Waku is usable in a variety of complex applications +and infrastructure projects. We have prioritized our effort to make Waku usable on various platforms and environments.

Background

We have built Waku to be the communication layer for Web3. Waku is a collection of protocols to choose from for your +messaging needs. It enables secure, censorship-resistant, privacy-preserving, spam-protected communication for its users. +It is designed to run on any device, from mobile to the cloud.

Waku is available on many systems and environments and used by several applications and SDKs for decentralized communications.

This involved research efforts in various domains: conversational security, protocol incentivization, zero-knowledge, +etc.

Waku uses novel technologies. Hence, we knew that early dogfooding of Waku was necessary. Even if research +was still in progress [1]. Thus, as soon as Waku protocols and software were usable, we started to push +for the adoption of Waku. This started back in 2021.

Waku is the communication component of the Web3 trifecta. This trifecta was Ethereum (contracts), Swarm +(storage) and Whisper (communication). Hence, it made sense to first target dApps which already use one of the pillars: +Ethereum.

As most dApps are web apps, we started the development of js-waku for the browser.

Once ready, we reached out to dApps to integrate Waku, added prizes to hackathons +and gave talks.

We also assumed we would see patterns in the usage of Waku, that we would facilitate with the help of +SDKs.

Finally, we created several web apps: +examples +and PoCs.

By discussing with Waku users and watching it being used, we learned a few facts:

  1. The potential use cases for Waku are varied and many:
  1. Many projects are interested in having an embedded chat in their dApp,
  2. There are complex applications that need Waku as a solution. Taking RAILGUN as an example:
  • Web wallet
  • + React Native mobile wallet
  • + NodeJS node/backend.

(1) means that it is not that easy to create SDKs for common use cases.

(2) was a clear candidate for an SDK. Yet, building a chat app is a complex task. Hence, the Status app team tackled +this in the form of Status Web.

Finally, (3) was the most important lesson. We learned that multi-tier applications need Waku for decentralized and +censorship-resistant communications. For these projects, js-waku is simply not enough. They need Waku to work in their +Golang backend, Unity desktop game and React Native mobile app.

We understood that we should see the whole Waku software suite +(js-waku, +nwaku, +go-waku, +waku-react-native, +etc) as an asset for its success. +That we should not limit outreach, marketing, documentation efforts to the web, but target all platforms.

From a market perspective, we identified several actors:

  • platforms: Projects that use Waku to handle communication,
  • operators: Operators run Waku nodes and are incentivized to do so,
  • developers: Developers are usually part of a platform or solo hackers learning Web3,
  • contributors: Developers and researchers with interests in decentralization, privacy, censorship-resistance, +zero-knowledge, etc.

Waku for All Decentralized Applications and Infrastructures

In 2022, we shifted our focus to make the various Waku implementations usable and used.

We made Waku multi-platform.

We shifted Waku positioning to leverage all Waku implementations and better serve the user's needs:

We are consolidating the documentation for all implementations on a single website (work in progress) +to improve developer experience.

This year, we also started the operator outreach effort to push for users to run their own Waku nodes. We have +recently concluded our first operator trial run. +Nwaku's documentation, stability and performance has improved. It is now easier to +run your own Waku node.

Today, operator wannabes most likely run their own nodes to support or use the Waku network. +We are dogfooding +Waku RLN, our novel economic spam protection protocol, +and looking at incentivizing the Waku Store protocol. +This way, we are adding reasons to run your own Waku node.

For those who were following us in 2021, know that we are retiring the Waku Connect branding in favour of the Waku +branding.

Waku for Your Project

As discussed, Waku is now available on various platforms. The question remains: How can Waku benefit your project?

Here are a couple of use cases we recently investigated:

Layer-2 Decentralization

Most roll-ups ([2], [3]) use a centralized sequencer or equivalent. Running several sequencers is not as straightforward as running several execution nodes. +Waku can help:

  • Provide a neutral marketplace for a mempool: If sequencers compete for L2 tx fees, they may not be incentivized to +share transactions with other sequencers. Waku nodes can act as a neutral network to enable all sequencers to access +transactions.
  • Enable censorship-resistant wallet<>L2 communication,
  • Provide rate limiting mechanism for spam protection: Using RLN to prevent DDOS.

Device pairing and communication

With Waku Device Pairing, a user can set up a secure encrypted communication channel +between their devices. As this channel would operate over Waku, it would be censorship-resistant and privacy preserving. +These two devices could be:

  • Ethereum node and mobile phone to access a remote admin panel,
  • Alice's phone and Bob's phone for any kind of secure communication,
  • Mobile wallet and desktop/browser dApp for transaction and signature exchange.

Check js-waku#950 for the latest update on this.

Get Involved

Developer? Grab any of the Waku implementations and integrate it in your app: https://waku.org/platform.

Researcher? See https://vac.dev/contribute to participate in Waku research.

Tech-savvy? Try to run your own node: https://waku.org/operator.

Otherwise, play around with the various web examples.

If you want to help, we are hiring!

Moving Forward

What you can expect next:


References

  • [1] Waku is modular; it is a suite of protocols; hence some Waku protocols may be mature, while +new protocols are still being designed. Which means that research continues to be ongoing while +Waku is already used in production.
  • [2] The Optimism Foundation runs the only block producer on the Optimism network.
  • [3] Top 10 L2s are documented as having a centralized operator.
+ + + + \ No newline at end of file diff --git a/rlog/waku-update/index.html b/rlog/waku-update/index.html new file mode 100644 index 00000000..4804a530 --- /dev/null +++ b/rlog/waku-update/index.html @@ -0,0 +1,27 @@ + + + + + +Waku Update | Vac Research + + + + + + + + + + +
+

Waku Update

by
6 min read

A research log. What's the current state of Waku? How many users does it support? What are the bottlenecks? What's next?

Waku is our fork of Whisper where we address the shortcomings of Whisper in an iterative manner. We've seen in a previous post that Whisper doesn't scale, and why. In this post we'll talk about what the current state of Waku is, how many users it can support, and future plans.

Current state

Specs:

We released Waku spec v0.3 this week! You can see the full changelog here.

The main change from 0.2 is making the handshake more flexible. This enables us to communicate topic interest immediately without ambiguity. We also did the following:

  • added recommendation for DNS based discovery
  • added an upgradability and compatibility policy
  • cut the spec up into several components

We cut the spec up in several components to make Vac as modular as possible. The components right now are:

We can probably factor these out further as the main spec is getting quite big, but this is good enough for now.

Clients:

There are currently two clients that implement Waku v0.3, these are Nimbus (Update: now nim-waku) in Nim and status-go in Go.

For more details on what each client supports and doesn't, you can follow the work in progress checklist.

Work is currently in progress to integrate it into the Status core app. Waku is expected to be part of their upcoming 1.1 release (see Status app roadmap (link deprecated)).

Simulation:

We have a simulation that verifies - or rather, fails to falsify - our scalability model. More on the simulation and what it shows below.

How many users does Waku support?

This is our current understanding of how many users a network running Waku can support. Specifically in the context of the Status chat app, since that's the most immediate consumer of Waku. It should generalize fairly well to most deployments.

tl;dr (for Status app):

  • beta: 100 DAU
  • v1: 1k DAU
  • v1.1 (waku only): 10k DAU (up to x10 with deployment hotfixes)
  • v1.2 (waku+dns): 100k DAU (can optionally be folded into v1.1)

Assuming 10 concurrent users = 100 DAU. Estimate uncertainty increases for each order of magnitude until real-world data is observed.

As far as we know right now, these are the bottlenecks we have:

  • Immediate bottleneck - Receive bandwidth for end user clients (aka ‘Fixing Whisper with Waku’)
  • Very likely bottleneck - Nodes and cluster capacity (aka ‘DNS based node discovery’)
  • Conjecture but not unlikely to appear - Full node traffic (aka ‘the routing / partition problem’)

We've already seen the first bottleneck being discussed in the initial post. Dean wrote a post on DNS based discovery which explains how we will address the likely second bottleneck. More on the third one in future posts.

For more details on these bottlenecks, see Scalability estimate: How many users can Waku and the Status app support?.

Simulation

The ultimate test is real-world usage. Until then, we have a simulation thanks to Kim De Mey from the Nimbus team!

We have two network topologies, Star and full mesh. Both networks have 6 full nodes, one traditional light node with bloom filter, and one Waku light node.

One of the full nodes sends 1 envelope over 1 of the 100 topics that the two light nodes subscribe to. After that, it sends 10000 envelopes over random topics.

For light node, bloom filter is set to almost 10% false positive (bloom filter: n=100, k=3, m=512). It shows the number of valid and invalid envelopes received for the different nodes.

Star network:

DescriptionPeersValidInvalid
Master node7100010
Full node 13100010
Full node 21100010
Full node 31100010
Full node 41100010
Full node 51100010
Light node28150
Waku light node210

Full mesh:

DescriptionPeersValidInvalid
Full node 071000120676
Full node 17100019554
Full node 251000123304
Full node 351000111983
Full node 451000124425
Full node 551000123472
Light node2803803
Waku light node211

Things to note:

  • Whisper light node with ~10% false positive gets ~10% of total traffic
  • Waku light node gets ~1000x less envelopes than Whisper light node
  • Full mesh results in a lot more duplicate messages, except for Waku light node

Run the simulation yourself here. The parameters are configurable, and it is integrated with Prometheus and Grafana.

Difference between Waku and Whisper

Summary of main differences between Waku v0 spec and Whisper v6, as described in EIP-627:

  • Handshake/Status message not compatible with shh/6 nodes; specifying options as association list
  • Include topic-interest in Status handshake
  • Upgradability policy
  • topic-interest packet code
  • RLPx subprotocol is changed from shh/6 to waku/0.
  • Light node capability is added.
  • Optional rate limiting is added.
  • Status packet has following additional parameters: light-node, confirmations-enabled and rate-limits
  • Mail Server and Mail Client functionality is now part of the specification.
  • P2P Message packet contains a list of envelopes instead of a single envelope.

Next steps and future plans

Several challenges remain to make Waku a robust and suitable base +communication protocol. Here we outline a few challenges that we are addressing and will continue to work on:

  • scalability of the network
  • incentivized infrastructure and spam-resistance
  • build with resource restricted devices in mind, including nodes being mostly offline

For the third bottleneck, a likely candidate for fixing this is Kademlia routing. This is similar to what is done in Swarm's PSS. We are in the early stages of experimenting with this over libp2p in nim-libp2p. More on this in a future post!

Acknowledgements

Image from "caged sky" by mh.xbhd.org is licensed under CC BY 2.0 (https://ccsearch.creativecommons.org/photos/a9168311-78de-4cb7-a6ad-f92be8361d0e)

+ + + + \ No newline at end of file diff --git a/rlog/waku-v1-v2-bandwidth-comparison/index.html b/rlog/waku-v1-v2-bandwidth-comparison/index.html new file mode 100644 index 00000000..0f2c46f4 --- /dev/null +++ b/rlog/waku-v1-v2-bandwidth-comparison/index.html @@ -0,0 +1,115 @@ + + + + + +Waku v1 vs Waku v2: Bandwidth Comparison | Vac Research + + + + + + + + + + +
+

Waku v1 vs Waku v2: Bandwidth Comparison

by
10 min read

A local comparison of bandwidth profiles showing significantly improved scalability in Waku v2 over Waku v1.

Background

The original plan for Waku v2 suggested theoretical improvements in resource usage over Waku v1, mainly as a result of the improved amplification factors provided by GossipSub. In turn, Waku v1 proposed improvements over its predecessor, Whisper.

Given that Waku v2 is aimed at resource restricted environments, we are specifically interested in its scalability and resource usage characteristics. However, the theoretical performance improvements of Waku v2 over Waku v1 have never been properly benchmarked and tested.

Although we're working towards a full performance evaluation of Waku v2, +this would require significant planning and resources, +if it were to simulate "real world" conditions faithfully and measure bandwidth and resource usage across different network connections, +robustness against attacks/losses, message latencies, etc. +(There already exists a fairly comprehensive evaluation of GossipSub v1.1, +on which 11/WAKU2-RELAY is based.)

As a starting point, +this post contains a limited and local comparison of the bandwidth profile (only) between Waku v1 and Waku v2. +It reuses and adapts existing network simulations for Waku v1 and Waku v2 +and compares bandwidth usage for similar message propagation scenarios.

Theoretical improvements in Waku v2

Messages are propagated in Waku v1 using flood routing. +This means that every peer will forward every new incoming message to all its connected peers (except the one it received the message from). +This necessarily leads to unnecessary duplication (termed amplification factor), +wasting bandwidth and resources. +What's more, we expect this effect to worsen the larger the network becomes, +as each connection will receive a copy of each message, +rather than a single copy per peer.

Message routing in Waku v2 follows the libp2p GossipSub protocol, +which lowers amplification factors by only sending full message contents to a subset of connected peers. +As a Waku v2 network grows, each peer will limit its number of full-message ("mesh") peerings - +libp2p suggests a maximum of 12 such connections per peer. +This allows much better scalability than a flood-routed network. +From time to time, a Waku v2 peer will send metadata about the messages it has seen to other peers ("gossip" peers).

See this explainer for a more detailed discussion.

Methodology

The results below contain only some scenarios that provide an interesting contrast between Waku v1 and Waku v2. +For example, star network topologies do not show a substantial difference between Waku v1 and Waku v2. +This is because each peer relies on a single connection to the central node for every message, +which barely requires any routing: +each connection receives a copy of every message for both Waku v1 and Waku v2. +Hybrid topologies similarly show only a difference between Waku v1 and Waku v2 for network segments with mesh-like connections, +where routing decisions need to be made.

For this reason, the following approach applies to all iterations:

  1. Simulations are run locally. +This limits the size of possible scenarios due to local resource constraints, +but is a way to quickly get an approximate comparison.
  2. Nodes are treated as a blackbox for which we only measure bandwidth, +using an external bandwidth monitoring tool. +In other words, we do not consider differences in the size of the envelope (for v1) or the message (for v2).
  3. Messages are published at a rate of 50 new messages per second to each network, +except where explicitly stated otherwise.
  4. Each message propagated in the network carries 8 bytes of random payload, which is encrypted. The same symmetric key cryptographic algorithm (with the same keys) is used in both Waku v1 and v2.
  5. Traffic in each network is generated from 10 nodes (randomly-selected) and published in a round-robin fashion to 10 topics (content topics for Waku v2). +In practice, we found no significant difference in average bandwidth usage when tweaking these two parameters (the number of traffic generating nodes and the number of topics).
  6. Peers are connected in a decentralized full mesh topology, +i.e. each peer is connected to every other peer in the network. +Waku v1 is expected to flood all messages across all existing connections. +Waku v2 gossipsub will GRAFT some of these connections for full-message peerings, +with the rest being gossip-only peerings.
  7. After running each iteration, we verify that messages propagated to all peers (comparing the number of published messages to the metrics logged by each peer).

For Waku v1, nodes are configured as "full" nodes (i.e. with full bloom filter), +while Waku v2 nodes are relay nodes, all subscribing and publishing to the same PubSub topic.

Network size comparison

Iteration 1: 10 nodes

Let's start with a small network of 10 nodes only and see how Waku v1 bandwidth usage compares to that of Waku v2. +At this small scale we don't expect to see improved bandwidth usage in Waku v2 over Waku v1, +since all connections, for both Waku v1 and Waku v2, will be full-message connections. +The number of connections is low enough that Waku v2 nodes will likely GRAFT all connections to full-message peerings, +essentially flooding every message on every connection in a similar fashion to Waku v1. +If our expectations are confirmed, it helps validate our methodology, +showing that it gives more or less equivalent results between Waku v1 and Waku v2 networks.

Sure enough, the figure shows that in this small-scale setup, +Waku v1 actually has a lower per-peer bandwidth usage than Waku v2. +One reason for this may be the larger overall proportion of control messages in a gossipsub-routed network such as Waku v2. +These play a larger role when the total network traffic is comparatively low, as in this iteration. +Also note that the average bandwidth remains more or less constant as long as the rate of published messages remains stable.

Iteration 2: 30 nodes

Now, let's run the same scenario for a larger network of highly-connected nodes, this time consisting of 30 nodes. +At this point, the Waku v2 nodes will start pruning some connections to limit the number of full-message peerings (to a maximum of 12), +while the Waku v1 nodes will continue flooding messages to all connected peers. +We therefore expect to see a somewhat improved bandwidth usage in Waku v2 over Waku v1.

Bandwidth usage in Waku v2 has increased only slightly from the smaller network of 10 nodes (hovering between 2000 and 3000 kbps). +This is because there are only a few more full-message peerings than before. +Compare this to the much higher increase in bandwidth usage for Waku v1, which now requires more than 4000 kbps on average.

Iteration 3: 50 nodes

For an even larger network of 50 highly connected nodes, +the divergence between Waku v1 and Waku v2 is even larger. +The following figure shows comparative average bandwidth usage for a throughput of 50 messages per second.

Average bandwidth usage (for the same message rate) has remained roughly the same for Waku v2 as it was for 30 nodes, +indicating that the number of full-message peerings per node has not increased.

Iteration 4: 85 nodes

We already see a clear trend in the bandwidth comparisons above, +so let's confirm by running the test once more for a network of 85 nodes. +Due to local resource constraints, the effective throughput for Waku v1 falls to below 50 messages per second, +so the v1 results below have been normalized and are therefore approximate. +The local Waku v2 simulation maintains the message throughput rate without any problems.

Iteration 5: 150 nodes

Finally, we simulate message propagation in a network of 150 nodes. +Due to local resource constraints, we run this simulation at a lower rate - +35 messages per second - +and for a shorter amount of time.

Notice how the Waku v1 bandwidth usage is now more than 10 times worse than that of Waku v2. +This is to be expected, as each Waku v1 node will try to flood each new message to 149 other peers, +while the Waku v2 nodes limit their full-message peerings to no more than 12.

Discussion

Let's summarize average bandwidth growth against network growth for a constant message propagation rate. +Since we are particularly interested in how Waku v1 compares to Waku v2 in terms of bandwidth usage, +the results are normalised to the Waku v2 average bandwidth usage for each network size.

Extrapolation is a dangerous game, +but it's safe to deduce that the divergence will only grow for even larger network topologies. +Although control signalling contributes more towards overall bandwidth for Waku v2 networks, +this effect becomes less noticeable for larger networks. +For network segments with more than ~18 densely connected nodes, +the advantage of using Waku v2 above Waku v1 becomes clear.

Network traffic comparison

The analysis above controls the average message rate while network size grows. +In reality, however, active users (and therefore message rates) are likely to grow in conjunction with the network. +This will have an effect on bandwidth for both Waku v1 and Waku v2, though not in equal measure. +Consider the impact of an increasing rate of messages in a network of constant size:

The rate of increase in bandwidth for Waku v2 is slower than that for Waku v1 for a corresponding increase in message propagation rate. +In fact, for a network of 30 densely-connected nodes, +if the message propagation rate increases by 1 per second, +Waku v1 requires an increased average bandwidth of almost 70kbps at each node. +A similar traffic increase in Waku v2 requires on average 40kbps more bandwidth per peer, just over half that of Waku v1.

Conclusions

  • Waku v2 scales significantly better than Waku v1 in terms of average bandwidth usage, +especially for densely connected networks.
  • E.g. for a network consisting of 150 or more densely connected nodes, +Waku v2 provides more than 10x better average bandwidth usage rates than Waku v1.
  • As the network continues to scale, both in absolute terms (number of nodes) and in network traffic (message rates) the disparity between Waku v2 and Waku v1 becomes even larger.

Future work

Now that we've confirmed that Waku v2's bandwidth improvements over its predecessor match theory, we can proceed to a more in-depth characterisation of Waku v2's resource usage. Some questions that we want to answer include:

  • What proportion of Waku v2's bandwidth usage is used to propagate payload versus bandwidth spent on control messaging to maintain the mesh?
  • To what extent is message latency (time until a message is delivered to its destination) affected by network size and message rate?
  • How reliable is message delivery in Waku v2 for different network sizes and message rates?
  • What are the resource usage profiles of other Waku v2 protocols (e.g. 12/WAKU2-FILTER and 19/WAKU2-LIGHTPUSH)?

Our aim is to get ever closer to a "real world" understanding of Waku v2's performance characteristics, +identify and fix vulnerabilities +and continually improve the efficiency of our suite of protocols.

References

+ + + + \ No newline at end of file diff --git a/rlog/waku-v2-ethereum-coscup/index.html b/rlog/waku-v2-ethereum-coscup/index.html new file mode 100644 index 00000000..5b151ee7 --- /dev/null +++ b/rlog/waku-v2-ethereum-coscup/index.html @@ -0,0 +1,78 @@ + + + + + +[Talk at COSCUP] Vac, Waku v2 and Ethereum Messaging | Vac Research + + + + + + + + + + +
+

[Talk at COSCUP] Vac, Waku v2 and Ethereum Messaging

by
8 min read

Learn more about Waku v2, its origins, goals, protocols, implementation and ongoing research. Understand how it is used and how it can be useful for messaging in Ethereum.

This is the English version of a talk originally given in Chinese at COSCUP in Taipei.

video recording with Chinese and English subtitles.


Introduction

Hi everyone!

Today I'll talk to you about Waku v2. What it is, what problems it is solving, +and how it can be useful for things such as messaging in Ethereum. First, let me +start with some brief background.

Brief history and background

Back when Ethereum got started, there used to be this concept of the "holy +trinity". You had Ethereum for compute/consensus, Swarm for storage, and Whisper +for messaging. This is partly where the term Web3 comes from.

Status started out as an app with the goal of being a window onto Ethereum and +a secure messenger. As one of the few, if not the only, apps using Whisper in +production, not to mention on a mobile phone, we quickly realized there were +problems with the underlying protocols and infrastructure. Protocols such as +Whisper weren't quite ready for prime time yet when it came to things such as +scalability and working in the real world.

As we started addressing some of these challenges, and moved from app development to focusing on protocols, research and infrastructure, we created Vac. Vac is an R&D unit doing protocol research focused on creating modular p2p messaging protocols for private, secure, censorship resistant communication.

I won't go into too much detail on the issues with Whisper, if you are +interested in this check out this talk +here or this +article.

In a nutshell, we forked Whisper to address immediate shortcomings and this became Waku v1. Waku v2 is a completely re-thought implementation from scratch on top of libp2p. This will be the subject of today's talk.

Waku v2

Overview

Waku v2 is a privacy-preserving peer-to-peer messaging protocol for resource +restricted devices. We can look at Waku v2 as several things:

  • Set of protocols
  • Set of implementations
  • Network of nodes

Let's first look at what the goals are.

Goals

Waku v2 provides a PubSub based messaging protocol with the following +characteristics:

  1. Generalized messaging. Applications that require a messaging protocol to +communicate human to human, machine to machine, or a mix.
  2. Peer-to-peer. For applications that require a p2p solution.
  3. Resource restricted. For example, running with limited bandwidth, being +mostly-offline, or in a browser.
  4. Privacy. Applications that have privacy requirements, such as pseudonymity, +metadata protection, etc.

And to provide these properties in a modular fashion, where applications can +choose their desired trade-offs.

Protocols

Waku v2 consists of several protocols. Here we highlight a few of the most +important ones:

  • 10/WAKU2 - main specification, details how all the pieces fit together
  • 11/RELAY - thin layer on top of GossipSub for message dissemination
  • 13/STORE - fetching of historical messages
  • 14/MESSAGE - message payload

This is the recommended subset for a minimal Waku v2 client.

In addition to this there are many other types of specifications at various +stages of maturity, such as: content based filtering, bridge mode to Waku v1, +JSON RPC API, zkSNARKS based spam protection with RLN, accounting and +settlements with SWAP, fault-tolerant store nodes, recommendations around topic +usage, and more.

See https://rfc.vac.dev/ for a full overview.

Implementations

Waku v2 consists of multiple implementations. This allows for client diversity, +makes it easier to strengthen the protocols, and allow people to use Waku v2 in +different contexts.

  • nim-waku - the reference client written in Nim, most full-featured.
  • js-waku - allow usage of Waku v2 from browsers, focus on interacting with dapps.
  • go-waku - subset of Waku v2 to ease integration into the Status app.

Testnet Huilong and dogfooding

In order to test the protocol we have setup a testnet across all implementations +called Huilong. Yes, that's the Taipei subway station!

Among us core devs we have disabled the main #waku Discord channel used for +development, and people run their own node connected to this toy chat application.

Feel free to join and say hi! Instructions can be found here:

Research

While Waku v2 is being used today, we are actively researching improvements. +Since the design is modular, we can gracefully introduce new capabilities. Some +of these research areas are:

  • Privacy-preserving spam protection using zkSNARKs and RLN
  • Accounting and settlement of resource usage to incentivize nodes to provide services with SWAP
  • State synchronization for store protocol to make it easier to run a store node without perfect uptime
  • Better node discovery
  • More rigorous privacy analysis
  • Improving interaction with wallets and dapps

Use cases

Let's look at where Waku v2 is and can be used.

Prelude: Topics in Waku v2

To give some context, there are two different types of topics in Waku v2. One is +a PubSub topic, for routing. The other is a content topic, which is used for +content based filtering. Here's an example of the default PubSub topic:

/waku/2/default-waku/proto

This is recommended as it increases privacy for participants and it is stored by +default, however this is up to the application.

The second type of topic is a content topic, which is application specific. For +example, here's the content topic used in our testnet:

/toychat/2/huilong/proto

For more on topics, see https://rfc.vac.dev/spec/23/

Status app

In the Status protocol, content topics - topics in Whisper/Waku v1 - are used for several things:

  • Contact code topic to discover X3DH bundles for perfect forward secrecy
    • Partitioned into N (currently 5000) content topics to balance privacy with efficiency
  • Public chats correspond to hash of the plaintext name
  • Negotiated topic for 1:1 chat with DHKE derived content topic

See more here https://specs.status.im/spec/10

Currently, Status app is in the process of migrating to and testing Waku v2.

DappConnect: Ethereum messaging

It is easy to think of Waku as being for human messaging, since that's how it is +primarily used in the Status app, but the goal is to be useful for generalized +messaging, which includes Machine-To-Machine (M2M) messaging.

Recall the concept of the holy trinity with Ethereum/Swarm/Whisper and Web3 that +we mentioned in the beginning. Messaging can be used as a building block for +dapps, wallets, and users to communicate with each other. It can be used for +things such as:

  • Multisig and DAO vote transactions only needing one on-chain operation
  • Giving dapps ability to send push notifications to users
  • Giving users ability to directly respond to requests from dapps
  • Decentralized WalletConnect
  • Etc

Basically anything that requires communication and doesn't have to be on-chain.

WalletConnect v2

WalletConnect is an open protocol for connecting dapps to wallets with a QR +code. Version 2 is using Waku v2 as a communication channel to do so in a +decentralized and private fashion.

See for more: https://docs.walletconnect.org/v/2.0/tech-spec

WalletConnect v2 is currently in late alpha using Waku v2.

More examples

  • Gasless voting and vote aggregation off-chain
  • Dapp games using Waku as player discovery mechanism
  • Send encrypted message to someone with an Ethereum key
  • <Your dapp here>

These are all things that are in progress / proof of concept stage.

Contribute

We'd love to see contributions of any form!

Conclusion

In this talk we've gone over the original vision for Web3 and how Waku came to +be. We've also looked at what Waku v2 aims to do. We looked at its protocols, +implementations, the current testnet as well as briefly on some ongoing +research for Vac.

We've also looked at some specific use cases for Waku. First we looked at how +Status uses it with different topics. Then we looked at how it can be useful for +messaging in Ethereum, including for things like WalletConnect.

I hope this talk gives you a better idea of what Waku is, why it exists, and +that it inspires you to contribute, either to Waku itself or by using it in your +own project!

+ + + + \ No newline at end of file diff --git a/rlog/waku-v2-ethereum-messaging/index.html b/rlog/waku-v2-ethereum-messaging/index.html new file mode 100644 index 00000000..4dd62216 --- /dev/null +++ b/rlog/waku-v2-ethereum-messaging/index.html @@ -0,0 +1,30 @@ + + + + + +[Talk] Vac, Waku v2 and Ethereum Messaging | Vac Research + + + + + + + + + + +
+

[Talk] Vac, Waku v2 and Ethereum Messaging

by
10 min read

Talk from Taipei Ethereum Meetup. Read on to find out about our journey from Whisper to Waku v2, as well as how Waku v2 can be useful for Ethereum Messaging.

The following post is a transcript of the talk given at the Taipei Ethereum meetup, November 5. There is also a video recording.


0. Introduction

Hi! My name is Oskar and I'm the protocol research lead at Vac. This talk will be divided into two parts. First I'll talk about the journey from Whisper, to Waku v1 and now to Waku v2. Then I'll talk about messaging in Ethereum. After this talk, you should have an idea of what Waku v2 is, the problems it is trying to solve, as well as where it can be useful for messaging in Ethereum.

PART 1 - VAC AND THE JOURNEY FROM WHISPER TO WAKU V1 TO WAKU V2

1. Vac intro

First, what is Vac? Vac grew out of our efforts at Status to create a window onto Ethereum and a secure messenger. Vac is a modular protocol stack for p2p secure messaging, paying special attention to resource restricted devices, privacy and censorship resistance.

Today we are going to talk mainly about Waku v2, which is the transport privacy / routing aspect of the Vac protocol stack. It sits "above" the p2p overlay, such as libp2p dealing with transports etc, and below a conversational security layer dealing with messaging encryption, such as using Double Ratchet etc.

2. Whisper to Waku v1

In the beginning, there was Whisper. Whisper was part of the holy trinity of Ethereum. You had Ethereum for consensus/computation, Whisper for messaging, and Swarm for storage.

However, for various reasons, Whisper didn't get the attention it deserved. Development dwindled, it promised too much and it suffered from many issues, such as being extremely inefficient and not being suitable for running on e.g. a mobile phone. Despite this, Status used it in its app from around 2017 to 2019. As far as I know, it was one of very few, if not the only, production uses of Whisper.

In an effort to solve some of its immediate problems, we forked Whisper into Waku and formalized it with a proper specification. This solved immediate bandwidth issues for light nodes, introduced rate limiting for better spam protection, improved historical message support, etc.

If you are interested in this journey, checkout the EthCC talk Dean and I gave in Paris earlier this year.

Status upgraded to Waku v1 early 2020. What next?

3. Waku v1 to v2

We were far from done. The changes we had made were quite incremental and done in order to get tangible improvements as quickly as possible. This meant we couldn't address more fundamental issues related to full node routing scalability, running with libp2p for more transports, better security, better spam protection and incentivization.

This kickstarted Waku v2 efforts, which is what we've been working on since July. This work was and is initially centered around a few pieces:

(a) Moving to libp2p

(b) Better routing

(c) Accounting and user-run nodes

The general theme was: making the Waku network more scalable and robust.

We also did a scalability study to show at what point the network would run into issues, due to the inherent lack of routing that Whisper and Waku v1 provided.

You can read more about this here.

3.5 Waku v2 - Design goals

Taking a step back, what problem does Waku v2 attempt to solve compared to all the other solutions that exists out there? What type of applications should use it and why? We have the following design goals:

  1. Generalized messaging. Many applications requires some form of messaging protocol to communicate between different subsystems or different nodes. This messaging can be human-to-human or machine-to-machine or a mix.

  2. Peer-to-peer. These applications sometimes have requirements that make them suitable for peer-to-peer solutions.

  3. Resource restricted. These applications often run in constrained environments, where resources or the environment is restricted in some fashion. E.g.:

    • limited bandwidth, CPU, memory, disk, battery, etc
    • not being publicly connectable
    • only being intermittently connected; mostly-offline
  4. Privacy. These applications have a desire for some privacy guarantees, such as pseudonymity, metadata protection in transit, etc.

As well as to do so in a modular fashion. Meaning you can find a reasonable trade-off depending on your exact requirements. For example, you usually have to trade off some bandwidth to get metadata protection, and vice versa.

The concept of designing for resource restricted devices also leads to the concept of adaptive nodes, where you have more of a continuum between full nodes and light nodes. For example, if you switch your phone from mobile data to WiFi you might be able to handle more bandwidth, and so on.

4. Waku v2 - Breakdown

Where is Waku v2 at now, and how is it structured?

It is running over libp2p and we had our second internal testnet last week or so. As a side note, we name our testnets after subway stations in Taipei, the first one being Nangang, and the most recent one being Dingpu.

The main implementation is written in Nim using nim-libp2p, which is also powering Nimbus, an Ethereum 2 client. There is also a PoC for running Waku v2 in the browser. On a spec level, we have the following specifications that correspond to the components that make up Waku v2:

  • Waku v2 - this is the main spec that explains the goals of providing generalized messaging, in a p2p context, with a focus on privacy and running on resources restricted devices.
  • Relay - this is the main PubSub spec that provides better routing. It builds on top of GossipSub, which is what Eth2 heavily relies on as well.
  • Store - this is a 1-1 protocol for light nodes to get historical messages, if they are mostly-offline.
  • Filter - this is a 1-1 protocol for light nodes that are bandwidth restricted to only (or mostly) get messages they care about.
  • Message - this explains the payload, to get some basic encryption and content topics. It corresponds roughly to envelopes in Whisper/Waku v1.
  • Bridge - this explains how to do bridging between Waku v1 and Waku v2 for compatibility.

Right now, all protocols, with the exception of bridge, are in draft mode, meaning they have been implemented but are not yet being relied upon in production.

You can read more about the breakdown in this update, though some progress has been made since then, as well as in the main Waku v2 spec.

5. Waku v2 - Upcoming

What's coming up next? There are a few things.

For Status to use it in production, it needs to be integrated into the main app using the Nim Node API. The bridge also needs to be implemented and tested.

For other users, we are currently overhauling the API to allow usage from e.g. a browser. To make this experience great, there are also a few underlying infrastructure things that we need in nim-libp2p, such as a more secure HTTP server in Nim, Websockets and WebRTC support.

There are also some changes we made to the level at which content encryption happens, and this needs to be made easier to use in the API. This means you can use a node without giving your keys to it, which is useful in some environments.

More generally, beyond getting to production-ready use, there are a few bigger pieces that we are working on or will work on soon. These are things like:

  • Better scaling, by using topic sharding.
  • Accounting and user-run nodes, to account for and incentivize full nodes.
  • Stronger and more rigorous privacy guarantees, e.g. through study of GossipSub, unlinkable packet formats, etc.
  • Rate Limit Nullifier for privacy preserving spam protection, a la what Barry Whitehat has presented before.

As well as better support for Ethereum M2M Messaging. Which is what I'll talk about next.

PART 2 - ETHEREUM MESSAGING

A lot of what follows is inspired by exploratory work that John Lea has done at Status, previously Head of UX Architecture at Ubuntu.

6. Ethereum Messaging - Why?

It is easy to think that Waku v2 is only for human to human messaging, since that's how Waku is currently primarily used in the Status app. However, the goal is to be useful for generalized messaging, which includes other type of information as well as machine to machine messaging.

What is Ethereum M2M messaging? Going back to the Holy Trinity of Ethereum/Whisper/Swarm, the messaging component was seen as something that could facilitate messages between dapps and acts as a building block. This can help with things such as:

  • Reducing on-chain transactions
  • Reduce latency for operations
  • Decentralize centrally coordinated services (like WalletConnect)
  • Improve UX of dapps
  • Broadcast live information
  • A message transport layer for state channels

And so on.

7. Ethereum Messaging - Why? (Cont)

What are some examples of practical things Waku as used for Ethereum Messaging could solve?

  • Multisig transfers only needing one on-chain transaction
  • DAO votes only needing one on-chain transaction
  • Giving dapps the ability to push notifications directly to users
  • Giving users the ability to directly respond to requests from dapps
  • Decentralized Wallet Connect

Etc.

8. What's needed to deliver this?

We can break it down into our actors:

  • Decentralized M2M messaging system (Waku)
  • Native wallets (Argent, Metamask, Status, etc)
  • Dapps that benefit from M2M messaging
  • Users whose problems are being solved

Each of these has a bunch of requirements in turn. The messaging system needs to be decentralized, scalable, robust, etc. Wallets need support for messaging layer, dapps need to integrate this, etc.

This is a lot! Growing adoption is a challenge. There is a catch 22 in terms of justifying development efforts for wallets, when no dapps need it, and likewise for dapps when no wallets support Waku. In addition to this, there must be proven usage of Waku before it can be relied on, etc. How can we break this up into smaller pieces of work?

9. Breaking up the problem and a high level roadmap

We can start small. It doesn't need to be used for critical features first. A more hybrid approach can be taken where it acts more as nice-to-haves.

  1. Forking Whisper and solving scalability, spam, etc. issues with it. This is a work in progress. What we talked about in part 1.
  2. Expose messaging API for Dapp developers.
  3. Implement decentralized version of WalletConnect. Currently wallets connect to dapps via a centralized service. Great UX.
  4. Solve DAO/Multi-Sig coordination problem. E.g. send message to wallet-derived key when it is time to sign a transaction.
  5. Extend dapp-to-user and user-to-dapp communication to more dapps. Use lessons learned and examples to drive adoption for wallets/dapps.

And then build up from there.

10. We are hiring!

A lot of this will happen in Javascript and browsers, since that's the primarily environment for a lot of wallets and dapps. We are currently hiring for a Waku JS Wallet integration lead to help push this effort further.

Come talk to me after or apply here.

That's it! You can find us on Status, Telegram, vac.dev. I'm on twitter here.

Questions?


+ + + + \ No newline at end of file diff --git a/rlog/waku-v2-plan/index.html b/rlog/waku-v2-plan/index.html new file mode 100644 index 00000000..638c46d8 --- /dev/null +++ b/rlog/waku-v2-plan/index.html @@ -0,0 +1,35 @@ + + + + + +What's the Plan for Waku v2? | Vac Research + + + + + + + + + + +
+

What's the Plan for Waku v2?

by
14 min read

Read about our plans for Waku v2, moving to libp2p, better routing, adaptive nodes and accounting!

tldr: The Waku network is fragile and doesn't scale. Here's how to solve it.

NOTE: This post was originally written with Status as a primary use case in mind, which reflects how we talk about some problems here. However, Waku v2 is a general-purpose private p2p messaging protocol, especially for people running in resource restricted environments.

Problem

The Waku network is fragile and doesn't scale.

As Status is moving into a user-acquisition phase and is improving retention rates for users they need the infrastructure to keep up, specifically when it comes to messaging.

Based on user acquisition models, the initial goal is to support 100k DAU in September, with demand growing from there.

With the Status Scaling Model we have studied the current bottlenecks as a function of concurrent users (CCU) and daily active users (DAU). Here are the conclusions.

**1. Connection limits**. With 100 full nodes we reach ~10k CCU based on connection limits. This can primarily be addressed by increasing the number of nodes (cluster or user operated). This assumes node discovery works. It is also worth investigating the limitations of max number of connections, though this is likely to be less relevant for user-operated nodes. For a user-operated network, this means 1% of users have to run a full node. See Fig 1-2.

**2. Bandwidth as a bottleneck**. We notice that memory usage appears to not be the primary bottleneck for full nodes, and the bottleneck is still bandwidth. To support 10k DAU, and full nodes with an amplification factor of 25 the required Internet speed is ~50 Mbps, which is a fast home Internet connection. For ~100k DAU only cloud-operated nodes can keep up (500 Mbps). See Fig 3-5.

**3. Amplification factors**. Reducing amplification factors with better routing, would have a high impact, but it is likely we'd need additional measures as well, such as topic sharding or similar. See Fig 8-13.

Figure 1-5:

+
+
+
+

See https://colab.research.google.com/drive/1Fz-oxRxxAFPpM1Cowpnb0nT52V1-yeRu#scrollTo=Yc3417FUJJ_0 for the full report.

What we need to do is:

  1. Reduce amplification factors
  2. Get more user-run full nodes

Doing this means the Waku network will be able to scale, and doing so in the right way, in a robust fashion. What would a fragile way of scaling be? Increasing our reliance on a Status Pte Ltd operated cluster which would paint us in a corner where we:

  • keep increasing requirements for Internet speed for full nodes
  • are vulnerable to censorship and attacks
  • have to control the topology in an artificial manner to keep up with load
  • basically re-invent a traditional centralized client-server app with extra steps
  • deliberately ignore most of our principles
  • risk the network being shut down when we run out of cash

Appetite

Our initial risk appetite for this is 6 weeks for a small team.

The idea is that we want to make tangible progress towards the goal in a limited period of time, as opposed to getting bogged down in trying to find a theoretically perfect generalized solution. Fixed time, variable scope.

It is likely some elements of a complete solution will be done separately. See later sections for that.

Solution

There are two main parts of the solution. One is to reduce amplification factors, and the other is incentivization to get more user run full nodes with desktop, etc.

What does a full node provide? It provides connectivity to the network, can act as a bandwidth "barrier" and be high or reasonably high availability. What this means right now is essentially topic interest and storing historical messages.

The goal is here to improve the status quo, not get a perfect solution from the get go. All of this can be iterated on further, for stronger guarantees, as well as replaced by other new modules.

Let's first look at the baseline, and then go into some of the tracks and their phases. Track 1 is best done first, after which track 2 and 3 can be executed in parallel. Track 1 gives us more options for track 2 and 3. The work in track 1 is currently more well-defined, so it is likely the specifics of track 2 and 3 will get refined at a later stage.

Baseline

Here's where we are at now. In reality, the amplification factors are likely even worse than this (15 in the graph below), up to 20-30. Especially with an open network, where we can't easily control connectivity and availability of nodes. Left unchecked, with a full mesh, it could even go as high as x100, though this is likely excessive and can be dialed down. See scaling model for more details.

Track 1 - Move to libp2p

Moving to PubSub over libp2p wouldn't improve amplification per se, but it would be a stepping stone. Why? It paves the way for GossipSub, and would be a checkpoint on this journey. Additionally, FloodSub and GossipSub are compatible, and very likely other future forms of PubSub such as GossipSub 1.1 (hardened/more secure), EpiSub, forwarding Kademlia / PubSub over Kademlia, etc. Not to mention security. This would also give us access to the larger libp2p ecosystem (multiple protocols, better encryption, quic, running in the browser, security audits, etc, etc), as well as be a joint piece of infrastructure used for Eth2 in Nimbus. More wood behind fewer arrows.

See more on libp2p PubSub here: https://docs.libp2p.io/concepts/publish-subscribe/

As part of this move, there are a few individual pieces that are needed.

1. FloodSub

This is essentially what Waku over libp2p would look like in its most basic form.

One difference that is worth noting is that the app topics would not be the same as Waku topics. Why? In Waku we currently don't use topics for routing between full nodes, but only for edge/light nodes in the form of topic interest. In FloodSub, these topics are used for routing.

Why can't we use Waku topics for routing directly? PubSub over libp2p isn't built for rare and ephemeral topics, and nodes have to explicitly subscribe to a topic. See topic sharding section for more on this.

Moving to FloodSub over libp2p would also be an opportunity to clean up and simplify some components that are no longer needed in the Waku v1 protocol, see point below.

Very experimental and incomplete libp2p support can be found in the nim-waku repo under v2: https://github.com/status-im/nim-waku

2. Simplify the protocol

Due to Waku's origins in Whisper, devp2p and as a standalone protocol, there is a lot of stuff that has accumulated (https://rfc.vac.dev/spec/6/). Not all of it serves its purpose anymore. For example, do we still need RLP here when we have Protobuf messages? What about extremely low PoW when we have peer scoring? What about key management / encryption when we have encryption at libp2p and Status protocol level?

Not everything has to be done in one go, but being minimalist at this stage will keep the protocol lean and make us more adaptable.

The essential characteristic that has to be maintained is that we don't need to change the upper layers, i.e. we still deal with (Waku) topics and some envelope like data unit.

3. Core integration

As early as possible we want to integrate with Core via Stimbus in order to mitigate risk and catch integration issues early in the process. What this looks like in practice is some set of APIs, similar to how Whisper and Waku were working in parallel, and experimental feature behind a toggle in core/desktop.

4. Topic interest behavior

While we target full node traffic here, we want to make sure we maintain the existing bandwidth requirements for light nodes that Waku v1 addressed (https://vac.dev/fixing-whisper-with-waku). This means implementing topic-interest in the form of Waku topics. Note that this would be separate from the app topics noted above.

5. Historical message caching

Basically what mailservers are currently doing. This likely looks slightly different in a libp2p world. This is another opportunity to simplify things with a basic REQ-RESP architecture, as opposed to the roundabout way things are now. Again, not everything has to be done in one go but there's no reason to reimplement a poor API if we don't have to.

Also see section below on adaptive nodes and capabilities.

6. Waku v1 <> Libp2p bridge

To make the transition complete, there has to be a bridge mode between current Waku and libp2p. This is similar to what was done for Whisper and Waku, and allows any nodes in the network to upgrade to Waku v2 at their leisure. For example, this would likely look different for Core, Desktop, Research and developers.

Track 2 - Better routing

This is where we improve the amplification factors.

1. GossipSub

This is a subprotocol of FloodSub in the libp2p world. Moving to GossipSub would allow traffic between full nodes to go from an amplification factor of ~25 to ~6. This basically creates a mesh of stable bidirectional connections, together with some gossiping capabilities outside of this view.

Explaining how GossipSub works is out of scope of this document. It is implemented in nim-libp2p and used by Nimbus as part of Eth2. You can read the specs here in more detail if you are interested: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md and https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md

+
+
+

While we technically could implement this over existing Waku, we'd have to re-implement it, and we'd lose out on all the other benefits libp2p would provide, as well as the ecosystem of people and projects working on improving the scalability and security of these protocols.

2. Topic sharding

This one is slightly more speculative in terms of its ultimate impact. The basic idea is to split the application topic into N shards, say 10, and then each full node can choose which shards to listen to. This can reduce amplification factors by another factor of 10.

+

Note that this means a light node that listens to several topics would have to be connected to more full nodes to get connectivity. For a more exotic version of this, see https://forum.vac.dev/t/rfc-topic-propagation-extension-to-libp2p-pubsub/47

This is orthogonal from the choice of FloodSub or GossipSub, but due to GossipSub's more dynamic nature it is likely best combined with it.

3. Other factors

Not a primary focus, but worth a look. Looking at the scaling model, there might be other easy wins to improve overall bandwidth consumption between full nodes. For example, can we reduce envelope size by a significant factor?

Track 3 - Accounting and user-run nodes

This is where we make sure the network isn't fragile, become a true p2p app, get our users excited and engaged, and allow us to scale the network without creating an even bigger cluster.

To work in practice, this has a soft dependency on node discovery such as DNS based discovery (https://eips.ethereum.org/EIPS/eip-1459) or Discovery v5 (https://vac.dev/feasibility-discv5).

1. Adaptive nodes and capabilities

We want to make the gradation between light nodes, full nodes, storing (partial set of) historical messages, only acting for a specific shard, etc more flexible and explicit. This is required to identify and discover the nodes you want. See https://github.com/vacp2p/specs/issues/87

Depending on how the other tracks come together, this design should allow for a desktop node to identify as a full relaying node for some app topic shard, but also express waku topic interest and retrieve historical messages itself.

E.g. Disc v5 can be used to supply node properties through ENR.

2. Accounting

This is based on a few principles:

  1. Some nodes contribute a lot more than other nodes in the network
  2. We can account for the difference in contribution in some fashion
  3. We want to incentivize nodes to tell the truth, and be incentivized not to lie

Accounting here is a stepping stone, where accounting is the raw data upon which some settlement later occurs. It can have various forms of granularity. See https://forum.vac.dev/t/accounting-for-resources-in-waku-and-beyond/31 for discussion.

We also note that in GossipSub, the mesh is bidirectional. Additionally, it doesn't appear to be a high priority issue in terms of nodes misreporting. What is an issue is having people run full nodes in the first place. There are a few points to that. It has to be possible in the end-user UX, nodes have to be discovered, and it has to be profitable/visible that you are contributing. UX and discovery are out of scope for this work, whereas visibility/accounting is part of this scope. Settlement is a stretch goal here.

The general shape of the solution is inspired by the Swarm model, where we do accounting separate from settlement. It doesn't require any specific proofs, but nodes are incentivized to tell the truth in the following way:

  1. Both full node and light node do accounting in a pairwise, local fashion
  2. If a light node doesn't ultimately pay or lies about reporting, they get disconnected (e.g.)
  3. If a full node doesn't provide its service the light node may pick another full node (e.g.)

While accounting for individual resource usage is useful, for the ultimate end user experience we can ideally account for other things such as:

  • end to end delivery
  • online time
  • completeness of storage

This can be gradually enhanced and strengthened, for example with proofs, consistency checks, Quality of Service, reputation systems. See https://discuss.status.im/t/network-incentivisation-first-draft/1037 for one attempt to provide stronger guarantees with periodic consistency checks and a shared fund mechanism. And https://forum.vac.dev/t/incentivized-messaging-using-validity-proofs/51 for using validity proofs and removing liveness requirement for settlement.

All of this is optional at this stage, because our goal here is to improve the status quo for user run nodes. Accounting at this stage should be visible and correspond to the net benefit a node provides to another.

As a concrete example: a light node has some topic interest and cares about historical messages on some topic. A full node communicates envelopes as they come in, communicates their high availability (online time) and stores/forward stored messages. Both nodes have this information, and if they agree settlement (initially just a mock message) can be sending a payment to an address at some time interval / over some defined volume. See future sections for how this can be improved upon.

Also see below in section 4, using constructs such as eigentrust as a local reputation mechanism.

3. Relax high availability requirement

If we want desktop nodes to participate in the storing of historical messages, high availability is a problem. It is a problem for any node, especially if they lie about it, but assuming they are honest it is still an issue.

By being connected to multiple nodes, we can get an overlapping online window. Then these can be combined together to get consistency. This is obviously experimental and would need to be tested before being deployed, but if it works it'd be very useful.

Additionally or alternatively, instead of putting a high requirement on message availability, focus on detection of missing information. This likely requires re-thinking how we do data sync / replication.

4. Incentivize light and full nodes to tell the truth (policy, etc)

In accounting phase it is largely assumed nodes are honest. What happens when they lie, and how do we incentivize them to be honest? In the case of Bittorrent this is done with tit-for-tat, however this is a different kind of relationship. What follows are some examples of how this can be done.

For light nodes:

  • if they don't, they get disconnected
  • prepayment (especially to "high value" nodes)

For full nodes:

  • multiple nodes reporting to agree, where truth becomes a shelling point
  • use eigentrust
  • staking for discovery visibility with slashing

5. Settlement PoC

Can be done after phase 2 if so desired. Basically integrate payments based on accounting and policy.

Out of scope

  1. We assume the Status Base model requirements are accurate.
  2. We assume Core will improve retention rates.
  3. We assume the Stimbus production team will enable integration of nim-waku.
  4. We assume Discovery mechanisms such as DNS and Discovery v5 will be worked on separately.
  5. We assume Core will, at some point, provide a UX for integrating payment of services.
  6. We assume the desktop client is sufficiently usable.
  7. We assume Core and Infra will investigate ways of improving MaxPeers.
+ + + + \ No newline at end of file diff --git a/rlog/waku-v2-update/index.html b/rlog/waku-v2-update/index.html new file mode 100644 index 00000000..cea9d383 --- /dev/null +++ b/rlog/waku-v2-update/index.html @@ -0,0 +1,28 @@ + + + + + +Waku v2 Update | Vac Research + + + + + + + + + + +
+

Waku v2 Update

by
8 min read

A research log. Read on to find out what is going on with Waku v2, a messaging protocol. What has been happening? What is coming up next?

It has been a while since the last post. It is time for an update on Waku v2. Aside from getting more familiar with libp2p (specifically nim-libp2p) and some vacation, what have we been up to? In this post we'll talk about what we've gotten done since last time, and briefly talk about immediate next steps and future. But first, a recap.

Recap

In the last post (Waku v2 plan) we explained the rationale of Waku v2 - the current Waku network is fragile and doesn't scale. To solve this, Waku v2 aims to reduce amplification factors and get more user run nodes. We broke the work down into three separate tracks.

  1. Track 1 - Move to libp2p
  2. Track 2 - Better routing
  3. Track 3 - Accounting and user-run nodes

As well as various rough components for each track. The primary initial focus is track 1. This means things like: moving to FloodSub, simplify the protocol, core integration, topic interest behavior, historical message caching, and Waku v1<>v2 bridge.

Current state

Let's talk about the state of specs and our main implementation nim-waku. Then we'll go over our recent testnet, Nangang, and finish off with a Web PoC.

Specs

After some back and forth on how to best structure things, we ended up breaking down the specs into a few pieces. While Waku v2 is best thought of as a cohesive whole in terms of its capabilities, it is made up of several protocols. Here's a list of the current specs and their status:

Raw means there is not yet an implementation that corresponds fully to the spec, and draft means there is an implementation that corresponds to the spec. In the interest of space, we won't go into too much detail on the specs here except to note a few things:

  • The relay spec is essentially a thin wrapper on top of PubSub/FloodSub/GossipSub
  • The filter protocol corresponds to previous light client mode in Waku v1
  • The store protocol corresponds to the previous mailserver construct in Waku v1

The filter and store protocol allow for adaptive nodes, i.e. nodes that have various capabilities. For example, a node being mostly offline, or having limited bandwidth capacity. The bridge spec outlines how to bridge the Waku v1 and v2 networks.

Implementation

The main implementation we are working on is nim-waku. This builds on top of libraries such as nim-libp2p and others that the Nimbus team have been working on as part of their Ethereum 2.0 client.

Currently nim-waku implements the relay protocol, and is close to implementing filter and store protocol. It also exposes a Nim Node API that allows libraries such as nim-status to use it. Additionally, there is also a rudimentary JSON RPC API for command line scripting.

Nangang testnet

Last week we launched a very rudimentary internal testnet called Nangang. The goal was to test basic connectivity and make sure things work end to end. It didn't have things like: client integration, encryption, bridging, multiple clients, store/filter protocol, or even a real interface. What it did do is allow Waku developers to "chat" via RPC calls and looking in the log output. Doing this meant we exposed and fixed a few blockers, such as connection issues, deployment, topic subscription management, protocol and node integration, and basic scripting/API usage. After this, we felt confident enough to upgrade the main and relay spec to "draft" status.

Waku Web PoC

As a bonus, we wanted to see what it'd take to get Waku running in a browser. This is a very powerful capability that enables a lot of use cases, and something that libp2p enables with its multiple transport support.

Using the current stack, with nim-waku, would require quite a lot of ground work with WASM, WebRTC, Websockets support etc. Instead, we decided to take a shortcut and hack together a JS implementation called Waku Web Chat. This quick hack wouldn't be possible without the people behind js-libp2p-examples and js-libp2p and all its libraries. These are people like Jacob Heun, Vasco Santos, and Cayman Nava. Thanks!

It consists of a browser implementation, a NodeJS implementation and a bootstrap server that acts as a signaling server for WebRTC. It is largely a bastardized version of GossipSub, and while it isn't completely to spec, it does allow messages originating from a browser to eventually end up at a nim-waku node, and vice versa. Which is pretty cool.

Coming up

Now that we know what the current state is, what is still missing? What are the next steps?

Things that are missing

While we are getting closer to closing out work for track 1, there are still a few things missing from the initial scope:

  1. Store and filter protocols need to be finished. This means basic spec, implementation, API integration and proven to work in a testnet. All of these are work in progress and expected to be done very soon. Once the store protocol is done in a basic form, it needs further improvements to make it production ready, at least on a spec/basic implementation level.

  2. Core integration was mentioned in scope for track 1 initially. This work has stalled a bit, largely due to organizational bandwidth and priorities. While there is a Nim Node API that in theory is ready to be used, having it be used in e.g. Status desktop or mobile app is a different matter. The team responsible for this at Status (status-nim) has been making progress on getting nim-waku v1 integrated, and is expected to look into nim-waku v2 integration soon. One thing that makes this especially tricky is the difference in interface between Waku v1 and v2, which brings us to...
  3. Companion spec for encryption. As part of simplifying the protocol, the routing is decoupled from the encryption in v2 (1, 2). There are multiple layers of encryption at play here, and we need to figure out a design that makes sense for various use cases (dapps using Waku on their own, Status app, etc).

  4. Bridge implementation. The spec is done and we know how it should work, but it needs to be implemented.

  5. General tightening up of specs and implementation.

While this might seem like a lot, a lot has been done already, and the majority of the remaining tasks are more amenable to being pursued in parallel with other efforts. It is also worth mentioning that part of track 2 and 3 have been started, in the form of moving to GossipSub (amplification factors) and basics of adaptive nodes (multiple protocols). This is in addition to things like Waku Web which were not part of the initial scope.

Upcoming

Aside from the things mentioned above, what is coming up next? There are a few areas of interest, mentioned in no particular order. For track 2 and 3, see previous post for more details.

  1. Better routing (track 2). While we are already building on top of GossipSub, we still need to explore things like topic sharding in more detail to further reduce amplification factors.

  2. Accounting and user-run nodes (track 3). With store and filter protocol getting ready, we can start to implement accounting and light connection game for incentivization in a bottom up and iterative manner.

  3. Privacy research. Study better and more rigorous privacy guarantees. E.g. how FloodSub/GossipSub behaves for common threat models, and how custom packet format can improve things like unlinkability.

  4. zkSnarks RLN for spam protection and incentivization. We studied this last year and recent developments have made this relevant to study again. Create an experimental spec/PoC as an extension to the relay protocol. Kudos to Barry Whitehat and others like Kobi Gurkan and Koh Wei Jie for pushing this!

  5. Ethereum M2M messaging. Being able to run in the browser opens up a lot of doors, and there is an opportunity here to enable things like a decentralized WalletConnect, multi-sig transactions, voting and similar use cases. This was the original goal of Whisper, and we'd like to deliver on that.

As you can tell, quite a lot of things! Luckily, we have two people joining as protocol engineers soon, which will bring much needed support for the current team of ~2-2.5 people. More details to come in further updates.


If you are feeling adventurous and want to use early stage alpha software, check out the docs. If you want to read the specs, head over to Waku spec. If you want to talk with us, join us on Status or on Telegram (they are bridged).

+ + + + \ No newline at end of file diff --git a/rlog/wakuv2-apd/index.html b/rlog/wakuv2-apd/index.html new file mode 100644 index 00000000..7974de31 --- /dev/null +++ b/rlog/wakuv2-apd/index.html @@ -0,0 +1,165 @@ + + + + + +Waku v2 Ambient Peer Discovery | Vac Research + + + + + + + + + + +
+

Waku v2 Ambient Peer Discovery

by
18 min read

Introducing and discussing ambient peer discovery methods currently used by Waku v2, as well as future plans in this area.

Waku v2 comprises a set of modular protocols for secure, privacy preserving communication. +Avoiding centralization, these protocols exchange messages over a P2P network layer. +In order to build a P2P network, participating nodes first have to discover peers within this network. +This is where ambient peer discovery comes into play: +it allows nodes to find peers, making it an integral part of any decentralized application.

In this post the term node refers to our endpoint or the endpoint that takes action, while the term peer refers to other endpoints in the P2P network. These endpoints can be any device connected to the Internet: e.g. servers, PCs, notebooks, mobile devices, or applications like a browser. As such, nodes and peers are technically the same; we use the two terms for ease of explanation, without loss of generality.

In Waku's modular design, ambient peer discovery is an umbrella term for mechanisms that allow nodes to find peers. +Various ambient peer discovery mechanisms are supported, and each is specified as a separate protocol. +Where do these protocols fit into Waku's protocol stack? +The P2P layer of Waku v2 builds on libp2p gossipsub. +Nodes participating in a gossipsub protocol manage a mesh network that is used for routing messages. +This mesh network is an unstructured P2P network +offering high robustness and resilience against attacks. +Gossipsub implements many improvements overcoming the shortcomings typically associated with unstructured P2P networks, e.g. inefficient flooding based routing. +The gossipsub mesh network is managed in a decentralized way, which requires each node to know other participating peers. +Waku v2 may use any combination of its ambient discovery protocols to find appropriate peers.

Summarizing, Waku v2 comprises a peer management layer based on libp2p gossipsub, +which manages the peers of nodes, and an ambient peer discovery layer, +which provides information about peers to the peer management layer.

We focus on ambient peer discovery methods that are in line with our goal of building a fully decentralized, generalized, privacy-preserving and censorship-resistant messaging protocol. +Some of these protocols still need adjustments to adhere to our privacy and anonymity requirements. For now, we focus on operational stability and feasibility. +However, when choosing techniques, we pay attention to selecting mechanisms that can feasibly be tweaked for privacy in future research efforts. +Because of the modular design and the fact that Waku v2 has several discovery methods at its disposal, we could even remove a protocol in case future evaluation deems it not fitting our standards.

This post covers the current state and future considerations of ambient peer discovery for Waku v2, +and gives reason for changes and modifications we made or plan to make. +The ambient peer discovery protocols currently supported by Waku v2 are a modified version of Ethereum's Discovery v5 +and DNS-based discovery. +Waku v2 further supports gossipsub's peer exchange protocol. +In addition, we plan to introduce protocols for general peer exchange and capability discovery, respectively. +The former allows resource restricted nodes to outsource querying for peers to stronger peers, +the latter allows querying peers for their supported capabilities. +Besides these new protocols, we are working on integrating capability discovery in our existing ambient peer discovery protocols.

Static Node Lists

The simplest method of learning about peers in a P2P network is via static node lists. +These can be given to nodes as start-up parameters or listed in a config-file. +They can also be provided in a script-parseable format, e.g. in JSON. +While this method of providing bootstrap nodes is very easy to implement, it requires static peers, which introduce centralized elements. +Also, updating static peer information introduces significant administrative overhead: +code and/or config files have to be updated and released. +Typically, static node lists only hold a small number of bootstrap nodes, which may lead to high load on these nodes.

DNS-based Discovery

Compared to static node lists, +DNS-based discovery (specified in EIP-1459) +provides a more dynamic way of discovering bootstrap nodes. +It is very efficient, can easily be handled by resource restricted devices and provides very good availability. +In addition to a naive DNS approach, Ethereum's DNS-based discovery introduces efficient authentication leveraging Merkle trees.

A further advantage over static node lists is the separation of code/release management and bootstrap node management. +However, changing and updating the list of bootstrap nodes still requires administrative privileges because DNS records have to be added or updated.

While this method of discovery still requires centralized elements, +node list management can be delegated to various DNS zones managed by other entities mitigating centralization.

Discovery V5

A much more dynamic method of ambient peer discovery is Discovery v5, which is Ethereum's peer discovery protocol. +It is based on the Kademlia distributed hashtable (DHT). +An introduction to discv5 and its history, and a discv5 Waku v2 feasibility study +can be found in previous posts on this research log.

We use Discovery v5 as an ambient peer discovery method for Waku v2 because it is decentralized, efficient, actively researched, and has web3 as its main application area. +Discv5 also offers mitigation techniques for various attacks, which we cover later in this post.

Using a DHT (structured P2P network) as a means for ambient peer discovery, while using the gossipsub mesh network (unstructured P2P network) for transmitting actual messages, +Waku v2 leverages advantages from both worlds. +One of the main benefits of DHTs is offering a global view over participating nodes. +This, in turn, allows sampling random sets of nodes which is important for equally distributing load. +Gossipsub, on the other hand, offers great robustness and resilience against attacks. +Even if discv5 discovery should not work in advent of a DoS attack, Waku v2 can still operate switching to different discovery methods.

Discovery methods that use separate P2P networks still depend on bootstrapping, +which Waku v2 does via parameters on start-up or via DNS-based discovery. +This might raise the question of why such discovery methods are beneficial. +The answer lies in the aforementioned global view of DHTs. Without discv5 and similar methods, the bootstrap nodes are used as part of the gossipsub mesh. +This might put heavy load on these nodes and further, might open pathways to inference attacks. +Discv5, on the other hand, uses the bootstrap nodes merely as an entry to the discovery network and can provide random sets of nodes (sampled from a global view) +for bootstrapping or expanding the mesh.

DHT Background

Distributed Hash Tables are a class of structured P2P overlay networks. +A DHT can be seen as a distributed node set of which each node is responsible for a part of the hash space. +In contrast to unstructured P2P networks, e.g. the mesh network maintained by gossipsub, +DHTs have a global view over the node set and the hash space (assuming the participating nodes behave well).

DHTs are susceptible to various kinds of attacks, especially Sybil attacks +and eclipse attacks. +While security aspects have been addressed in various research papers, general practical solutions are not available. +However, discv5 introduced various practical mitigation techniques.

Random Walk Discovery

While discv5 is based on the Kademlia DHT, it only uses the distributed node set aspect of DHTs. +It does not map values (items) into the distributed hash space. +This makes sense, because the main purpose of discv5 is discovering other nodes that support discv5, which are expected to be Ethereum nodes. +Ethereum nodes that want to discover other Ethereum nodes simply query the discv5 network for a random set of peers. +If Waku v2 would do the same, only a small subset of the retrieved nodes would support Waku v2.

A first naive solution for Waku v2 discv5 discovery is

  • retrieve a random node set, which is achieved by querying for a set of randomly chosen node IDs
  • filter the returned nodes on the query path based on Waku v2 capability via the Waku v2 ENR
  • repeat until enough Waku v2 capable nodes are found

This query process boils down to random walk discovery, which is very resilient against attacks, but also very inefficient if the number of nodes supporting the desired capability is small. +We refer to this as the needle-in-the-haystack problem.

Random Walk Performance Estimation

This subsection provides a rough estimation of the overhead introduced by random walk discovery.

Given the following parameters:

  • $n$ — the number of total nodes participating in discv5
  • $p$ — the percentage of nodes supporting Waku
  • $W$ — the event of having at least one Waku node in a random sample
  • $k$ — the size of a random sample (default = 16)
  • $\alpha$ — the number of parallel queries started
  • $b$ — bits per hop
  • $q$ — the number of queries

A query takes $\log_{2^b} n$ hops to retrieve a random sample of nodes.

$P(W) = 1 - (1 - p/100)^k$ is the probability of having at least one Waku node in the sample.

$P(W^q) = 1 - (1 - p/100)^{kq}$ is the probability of having at least one Waku node in the union of $q$ samples.

Expressing this in terms of $q$, we can write: $P(W^q) = 1 - (1 - p/100)^{kq} \iff q = \log_{(1-p/100)^k}\left(1 - P(W^q)\right)$

Figure 1 shows a log-log plot for $P(W^q) = 90\%$.

Figure 1: log-log plot showing the number of queries necessary to retrieve a Waku v2 node with a probability of 90% in relation to the Waku v2 node concentration in the network.

Assuming $p = 0.1$, we would need

$0.9 = 1 - (1 - 0.1/100)^{16q} \Rightarrow q \approx 144$

queries to get a Waku node with 90% probability, which leads to $\approx 144 \cdot 18 = 2592$ overlay hops. Choosing $b = 3$ would reduce the number to $\approx 144 \cdot 6 = 864$. Even when choosing $\alpha = 10$ we would have to wait at least 80 RTTs. This effort is just for retrieving a single Waku node. Ideally, we want at least 3 Waku nodes for bootstrapping a Waku relay.

The discv5 doc roughly estimates $p = 1\%$ to be the threshold for acceptably efficient random walk discovery. This is in line with our estimation:

$0.9 = 1 - (1 - 1/100)^{16q} \Rightarrow q \approx 14$

The number of necessary queries is inversely proportional to the percentage $p$ of Waku nodes. The number of hops per query is logarithmically dependent on $n$. Thus, random walk searching is inefficient for small percentages $p$. Still, random walks are more resilient against attacks.

We can conclude that a Waku node concentration below 1% renders vanilla discv5 unfit for our needs. +Our current solution and future plans for solving this issue are covered in the next subsections.

Simple Solution: Separate Discovery Network

The simple solution we currently use for Waku v2 discv5 is a separate discv5 network. +All (well behaving) nodes in this network support Waku v2, resulting in a very high query efficiency. +However, this solution reduces resilience because the difficulty of attacking a DHT scales with the number of participating nodes.

Discv5 Topic Discovery

We did not base our solution on the current version of discv5 topic discovery, +because, similar to random walk discovery, it suffers from poor performance for relatively rare capabilities/topics.

However, there is ongoing research in discv5 topic discovery which is close to ideas we explored when pondering efficient and resilient Waku discv5 solutions. +We keep a close eye on this research, give feedback, and make suggestions, as we plan to switch to this version of topic discovery in the future.

In a nutshell, topic discovery will manage separate routing tables for each topic. +These topic specific tables are initialized with nodes from the discv5 routing table. +While the buckets of the discv5 routing table represent distance intervals from the node's node ID, the topic table buckets represent distance intervals from topic IDs.

Nodes that want to register a topic try to register that topic at one random peer per bucket. +This leads to registering the topic at peers in closer and closer neighbourhoods around the topic ID, which +yields a very efficient and resilient compromise between random walk discovery and DHT discovery. +Peers in larger neighbourhoods around the topic ID are less efficient to discover, however more resilient against eclipse attacks and vice versa.

Further, this works well with the overload and DoS protection discv5 employs. Discv5 limits the number of nodes registered per topic on a single peer. Further, discv5 enforces a waiting time before nodes can register topics at peers. So, for popular topics, a node might fail to register the topic in a close neighbourhood. However, because the topic is popular (has a high occurrence percentage $p$), it can still be efficiently discovered.

In the future, we also plan to integrate Waku v2 capability discovery, which will not only allow asking for nodes that support Waku v2, +but asking for Waku v2 nodes supporting specific Waku v2 protocols like filter or store. +For the store protocol we envision sub-capabilities reflecting message topics and time frames of messages. +We will also investigate related security implications.

Attacks on DHTs

In this post, we only briefly describe common attacks on DHTs. +These attacks are mainly used for denial of service (DoS), +but can also used as parts of more sophisticated attacks, e.g. deanonymization attacks. +A future post on this research log will cover security aspects of ambient peer discovery with a focus on privacy and anonymity.

Sybil Attack

The power of an attacker in a DHT is proportional to the number of controlled nodes. +Controlling nodes comes at a high resource cost and/or requires controlling a botnet via a preliminary attack.

In a Sybil attack, an attacker generates lots of virtual node identities. +This allows the attacker to control a large portion of the ID space in a DHT at a relatively low cost. +Sybil attacks are especially powerful when the attacker can freely choose the IDs of generated nodes, +because this allows positioning at chosen points in the DHT.

Because Sybil attacks amplify the power of many attacks against DHTs, making Sybil attacks as difficult as possible is the basis for resilient DHT operation. The typical abstract mitigation approach is binding node identities to physical network interfaces. To some extent, this can be achieved by introducing IP address based limits. Further, generating node IDs can be bound by proof of work (PoW), which, however, comes with a set of shortcomings, e.g. relatively high costs on resource restricted devices. The discv5 doc describes both Sybil and eclipse attacks, as well as concrete mitigation techniques employed by discv5.

Eclipse Attack

In an eclipse attack, nodes controlled by the attacker poison the routing tables of other nodes in a way that parts of the DHT become eclipsed, i.e. invisible. When a controlled node is asked for the next step in a path, it provides another controlled node as the next step, effectively navigating the querying node around or away from certain areas of the DHT. While several mitigation techniques have been researched, there is no definitive protection against eclipse attacks available as of yet. One mitigation technique is increasing $\alpha$, the number of parallel queries, and following each concurrent path independently for the lookup.

The eclipse attack becomes very powerful in combination with a successful Sybil attack; +especially when the attacker can freely choose the position of the Sybil nodes.

The aforementioned new topic discovery of discv5 provides a good balance between protection against eclipse attacks and query performance.

Peer Exchange Protocol

While discv5 based ambient peer discovery has many desirable properties, resource restricted nodes and nodes behind restrictive NAT setups cannot run discv5 satisfactorily. With these nodes in mind, we started working on a simple peer exchange protocol based on ideas proposed here. The peer exchange protocol will allow nodes to ask peers for additional peers. Similar to discv5, the peer exchange protocol will also support capability discovery.

The new peer exchange protocol can be seen as a simple replacement for the Rendezvous protocol, which Waku v2 does not support. +While the rendezvous protocol involves nodes registering at rendezvous peers, the peer exchange protocol simply allows nodes to ask any peer for a list of peers (with a certain set of capabilities). +Rendezvous tends to introduce centralized elements as rendezvous peers have a super-peer role.

In the future, we will investigate resource usage of Waku v2 discv5 and provide suggestions for the minimal resources nodes should have to run discv5 satisfactorily.

Waku v2 comprises further protocols related to ambient peer discovery. We shortly mention them for context, even though they are not strictly ambient peer discovery protocols.

Gossipsub Peer Exchange Protocol

Gossipsub provides an integrated peer exchange mechanism which is also supported by Waku v2. +Gossipsub peer exchange works in a push manner. Nodes send peer lists to peers they prune from the active mesh. +This pruning is part of the gossipsub peer management, blurring the boundaries of peer management and ambient peer discovery.

We will investigate anonymity implications of this protocol and might disable it in favour of more anonymity-preserving protocols. +Sending a list of peers discloses information about the sending node. +We consider restricting these peer lists to cached peers that are currently not used in the active gossipsub mesh.

Capability Negotiation

Some of the ambient peer discovery methods used by Waku v2 will support capability discovery. This allows narrowing down the set of retrieved peers to peers that support specific capabilities. This is efficient because it avoids establishing connections to nodes that we are not interested in.

However, the ambient discovery interface does not require capability discovery, which will lead to nodes having peers with unknown capabilities in their peer lists. +We work on a capability negotiation protocol which allows nodes to ask peers

  • for their complete list of capabilities, and
  • whether they support a specific capability

We will investigate security implications, especially when sending full capability lists.

NAT traversal

For NAT traversal, Waku v2 currently supports the port mapping protocols UPnP and NAT-PMP / PCP.

In the future, we plan to add support for parts of ICE, e.g. STUN. +We do not plan to support TURN because TURN relays would introduce a centralized element. +A modified decentralized version of TURN featuring incentivization might be an option in the future; +strong peers could offer a relay service similar to TURN.

There are plans to integrate more NAT traversal into discv5, in which we might participate. +So far, the only traversal technique supported by discv5 is nodes receiving their external IP address in pong messages.

While NAT traversal is very important, adding more NAT traversal techniques is not a priority at the moment. +Nodes behind restrictive symmetric NAT setups cannot be discovered, but they can still discover peers in less restrictive setups. +While we wish to have as many nodes as possible to be discoverable via ambient peer discovery, two nodes behind a restrictive symmetric NAT can still exchange Waku v2 messages if they discovered a shared peer. +This is one of the nice resilience related properties of flooding based routing algorithms.

For mobile nodes, which suffer from changing IP addresses and double NAT setups, we plan using the peer exchange protocol to ask peers for more peers. +Besides saving resources on resource restricted devices, this approach works as long as peers are in less restrictive environments.

Conclusion and Future Prospects

Ambient peer discovery is an integral part of decentralized applications. It allows nodes to learn about peers in the network. +As of yet, Waku v2 supports DNS-based discovery and a slightly modified version of discv5. +We are working on further protocols, including a peer exchange protocol that allows resource restricted nodes to ask stronger peers for peer lists. +Further, we are working on adding capability discovery to our ambient discovery protocols, allowing nodes to find peers with desired properties.

These protocols can be combined in a modular way and allow Waku v2 nodes to build a strong and resilient mesh network, +even if some discovery methods are not available in a given situation.

We will investigate security properties of these discovery mechanisms with a focus on privacy and anonymity in a future post on this research log. +As an outlook we can already state that DHT approaches typically allow inferring information about the querying node. +Further, sending peer lists allows inferring the position of a node within the mesh, and by extension information about the node. +Waku v2 already provides some mitigation, because the mesh for transmitting actual messages, and the peer discovery network are separate. +To mitigate information leakage by transmitting peer lists, we plan to only reply with lists of peers that nodes do not use in their active meshes.


References

+ + + + \ No newline at end of file diff --git a/rlog/wakuv2-noise/index.html b/rlog/wakuv2-noise/index.html new file mode 100644 index 00000000..fd5c0c33 --- /dev/null +++ b/rlog/wakuv2-noise/index.html @@ -0,0 +1,28 @@ + + + + + +Noise handshakes as key-exchange mechanism for Waku | Vac Research + + + + + + + + + + +
+

Noise handshakes as key-exchange mechanism for Waku

by
22 min read

We provide an overview of the Noise Protocol Framework as a tool to design efficient and secure key-exchange mechanisms in Waku2.

Introduction

In this post we will provide an overview of how Waku v2 users can adopt Noise handshakes to agree on cryptographic keys used to securely encrypt messages.

This process belongs to the class of key-exchange mechanisms, consisting of all those protocols that, with different levels of complexity and security guarantees, allow two parties to publicly agree on a secret without letting anyone else know what this secret is.

But why do we need key-exchange mechanisms in the first place?

With the advent of public-key cryptography, it became possible to decouple encryption from decryption through use of two distinct cryptographic keys: one public, used to encrypt information and that can be made available to anyone, and one private (kept secret), which enables decryption of messages encrypted with its corresponding public key. The same does not happen in the case of symmetric encryption schemes where, instead, the same key is used for both encryption and decryption operations and hence cannot be publicly revealed as public keys can.

In order to address specific application needs, many different public, symmetric and hybrid cryptographic schemes were designed: Waku v1 and Waku v2, which inherits part of their design from the Ethereum messaging protocol Whisper, provide support to both public-key primitives (ECIES, ECDSA) and symmetric primitives (AES-256-GCM, KECCAK-256), used to sign, hash, encrypt and decrypt exchanged messages.

In principle, when communications employ public-key based encryption schemes (ECIES, in the case of Waku), there is no need for a key-agreement among parties: messages can be directly encrypted using the recipient's public key before being sent over the network. However, public-key encryption and decryption primitives are usually very inefficient in processing large amounts of data, and this may constitute a bottleneck for many of today's applications. Symmetric encryption schemes such as AES-256-GCM, on the other hand, are much more efficient, but the encryption/decryption key needs to be shared among users before any encrypted message is exchanged.

To counter the downsides given by each of these two approaches while taking advantage of their strengths, hybrid constructions were designed. In these, public-key primitives are employed to securely agree on a secret key which, in turn, is used with a symmetric cipher for encrypting messages. In other words, such constructions specify a (public-key based) key-agreement mechanism!

Waku, up to payload version 1, does not implement nor recommend any protocol for exchanging symmetric ciphers' keys, leaving such task to the application layer. It is important to note that the kind of key-agreement employed has a direct impact on the security properties that can be granted on later encrypted messages, while security requirements usually depend on the specific application for which encryption is needed in the first place.

In this regard, Status, which builds on top of Waku, implements a custom version of the X3DH key-agreement protocol, in order to allow users to instantiate end-to-end encrypted communication channels. However, although such a solution is optimal when applied to (distributed) E2E encrypted chats, it is not flexible enough to fit or simplify the variety of applications Waku aims to address. +Hence, proposing and implementing one or few key-agreements which provide certain (presumably strong) security guarantees, would inevitably degrade performances of all those applications for which, given their security requirements, more tailored and efficient key-exchange mechanisms can be employed.

Guided by different examples, in the following sections we will overview Noise, a protocol framework we are currently integrating in Waku, for building secure key-agreements between two parties. One of the great advantages of using Noise is that it is possible to add support for new key-exchanges by just specifying users' actions from a predefined list, requiring no or only minimal modifications to existing implementations. Furthermore, Noise provides a framework to systematically analyze protocols' security properties and the corresponding attacker threat models. This allows not only easily designing new key-agreements, eventually optimized for specific applications we want to address, but also easily analyzing or even formally verifying any such custom protocol!

We believe that with its enormous flexibility and features, Noise represents a perfect candidate for bringing key-exchange mechanisms in Waku.

The Diffie-Hellman Key-exchange

The formalization of modern public-key cryptography started with the pioneering work of Whitefield Diffie and Martin Hellman, who detailed one of the earliest known key-agreement protocols: the famous Diffie-Hellman Key-Exchange.

Diffie-Hellman (DH) key-exchange is largely used today and represents the main cryptographic building block on which Noise handshakes' security is based.

In turn, the security of DH is based on a mathematical problem called the discrete logarithm, which is believed to be hard when the agreement is practically instantiated using certain elliptic curves $E$ defined over finite fields $\mathbb{F}_p$.

Informally, a DH exchange between Alice and Bob proceeds as follows:

  • Alice picks a secret scalar $s_A \in \mathbb{F}_p$ and computes, using the underlying curve's arithmetic, the point $P_A = s_A \cdot P \in E(\mathbb{F}_p)$ for a certain pre-agreed public generator $P$ of the elliptic curve $E(\mathbb{F}_p)$. She then sends $P_A$ to Bob.
  • Similarly, Bob picks a secret scalar $s_B \in \mathbb{F}_p$, computes $P_B = s_B \cdot P \in E(\mathbb{F}_p)$ and sends $P_B$ to Alice.
  • By commutativity of scalar multiplication, both Alice and Bob can now compute the point $P_{AB} = s_A s_B \cdot P$, using the elliptic curve point received from the other party and their own secret scalar.

The assumed hardness of computing discrete logarithms in the elliptic curve ensures that it is not possible to compute $s_A$ or $s_B$ from $P_A$ and $P_B$, respectively. Another security assumption (named the Computational Diffie-Hellman assumption) ensures that it is not possible to compute $P_{AB}$ from $P$, $P_A$ and $P_B$. Hence the point $P_{AB}$ shared by Alice and Bob at the end of the above protocol cannot be efficiently computed by an attacker intercepting $P_A$ and $P_B$, and can then be used to generate a secret to be later employed, for example, as a symmetric encryption key.

On a side note, this protocol shows the interplay between two components typical to public-key based schemes: the scalars $s_A$ and $s_B$ can be seen as private keys associated to the public keys $P_A$ and $P_B$, respectively, which allow only Alice and Bob to compute the shared secret point $P_{AB}$.

Ephemeral and Static Public Keys

Although we assumed that it is practically impossible for an attacker to compute the randomly picked secret scalar from the corresponding public elliptic curve point, it may happen that such scalar gets compromised or can be guessed due to a faulty employed random number generator. In such cases, an attacker will be able to recover the final shared secret and all encryption keys eventually derived from that, with clear catastrophic consequences for the privacy of exchanged messages.

To mitigate such issues, multiple DH operations can be combined using two different types of exchanged elliptic curve points or, better, public keys: ephemeral keys, that is random keys used only once in a DH operation, and long-term static keys, used mainly for authentication purposes since employed multiple times.

Just to provide an example, let us suppose Alice and Bob perform the following custom DH-based key-exchange protocol:

  • Alice generates an ephemeral key $E_A = e_A \cdot P$ by picking a random scalar $e_A$ and sends $E_A$ to Bob;
  • Similarly, Bob generates an ephemeral key $E_B = e_B \cdot P$ and sends $E_B$ to Alice;
  • Alice and Bob compute $E_{AB} = e_A e_B \cdot P$ and from it derive a secret encryption key $k$.
  • Bob sends to Alice his static key $S_B = s_B \cdot P$ encrypted with $k$.
  • Alice encrypts with $k$ her static key $S_A = s_A \cdot P$ and sends it to Bob.
  • Alice and Bob decrypt the received static keys, compute the secret $S_{AB} = s_A s_B \cdot P$ and use it together with $E_{AB}$ to derive a new encryption key $\tilde{k}$ to be later used with a symmetric cipher.

In this protocol, if Alice's and/or Bob's static keys get compromised, it would not be possible to derive the final secret key $\tilde{k}$, since at least one ephemeral key among $E_A$ and $E_B$ has to be compromised too in order to recover the secret $E_{AB}$. Furthermore, since Alice's and Bob's long-term static keys are encrypted, an attacker intercepting exchanged (encrypted) public keys will not be able to link such communication to Alice or Bob, unless one of the ephemeral keys is compromised (and, even in such a case, none of the messages encrypted under the key $\tilde{k}$ can be decrypted).

The Noise Protocol Framework

In previous section we gave a small intuition on how multiple DH operations over ephemeral and static users' public keys can be combined to create different key-exchange protocols.

The Noise Protocol Framework, defines various rules for building custom key-exchange protocols while allowing easy analysis of the security properties and threat models provided given the type and order of the DH operations employed.

In Noise terminology, a key-agreement or Noise protocol consists of one or more Noise handshakes. During a Noise handshake, Alice and Bob exchange multiple (handshake) messages containing their ephemeral keys and/or static keys. These public keys are then used to perform a handshake-dependent sequence of Diffie-Hellman operations, whose results are all hashed into a shared secret key. Similarly as we have seen above, after a handshake is complete, each party will use the derived secret key to send and receive authenticated encrypted data by employing a symmetric cipher.

Depending on the handshake pattern adopted, different security guarantees can be provided on messages encrypted using a handshake-derived key.

The Noise handshakes we support in Waku all provide the following security properties:

  • Confidentiality: the adversary should not be able to learn what data is being sent between Alice and Bob.
  • Strong forward secrecy: an active adversary cannot decrypt messages nor infer any information on the employed encryption key, even in the case he has access to Alice's and Bob's long-term private keys (during or after their communication).
  • Authenticity: the adversary should not be able to cause either Alice or Bob to accept messages coming from a party different than their original senders.
  • Integrity: the adversary should not be able to cause Alice or Bob to accept data that has been tampered with.
  • Identity-hiding: once a secure communication channel is established, a passive adversary should not be able to link exchanged encrypted messages to their corresponding sender and recipient by knowing their long-term static keys.

We refer to Noise specification for more formal security definitions and precise threat models relative to Waku supported Noise Handshake patterns.

Message patterns

Noise handshakes involving DH operations over ephemeral and static keys can be succinctly sketched using the following set of handshake message tokens: e,s,ee,se,es,ss.

Tokens employing single letters denote (the type of) users' public keys: e refers to randomly generated ephemeral key(s), while s indicates the users' long-term static key(s).

Two-letter tokens, instead, denote DH operations over the two users' public keys the token refers to, given that the left token letter refers to the handshake initiator's public key, while the right token letter indicates the used responder's public key. Thus, if Alice started a handshake with Bob, the es token will shortly represent a DH operation among Alice's ephemeral key e and Bob's static key s.

Since, in order to perform any DH operations users need to share (or pre-share) the corresponding public keys, Noise compactly represents messages' exchanges using the two directions -> and <-, where the -> denotes a message (arbitrary and/or DH public key) from the initiator to the responder, while <- the opposite.

Hence a message pattern consisting of a direction and one or multiple tokens such as <- e, s, es has to be interpreted one token at a time: in this example, the responder is sending his ephemeral and static key to the initiator and is then executing a DH operation over the initiator's ephemeral key e (shared in a previously exchanged message pattern) and his static key s. On the other hand, such message indicates also that the initiator received the responder's ephemeral and static keys e and s, respectively, and performed a DH operation over his ephemeral key and the responder's just received static key s. In this way, both parties will be able to derive at the end of each message pattern processed the same shared secret, which is eventually used to update any derived symmetric encryption keys computed so far.

In some cases, DH public keys employed in a handshake are pre-shared before the handshake itself starts. In order to chronologically separate exchanged keys and DH operations performed before and during a handshake, Noise employs the ... delimiter.

For example, the following message patterns

<- e
...
-> e, ee

indicates that the initiator knew the responder's ephemeral key before sending his own ephemeral key and executing a DH operation between both parties' ephemeral keys (similarly, the responder receives the initiator's ephemeral key and does a ee DH operation).

At this point it should be clear how such notation is able to compactly represent a large variety of DH based key-agreements. Nevertheless, we can easily define additional tokens and processing rules in order to address specific applications and security requirements, such as the psk token used to process arbitrary pre-shared key material.

As an example of Noise flexibility, the custom protocol we detailed above can be shortly represented as (Alice is on the left):

-> e
<- e, ee, s
-> s, ss

where after each DH operation an encryption key is derived (along with the secrets computed by all previously executed DH operations) in order to encrypt/decrypt any subsequent sent/received message.

Another example is given by the possibility to replicate within Noise the well established Signal's X3DH key-agreement protocols, thus making the latter a general framework to design and study security of many practical and widespread DH-based key-exchange protocols.

The Noise State Objects

We mentioned multiple times that parties derive an encryption key each time they perform a DH operation, but how does this work in more details?

Noise defines three state objects: a Handshake State, a Symmetric State and a Cipher State, each encapsulated into each other and instantiated during the execution of a handshake.

The Handshake State object stores the user's and other party's received ephemeral and static keys (if any) and embeds a Symmetric State object.

The Symmetric State, instead, stores a handshake hash value h, iteratively updated with any message read/received and DH secret computed, and a chaining key ck, updated using a key derivation function every time a DH secret is computed. This object further embeds a Cipher State.

Lastly, the Cipher State stores a symmetric encryption key k and a counter n used to encrypt and decrypt messages exchanged during the handshake (not only static keys, but also arbitrary payloads). This key and counter are refreshed every time the chaining key is updated.

While processing each handshake's message pattern token, all these objects are updated according to some specific processing rules which employ a combination of public-key primitives, hash and key-derivation functions and symmetric ciphers. It is important to note, however, that at the end of each processed message pattern, the two users will share the same Symmetric and Cipher State embedded in their respective Handshake States.

Once a handshake is complete, users derive two new Cipher States and can then discard the Handshake State object (and, thus, the embedded Symmetric State and Cipher State objects) employed during the handshake.

These two Cipher States are used to encrypt and decrypt all outbound and inbound after-handshake messages, respectively, and only these messages are granted the confidentiality, authenticity, integrity and identity-hiding properties we detailed above.

For more details on processing rules, we refer to Noise specifications.

Supported Noise Handshakes in Waku

The Noise handshakes we support in Waku address four typical scenarios occurring when an encrypted communication channel between Alice and Bob is going to be created:

  • Alice and Bob know each others' static key.
  • Alice knows Bob's static key;
  • Alice and Bob share no key material and they don't know each others' static key.
  • Alice and Bob share some key material, but they don't know each others' static key.

The possibility to have handshakes based on the reciprocal knowledge parties have of each other allows designing Noise handshakes that can quickly reach the desired level of security on exchanged encrypted messages while keeping the number of interactions between Alice and Bob to a minimum.

Nonetheless, due to the pure token-based nature of handshake processing rules, implementations can easily add support to any custom handshake pattern with minor modifications, in case more specific application use-cases need to be addressed.

On a side note, we already mentioned that identity-hiding properties can be guaranteed against a passive attacker that only reads the communication occurring between Alice and Bob. However, an active attacker who compromised one party's static key and actively interferes with the parties' exchanged messages, may lower the identity-hiding security guarantees provided by some handshake patterns. In our security model we exclude such adversary, but, for completeness, in the following we report a summary of possible de-anonymization attacks that can be performed by such an active attacker.

For more details on supported handshakes and on how these are implemented in Waku, we refer to 35/WAKU2-NOISE RFC.

The K1K1 Handshake

If Alice and Bob know each others' static key (e.g., these are public or were already exchanged in a previous handshake), they MAY execute a K1K1 handshake. In Noise notation (Alice is on the left) this can be sketched as:

 K1K1:
-> s
<- s
...
-> e
<- e, ee, es
-> se

We note that here only ephemeral keys are exchanged. This handshake is useful in case Alice needs to instantiate a new separate encrypted communication channel with Bob, e.g. opening multiple parallel connections, file transfers, etc.

Security considerations on identity-hiding (active attacker): no static key is transmitted, but an active attacker impersonating Alice can check candidates for Bob's static key.

The XK1 Handshake

Here, Alice knows how to initiate a communication with Bob and she knows his public static key: such discovery can be achieved, for example, through a publicly accessible register of users' static keys, smart contracts, or through a previous public/private advertisement of Bob's static key.

A Noise handshake pattern that suits this scenario is XK1:

 XK1:
<- s
...
-> e
<- e, ee, es
-> s, se

Within this handshake, Alice and Bob reciprocally authenticate their static keys s using ephemeral keys e. We note that while Bob's static key is assumed to be known to Alice (and hence is not transmitted), Alice's static key is sent to Bob encrypted with a key derived from both parties' ephemeral keys and Bob's static key.

Security considerations on identity-hiding (active attacker): Alice's static key is encrypted with forward secrecy to an authenticated party. An active attacker initiating the handshake can check candidates for Bob's static key against recorded/accepted exchanged handshake messages.

The XX and XXpsk0 Handshakes

If Alice is not aware of any static key belonging to Bob (and neither Bob knows anything about Alice), she can execute an XX handshake, where each party transmits to the other its own static key.

The handshake goes as follows:

 XX:
-> e
<- e, ee, s, es
-> s, se

We note that the main difference with XK1 is that in the second step Bob sends to Alice his own static key encrypted with a key obtained from an ephemeral-ephemeral Diffie-Hellman exchange.

This handshake can be slightly changed in case both Alice and Bob pre-share some secret psk which can be used to strengthen their mutual authentication during the handshake execution. One of the resulting protocols, called XXpsk0, goes as follows:

 XXpsk0:
-> psk, e
<- e, ee, s, es
-> s, se

The main difference with XX is that Alice's and Bob's static keys, when transmitted, would be encrypted with a key derived from psk as well.

Security considerations on identity-hiding (active attacker): Alice's static key is encrypted with forward secrecy to an authenticated party for both XX and XXpsk0 handshakes. In XX, Bob's static key is encrypted with forward secrecy but is transmitted to a non-authenticated user which can then be an active attacker. In XXpsk0, instead, Bob's secret key is protected by forward secrecy to a partially authenticated party (through the pre-shared secret psk but not through any static key), provided that psk was not previously compromised (in such case the identity-hiding properties provided by the XX handshake apply).

Session Management and Multi-Device Support

When two users complete a Noise handshake, an encryption/decryption session - or Noise session - consisting of two Cipher States is instantiated.

By identifying Noise session with a session-id derived from the handshake's cryptographic material, we can take advantage of the PubSub/GossipSub protocols used by Waku for relaying messages in order to manage instantiated Noise sessions.

The core idea is to exchange after-handshake messages (encrypted with a Cipher State specific to the Noise session), over a content topic derived from the (secret) session-id the corresponding session refers to.

This allows to decouple the handshaking phase from the actual encrypted communication, thus improving users' identity-hiding capabilities.

Furthermore, by publicly revealing a value derived from session-id on the corresponding session content topic, a Noise session can be marked as stale, enabling peers to save resources by discarding any eventually stored message sent to such content topic.

One relevant aspect in today's applications is the possibility for users to employ different devices in their communications. In some cases, this is non-trivial to achieve since, for example, encrypted messages might be required to be synced on different devices which do not necessarily share the necessary key material for decryption and may be temporarily offline.

We address this by requiring each user's device to instantiate multiple Noise sessions either with all user's other devices which, in turn, all together share a Noise session with the other party, or by directly instantiating a Noise session with all other party's devices.

We named these two approaches $N11M$ and $NM$, respectively, which are in turn loosely based on the paper “Multi-Device for Signal” and Signal’s Sesame Algorithm.

Informally, in the $N11M$ session management scheme, once the first Noise session between any of Alice’s and Bob’s devices is instantiated, its session information is securely propagated to all other devices using previously instantiated Noise sessions. Hence, all devices are able to send and receive new messages on the content topic associated to such session.

In the $NM$ session management scheme, instead, all pairs of Alice's and Bob's devices have a distinct Noise session: a message is then sent from the currently-in-use sender’s device to all recipient’s devices, by properly encrypting and sending it to the content topics of each corresponding Noise session. If sent messages should be available on all sender’s devices as well, we require each pair of sender’s devices to instantiate a Noise session used for syncing purposes.

For more technical details on how Noise sessions are instantiated and managed within these two mechanisms and the different trade-offs provided by the latter, we refer to 37/WAKU2-NOISE-SESSIONS.

Conclusions

In this post we provided an overview of Noise, a protocol framework for designing Diffie-Hellman based key-exchange mechanisms allowing systematic security and threat model analysis.

The flexibility provided by Noise components allows not only to fully replicate with same security guarantees well established key-exchange primitives such as X3DH, currently employed by Status 5/TRANSPORT-SECURITY, but enables also optimizations based on the reciprocal knowledge parties have of each other while allowing easier protocols' security analysis and (formal) verification.

Furthermore, different handshakes can be combined and executed one after each other, a particularly useful feature to authenticate multiple static keys employed by different applications but also to ease keys revocation.

The possibility to manage Noise sessions over multiple devices and the fact that handshakes can be concretely instantiated using modern, fast and secure cryptographic primitives such as ChaChaPoly and BLAKE2b, make Noise one of the best candidates to efficiently and securely address the many different needs of applications built on top of Waku requiring key-agreement.

Future steps

The available implementation of Noise in nwaku, although mostly complete, is still in its testing phase. As future steps we would like to:

  • have an extensively tested and robust Noise implementation;
  • formalize, implement and test the performance of the two proposed $N11M$ and $NM$ session management mechanisms and their suitability for common use-case scenarios;
  • provide Waku network nodes a native protocol to readily support key-exchanges, strongly-encrypted communication and multi-device session management mechanisms with none-to-little interaction besides applications' connection requests.

References

+ + + + \ No newline at end of file diff --git a/rlog/wakuv2-relay-anon/index.html b/rlog/wakuv2-relay-anon/index.html new file mode 100644 index 00000000..36598d36 --- /dev/null +++ b/rlog/wakuv2-relay-anon/index.html @@ -0,0 +1,152 @@ + + + + + +Waku Privacy and Anonymity Analysis Part I: Definitions and Waku Relay | Vac Research + + + + + + + + + + +
+

Waku Privacy and Anonymity Analysis Part I: Definitions and Waku Relay

by
17 min read

Introducing a basic threat model and privacy/anonymity analysis for the Waku v2 relay protocol.

Waku v2 enables secure, privacy preserving communication using a set of modular P2P protocols. +Waku v2 also aims at protecting the user's anonymity. +This post is the first in a series about Waku v2 security, privacy, and anonymity. +The goal is to eventually have a full privacy and anonymity analysis for each of the Waku v2 protocols, as well as covering the interactions of various Waku v2 protocols. +This provides transparency with respect to Waku's current privacy and anonymity guarantees, and also identifies weak points that we have to address.

In this post, we first give an informal description of security, privacy and anonymity in the context of Waku v2. +For each definition, we summarize Waku's current guarantees regarding the respective property. +We also provide attacker models, an attack-based threat model, and a first anonymity analysis of Waku v2 relay within the respective models.

Waku comprises many protocols that can be combined in a modular way. +For our privacy and anonymity analysis, we start with the relay protocol because it is at the core of Waku v2 enabling Waku's publish subscribe approach to P2P messaging. +In its current form, Waku relay is a minor extension of libp2p GossipSub.

Figure 1: The Waku v2 relay mesh is based on the [GossipSub mesh](https://docs.libp2p.io/concepts/publish-subscribe#types-of-peering)

Informal Definitions: Security, Privacy, and Anonymity

The concepts of security, privacy, and anonymity are linked and have quite a bit of overlap.

Security

Of the three, Security has the clearest agreed upon definition, +at least regarding its key concepts: confidentiality, integrity, and availability.

  • confidentiality: data is not disclosed to unauthorized entities.
  • integrity: data is not modified by unauthorized entities.
  • availability: data is available, i.e. accessible by authorized entities.

While these are the key concepts, the definition of information security has been extended over time including further concepts, +e.g. authentication and non-repudiation. +We might cover these in future posts.

Privacy

Privacy allows users to choose which data and information

  • they want to share
  • and with whom they want to share it.

This includes data and information that is associated with and/or generated by users. +Protected data also comprises metadata that might be generated without users being aware of it. +This means, no further information about the sender or the message is leaked. +Metadata that is protected as part of the privacy-preserving property does not cover protecting the identities of sender and receiver. +Identities are protected by the anonymity property.

Often privacy is realized by the confidentiality property of security. +This neither makes privacy and security the same, nor the one a sub category of the other. +While security is abstract itself (its properties can be realized in various ways), privacy lives on a more abstract level using security properties. +Privacy typically does not use integrity and availability. +An adversary who has no access to the private data, because the message has been encrypted, could still alter the message.

Waku offers confidentiality via secure channels set up with the help of the Noise Protocol Framework. +Using these secure channels, message content is only disclosed to the intended receivers. +They also provide good metadata protection properties. +However, we do not have a metadata protection analysis as of yet, +which is part of our privacy/anonymity roadmap.

Anonymity

Privacy and anonymity are closely linked. +Both the identity of a user and data that allows inferring a user's identity should be part of the privacy policy. +For the purpose of analysis, we want to have a clearer separation between these concepts.

We define anonymity as unlinkability of users' identities and their shared data and/or actions.

We subdivide anonymity into receiver anonymity and sender anonymity.

Receiver Anonymity

We define receiver anonymity as unlinkability of users' identities and the data they receive and/or related actions. +The data transmitted via Waku relay must be a Waku message, which contains a content topic field. +Because each message is associated with a content topic, and each receiver is interested in messages with specific content topics, +receiver anonymity in the context of Waku corresponds to subscriber-topic unlinkability. +An example for the "action" part of our receiver anonymity definition is subscribing to a specific topic.

The Waku message's content topic is not related to the libp2p pubsub topic. +For now, Waku uses a single libp2p pubsub topic, which means messages are propagated via a single mesh of peers. +With this, the receiver discloses its participation in Waku on the gossipsub layer. +We will leave the analysis of libp2p gossipsub to a future article within this series, and only provide a few hints and pointers here.

Waku offers k-anonymity regarding content topic interest in the global adversary model. K-anonymity in the context of Waku means an attacker can link receivers to content topics with a maximum certainty of $1/k$. The larger $k$, the less certainty the attacker gains. Receivers basically hide in a pool of $k$ content topics, any subset of which could be topics they subscribed to. The attacker does not know which of those the receiver actually subscribed to, and the receiver enjoys plausible deniability regarding content topic subscription. Assuming there are $n$ Waku content topics, a receiver has $n$-anonymity with respect to association to a specific content topic.

Technically, Waku allows distributing messages over several libp2p pubsub topics. This yields $k$-anonymity, assuming $k$ content topics share the same pubsub topic. However, if done wrongly, such sharding of pubsub topics can breach anonymity. A formal specification of anonymity-preserving topic sharding building on the concepts of partitioned topics is part of our roadmap.

Also, Waku is not directly concerned with 1:1 communication, so for this post, 1:1 communication is out of scope. +Channels for 1:1 communication can be implemented on top of Waku relay. +In the future, a 1:1 communication protocol might be added to Waku. +Similar to topic sharding, it would maintain receiver anonymity leveraging partitioned topics.

Sender Anonymity

We define sender anonymity as unlinkability of users' identities and the data they send and/or related actions. +Because the data in the context of Waku is Waku messages, sender anonymity corresponds to sender-message unlinkability.

In summary, Waku offers weak sender anonymity because of Waku's strict no sign policy, +which has its origins in the Ethereum consensus specs. +17/WAKU-RLN-RELAY and 18/WAKU2-SWAP mitigate replay and injection attacks.

Waku currently does not offer sender anonymity in stronger attacker models, as well as cannot protect against targeted attacks in weaker attacker models like the single or multi node attacker. +We will cover this in more detail in later sections.

Anonymity Trilemma

The Anonymity trilemma states that only two out of strong anonymity, low bandwidth, and low latency can be guaranteed in the global on-net attacker model. +Waku's goal, being a modular set of protocols, is to offer any combination of two out of these three properties, as well as blends. +An example for blending is an adjustable number of pubsub topics and peers in the respective pubsub topic mesh; this allows tuning the trade-off between anonymity and bandwidth.

Figure 2: Anonymity Trilemma: pick two.

A fourth factor that influences the anonymity trilemma is frequency and patterns of messages. +The more messages there are, and the more randomly distributed they are, the better the anonymity protection offered by a given anonymous communication protocol. +So, incentivising users to use the protocol, for instance by lowering entry barriers, helps protecting the anonymity of all users. +The frequency/patterns factor is also related to the above described k-anonymity.

Censorship Resistance

Another security related property that Waku aims to offer is censorship resistance. +Censorship resistance guarantees that users can participate even if an attacker tries to deny them access. +So, censorship resistance ties into the availability aspect of security. +In the context of Waku that means users should be able to send messages as well as receive all messages they are interested in, +even if an attacker tries to prevent them from disseminating messages or tries to deny them access to messages.

Currently, Waku only guarantees censorship resistance in the weak single node attacker model. +While currently employed secure channels mitigate targeted censorship, e.g. blocking specific content topics, +general censorship resistance in strong attacker models is part of our roadmap. +Among other options, we will investigate Pluggable Transports in future articles.

Attacker Types

The following lists various attacker types with varying degrees of power. +The more power an attacker has, the more difficult it is to gain the respective attacker position.

Each attacker type comes in a passive and an active variant. +While a passive attacker can stay hidden and is not suspicious, +the respective active attacker has more (or at least the same) deanonymization power.

We also distinguish between internal and external attackers.

Internal

With respect to Waku relay, an internal attacker participates in the same pubsub topic as its victims. +Without additional measures on higher layer protocols, access to an internal position is easy to get.

Single Node

This attacker controls a single node. +Because this position corresponds to normal usage of Waku relay, it is trivial to obtain.

Multi Node

This attacker controls several nodes. We assume a smaller static number of controlled nodes. +The multi node position can be achieved relatively easily by setting up multiple nodes. +Botnets might be leveraged to increase the number of available hosts. +Multi node attackers could use Sybil attacks to increase the number of controlled nodes. +A countermeasure is for nodes to only accept libp2p gossipsub graft requests from peers with different IP addresses, or even different subnets.

Linearly Scaling Nodes

This attacker controls a number of nodes that scales linearly with the number of nodes in the network. +This attacker is especially interesting to investigate in the context of DHT security, +which Waku uses for ambient peer discovery.

External

An external attacker can only see encrypted traffic (protected by a secure channel set up with Noise). Because an internal position can be easily obtained, in practice external attackers would mount combined attacks that leverage both internal and external attacks. We cover this more below when describing attacks.

Local

A local attacker has access to communication links in a local network segment. +This could be a rogue access point (with routing capability).

AS

An AS attacker controls a single AS (autonomous system). +A passive AS attacker can listen to traffic on arbitrary links within the AS. +An active AS attacker can drop, inject, and alter traffic on arbitrary links within the AS.

In practice, a malicious ISP would be considered as an AS attacker. +A malicious ISP could also easily setup a set of nodes at specific points in the network, +gaining internal attack power similar to a strong multi node attacker.

Global On-Net

A global on-net attacker has complete overview over the whole network. +A passive global attacker can listen to traffic on all links, +while the active global attacker basically carries the traffic: it can freely drop, inject, and alter traffic at all positions in the network. +This basically corresponds to the Dolev-Yao model.

An entity with this power would, in practice, also have the power of the internal linearly scaling nodes attacker.

Attack-based Threat Analysis

The following lists various attacks including the weakest attacker model in which the attack can be successfully performed. +The respective attack can be performed in all stronger attacker models as well.

An attack is considered more powerful if it can be successfully performed in a weaker attacker model.

If not stated otherwise, we look at these attacks with respect to their capability to deanonymize the message sender.

Scope

In this post, we introduce a simple tightly scoped threat model for Waku v2 Relay, which will be extended in the course of this article series.

In this first post, we will look at the relay protocol in isolation. +Even though many threats arise from layers Waku relay is based on, and layers that in turn live on top of relay, +we want to first look at relay in isolation because it is at the core of Waku v2. +Addressing and trying to solve all security issues of a complex system at once is an overwhelming task, which is why we focus on the soundness of relay first.

This also goes well with the modular design philosophy of Waku v2, as layers of varying levels of security guarantees can be built on top of relay, all of which can rely on the guarantees that Waku provides. Instead of looking at a multiplicative explosion of possible interactions, we look at the core in this article, and cover the most relevant combinations in future posts.

Further restricting the scope, we will look at the data field of a relay message as a black box. +In a second article on Waku v2 relay, we will look into the data field, which according to the specification of Waku v2 relay must be a Waku v2 message. +We only consider messages with version field 2, which indicates that the payload has to be encoded using 35/WAKU2-NOISE.

Prerequisite: Get a Specific Position in the Network

Some attacks require the attacker node(s) to be in a specific position in the network. +In most cases, this corresponds to trying to get into the mesh peer list for the desired pubsub topic of the victim node.

In libp2p gossipsub, and by extension Waku v2 relay, nodes can simply send a graft message for the desired topic to the victim node. +If the victim node still has open slots, the attacker gets the desired position. +This only requires the attacker to know the gossipsub multiaddress of the victim node.

A linearly scaling nodes attacker can leverage DHT based discovery systems to boost the probability of malicious nodes being returned, which in turn significantly increases the probability of attacker nodes ending up in the peer lists of victim nodes. +Waku v2 discv5 will employ countermeasures that mitigate the amplifying effect this attacker type can achieve.

Replay Attack

In the scope we defined above, Waku v2 is resilient against replay attacks. +GossipSub nodes, and by extension Waku relay nodes, feature a seen cache, and only relay messages they have not seen before. +Further, replay attacks will be punished by RLN and SWAP.

Neighbourhood Surveillance

This attack can be performed by a single node attacker that is connected to all peers of the victim node $v$ with respect to a specific topic mesh. The attacker also has to be connected to $v$. In this position, the attacker will receive messages $m_v$ sent by $v$ both on the direct path from $v$, and on indirect paths relayed by peers of $v$. It will also receive messages $m_x$ that are not sent by $v$. These messages $m_x$ are relayed by both $v$ and the peers of $v$. Messages that are received (significantly) faster from $v$ than from any other of $v$'s peers are very likely messages that $v$ sent, because for these messages the attacker is one hop closer to the source.

The attacker can (periodically) measure latency between itself and $v$, and between itself and the peers of $v$ to get more accurate estimates for the expected timings. +An AS attacker (and if the topology allows, even a local attacker) could also learn the latency between $v$ and its well-behaving peers. +An active AS attacker could also increase the latency between $v$ and its peers to make the timing differences more prominent. +This, however, might lead to $v$ switching to other peers.

This attack cannot (reliably) distinguish messages $m_v$ sent by $v$ from messages $m_y$ relayed by peers of $v$ the attacker is not connected to. +Still, there are hop-count variations that might be leveraged. +Messages $m_v$ always have a hop-count of 1 on the path from $v$ to the attacker, while all other paths are longer. +Messages $m_y$ might have the same hop-count on the path from $v$ as well as on other paths.

Controlled Neighbourhood

If a multi node attacker manages to control all peers of the victim node, it can trivially tell which messages originated from $v$.

Observing Messages

If Waku relay was not protected with Noise, the AS attacker could simply check for messages leaving $v$ which have not been relayed to $v$. +These are the messages sent by $v$. +Waku relay protects against this attack by employing secure channels set up using Noise.

Correlation

Monitoring all traffic (in an AS or globally) allows the attacker to identify traffic correlated with messages originating from $v$. +This (alone) does not allow an external attacker to learn which message $v$ sent, but it allows identifying the respective traffic propagating through the network. +The more traffic in the network, the lower the success rate of this attack.

Combined with just a few nodes controlled by the attacker, the actual message associated with the correlated traffic can eventually be identified.

DoS

An active single node attacker could run a disruption attack by

  • (1) dropping messages that should be relayed
  • (2) flooding neighbours with bogus messages

While (1) has a negative effect on availability, the impact is not significant. +A linearly scaling botnet attacker, however, could significantly disrupt the network with such an attack. +(2) is thwarted by RLN. +Also, SWAP helps mitigate DoS attacks.

A local attacker can DoS Waku by dropping all Waku traffic within its controlled network segment. +An AS attacker can DoS Waku within its authority, while a global attacker can DoS the whole network. +A countermeasure are censorship resistance techniques like Pluggable Transports.

Summary and Future Work

Currently, Waku v2 relay offers k-anonymity with respect to receiver anonymity. +This also includes k-anonymity towards legitimate members of the same topic.

Waku v2 relay offers sender anonymity in the single node attacker model with its strict no sign policy. +Currently, Waku v2 does not guarantee sender anonymity in the multi node and stronger attacker models. +However, we are working on modular anonymity-preserving protocols and building blocks as part of our privacy/anonymity roadmap. +The goal is to allow tunable anonymity with respect to trade-offs between strong anonymity, low bandwidth, and low latency. +All of these cannot be fully guaranteed as the anonymity trilemma states. +Some applications have specific requirements, e.g. low latency, which require a compromise on anonymity. +Anonymity-preserving mechanisms we plan to investigate and eventually specify as pluggable anonymity protocols for Waku comprise

  • Dandelion++ for lightweight anonymity;
  • onion routing as a building block adding a low latency anonymization layer;
  • a mix network for providing strong anonymity (on top of onion routing) even in the strongest attacker model at the cost of higher latency.

These pluggable anonymity-preserving protocols will form a sub-set of the Waku v2 protocol set. +As an intermediate step, we might directly employ Tor for onion-routing, and Nym as a mix-net layer.

In future research log posts, we will cover further Waku v2 protocols and identify anonymity problems that will be added to our roadmap. +These protocols comprise

  • 13/WAKU2-STORE, which can violate receiver anonymity as it allows filtering by content topic. +A countermeasure is using the content topic exclusively for local filters.
  • 12/WAKU2-FILTER, which discloses nodes' interest in topics;
  • 19/WAKU2-LIGHTPUSH, which also discloses nodes' interest in topics and links the lightpush client as the sender of a message to the lightpush service node;
  • 21/WAKU2-FTSTORE, which discloses nodes' interest in specific time ranges allowing to infer information like online times.

While these protocols are not necessary for the operation of Waku v2, and can be seen as pluggable features, +we aim to provide alternatives without the cost of lowering the anonymity level.

References

+ + + + \ No newline at end of file diff --git a/rlog/wechat-replacement-need/index.html b/rlog/wechat-replacement-need/index.html new file mode 100644 index 00000000..01f35537 --- /dev/null +++ b/rlog/wechat-replacement-need/index.html @@ -0,0 +1,26 @@ + + + + + +What Would a WeChat Replacement Need? | Vac Research + + + + + + + + + + +
+

What Would a WeChat Replacement Need?

by
26 min read

What would a self-sovereign, private, censorship-resistant and open alternative to WeChat look like?

What would it take to replace WeChat? More specifically, what would a self-sovereign, private, censorship-resistant and open alternative look like? One that allows people to communicate, coordinate and transact freely.

Background

What WeChat provides to the end-user

Let's first look at some of the things that WeChat provides. It is a lot:

  • Messaging: 1:1 and group chat. Text, as well as voice and video. Post gifs. Share location.
  • Group chat: Limited to 500 people; above 100 people need to verify with a bank account. Also has group video chat and QR code to join a group.
  • Timeline/Moments: Post comments with attachments and have people like/comment on it.
  • Location Discovery: See WeChat users that are nearby.
  • Profile: Nickname and profile picture; can alias people.
  • "Broadcast" messages: Send one message to many contacts, up to 200 people (spam limited).
  • Contacts: Max 5000 contacts (people get around it with multiple accounts and sim cards).
  • App reach: Many different web apps, extensions, native apps, etc. Scan QR code to access web app from phone.
  • Selective posting: Decide who can view your posts and who can view your comments on other people's post.
  • Transact: Send money gifts through red envelopes.
  • Transact: Use WeChat pay to transfer money to friends and businesses; linked account with Alipay that is connected to your bank account.
  • Services: Find taxis and get notifications; book flights, train tickets, hotels etc.
  • Mini apps: API for all kinds of apps that allow you to provide services etc.
  • Picture in picture: allowing you to have a video call while using the app.

And much more. Not going to go through it all in detail, and there are probably many things I don't know about WeChat since I'm not a heavy user living in mainland China.

How WeChat works - a toy model

This is an overly simplistic model of how WeChat works, but it is sufficient for our purposes. This general design applies to most traditional client-server apps today.

To sign up for an account you need a phone number or equivalent. To get access to some features you need to verify your identity further, for example with official ID and/or bank account.

When you sign up this creates an entry in the WeChat server, from now on treated as a black box. You authenticate with that box, and that's where you get your messages from. If you go online the app asks that box for messages you have received while you were offline. If you login from a different app your contacts and conversations are synced from that box.

The box gives you an account, it deals with routing to your contacts, it stores messages and attachments and gives access to mini apps that people have uploaded. For transacting money, there is a partnership with a different company that has a different box which talks to your bank account.

This is done in such a way that they can support a billion users with the features above, no sweat.

Whoever controls that box can see who you are talking with and what the content of those messages is. There is no end to end encryption. If WeChat/Tencent disagrees with you for some reason they can ban you. This means you can't interact with the box under that name anymore.

What do we want?

We want something that is self-sovereign, private, censorship-resistant and open that allows individuals and groups of people to communicate and transact freely. To explore what this means in more detail, without getting lost in the weeds, we provide the following list of properties. A lot of these are tied together, and some fall out of the other requirements. Some of them stand in slight opposition to each other.

Self-sovereign identity. Exercises authority within your own sphere. If you aren't harming anyone, you should be able to have an account and communicate with other people.

Pseudonymity, and ideally total anonymity. Not having your identity tied to your real name (e.g. through phone number, bank account, ID, etc). This allows people to act more freely without being overly worried about censorship and coercion in the real world. While total anonymity is even more desirable - especially to break multiple hops to a true-name action - real-world constraints sometimes makes this more challenging.

Private and secure communication. Your communication and who you transact with should be for your eyes only. This includes transactions (transfer of value) as a form of communication.

Censorship-resistance. Not being able to easily censor individuals on the platform. Both at an individual, group and collective level. Not having single points of failure that allow service to be disrupted.

Decentralization. Partly falls out of censorship-resistance and other properties. If infrastructure isn't decentralized it means there's a single point of failure that can be disrupted. This is more of a tool than a goal on its own, but it is an important tool.

Built for mass adoption. Includes scalability, UX (latency, reliability, bandwidth consumption, UI etc), and allowing for people to stick around. One way of doing this is to allow users to discover people they want to talk to.

Scalability. Infrastructure needs to support a lot of users to be a viable alternative. Like, a billion of them (eventually).

Fundamentals in place to support great user experience. To be a viable alternative, aside from good UI and distribution, fundamentals such as latency, bandwidth usage, consistency etc must support great UX to be a viable alternative.

Works for resource restricted devices, including smartphones. Most people will use a smartphone to use this. This means it has to work well on them and similar devices, without becoming a second-class citizen where we ignore properties such as censorship-resistance and privacy. Some concession to reality will be necessary due to additional constraints, which leads us to...

Adaptive nodes. Nodes will have different capabilities, and perhaps at different times. To maintain a lot of the properties described here it is desirable if as many participants as possible are first-class citizens. If a phone is switching from a limited data plan to a WiFi network or from battery to AC power it can do more useful work, and so on. Likewise for a laptop with a lot of free disk space and spare compute power, etc.

Sustainable. If there's no centralized, top down ad-driven model, this means all the infrastructure has to be sustainable somehow. Since these are individual entities, this means it has to be paid for. While altruistic modes and similar can be used, this likely requires some form of incentivization scheme for useful services provided in the network. Related: free rider problem.

Spam resistant. Relates to sustainability, scalability and built for mass adoption. Made more difficult by pseudonymous identity due to whitewashing attacks.

Trust-minimized. To know that properties are provided for and aren't compromised, various ways of minimizing trust requirements are useful. This also related to mass adoption and social cohesion. Examples include: open and audited protocols, open source, reproducible builds, etc. This also relates to how mini apps are provided for, since we may not know their source but want to be able to use them anyway.

Open source. Related to above, where we must be able to inspect the software to know that it functions as advertised and hasn't been compromised, e.g. by uploading private data to a third party.

Some of these are graded and a bit subtle, i.e.:

  • Censorship resistance would ideally be able to absorb Internet shutdowns. This would require an extensive MANET/meshnet infrastructure, which while desirable, requires a lot of challenges to be overcome to be feasible.
  • Privacy would ideally make all actions (optionally) totally anonymous, though this may incur undue costs on bandwidth and latency, which impacts user experience.
  • Decentralization, certain topologies, such as DHTs, are efficient and quite decentralized but still have some centralized aspects, which makes it attackable in various ways. Ditto for blockchains compared with bearer instruments which requires some coordinating infrastructure, compared with naturally occurring assets such as precious metals.
  • "Discover people" and striving for "total anonymity" might initially seem incompatible. The idea is to provide for sane defaults, and then allow people to decide how much information they want to disclose. This is the essence of privacy.
  • Users often want some form of moderation to get a good user experience, which can be seen as a form of censorship. The idea is to raise the bar on the basics, the fundamental infrastructure. If individuals or specific communities want certain moderation mechanisms, that is still a compatible requirement.

Counterpoint 1

We could refute the above by saying that the design goals are undesirable. We want a system where people can censor others, and where everyone is tied to their real identity. Or we could say something like, freedom of speech is a general concept, and it doesn't apply to Internet companies, even if they provide a vital service. You can survive without it and you should've read the terms of service. This roughly characterizes the mainstream view.

An additional factor here is the idea that a group of people know more about what's good for you than you do, so they are protecting you.

Counterpoint 2

We could agree with all these design goals, but think they are too extreme in terms of their requirements. For example, we could operate as a non profit, take donations and volunteers, and then host the whole infrastructure ourselves. We could say we are in a friendly legislation, so we won't be a single point of failure. Since we are working on this and maybe even our designs are open, you can trust us and we'll provide service and infrastructure that gives you what you want without having to pay for it or solve all these complex decentralized computation and so on problems. If you don't trust us for some reason, you shouldn't use us regardless. Also, this is better than status quo. And we are more likely to survive by doing this, either by taking shortcuts or by being less ambitious in terms of scope.

Principal components

There are many ways to skin a cat, but this is one way of breaking down the problem. We have a general direction with the properties listed above, together with some understanding of how WeChat works for the everyday user. Now the question is, what infrastructure do we need to support this? How do we achieve the above properties, or at least get closer to them? We want to figure out the necessary building blocks, and one way of doing this is to map out likely necessary components.

Background: Ethereum and Web3 stack

It is worth noting that a lot of the required infrastructure has been developed, at least as concepts, in the original Ethereum / Web3 vision. In it there is Ethereum for consensus/compute/transact, storage through Swarm, and communication through Whisper. That said, the main focus has been on the Ethereum blockchain itself, and a lot of things have happened in the last 5y+ with respect to technology around privacy and scalability. It is worth revisiting things from a fresh point of view, with the WeChat alternative in mind as a clear use case.

Account - self-sovereign identity and the perils of phone numbers

Starting from the most basic: what is an account and how do you get one? With most internet services today, WeChat and almost all popular messaging apps included, you need to signup with some centralized authority. Usually you also have to verify this with some data that ties this account to you as an individual. E.g. by requiring a phone number, which in most jurisdictions 1 means giving out your real ID. This also means you can be banned from using the service by a somewhat arbitrary process, with no due process.

Now, we could argue these app providers can do what they want. And they are right, in a very narrow sense. As apps like WeChat (and Google) become general-purpose platforms, they become more and more ingrained in our everyday lives. They start to provide utilities that we absolutely require to work to go about our day, such as paying for food or transportation. This means we need a higher standard than this.

Justifications for requiring phone numbers are usually centered around three claims:

  1. Avoiding spam
  2. Tying your account to your real name, for various reasons
  3. Using as a commonly shared identifier as a social network discovery mechanism

Of course, many services require more than phone numbers. E.g. email, other forms of personal data such as voice recording, linking a bank account, and so on.

In contrast, a self-sovereign system would allow you to "create an account" completely on your own. This can easily be done with public key cryptography, and it also paves the way for end-to-end encryption to make your messages private.

The main issue with this is that you need to get more creative about avoiding spam (e.g. through whitewashing attacks), and ideally there is some other form of social discovery mechanism.

Just having a public key as an account isn't enough though. If it goes through a central server, then nothing is stopping that server from arbitrarily blocking requests related to that public key. Of course, this also depends on how transparent such requests are. Fundamentally, lest we rely completely on goodwill, there need to be multiple actors by which you can use the service. This naturally points to decentralization as a requirement. See counterpoint.

Even so, if the system is closed source we don't know what it is doing. Perhaps the app communicating is also uploading data to another place, or somehow making it possible to see who is who and act accordingly.

You might notice that just one simple property, self-sovereign identity, leads to a slew of other requirements and properties. You might also notice that WeChat is far from alone in this, even if their identity requirements might be a bit more stringent than, say, Telegram. Their control aspects are also a bit more extreme, at least for someone with western sensibilities 2.

Most user facing applications have similar issues, Google Apps/FB/Twitter etc. For popular tools that have this built in, we can look at git - which is truly decentralized and has keypairs at the bottom. It is for a very specific technical domain, and even then people rely on Github. Key management is fairly difficult even for technical people, and for normal people even more so. Banks are generally far behind on this tech, relying on arcane procedures and special purpose hardware for 2FA. That's another big issue.

Let's shift gears a bit and talk about some other functional requirements.

Routing - packets from A to B

In order to get a lot of the features WeChat provides, we need the ability to do three things: communicate, store data, and transact with people. We need a bit more than that, but let's focus on this for now.

To communicate with people, in the base case, we need to go from one phone to another phone that is separated by a large distance. This requires some form of routing. The most natural platform to build this on is the existing Internet, though not the only one. Most phones are resource restricted, and are only "on" for brief periods of time. This is needed to preserve battery and bandwidth. Additionally, Internet uses IPs as endpoints, which change as phones move through space. NAT punching etc isn't always perfect either. This means we need a way to get a message from one public key to another, and through some intermediate nodes. We can think of these nodes as a form of service network. Similar to how a power grid works, or phone lines, or collection of ISPs.

One important property here is to ensure we don't end up in a situation like the centralized capture scenario above, something we've seen with centralized ISPs 3 4 where they can choose which traffic is good and which is bad. We want to allow the use of different service nodes, just like if a restaurant gives you food poisoning you can go to the one next door and then the first one goes out of business after a while. And the circle of life continues.

We shouldn't be naive though, and think that this is something nodes are likely to do for free. They need to be adequately compensated for their services, in some form of incentivization scheme. That can either be monetary, or as in the case of Bittorrent, more of a barter situation where you use game theory to coordinate with strangers 5, and some form of reputation attached to it (for private trackers).

There are many ways of doing routing, and we won't go into too much technical detail here. Suffice it to say that you likely want both a structured and unstructured alternative, and that these come with several trade-offs when it comes to efficiency, metadata protection, ability to incentivize, compatibility with existing topologies, and suitability for mobile phones (mostly offline, bandwidth restricted, not directly connectable). Expect more on this in a future article.

Some of these considerations naturally lead us into the storage and transaction components.

Storage - available and persistent for later

If mobile phones are mostly offline, we need some way to store these messages so they can be retrieved when online again. The same goes for various kinds of attachments as well, and for when people are switching devices. A user might control their timeline, but in the WeChat case that timeline is stored on Tencent's servers, and queried from there as well. This naturally needs to happen by some other service nodes. In the WeChat case, and for most IMs, the way these servers are paid for is through some indirect ad mechanism. The entity controlling these ads and so on is the same one as the one operating the servers for storage. A more direct model with different entities would see these services being compensated for their work.

We also need storage for attachments, mini-apps, as well as a way of understanding the current state of consensus when it comes to the compute/transact module. In the WeChat case, this state is completely handled by the bank institution or one of their partners, such as Alibaba. When it comes to bearer instruments like cash, no state needs to be kept as that's a direct exchange in the physical world. This isn't directly compatible with transferring value over a distance.

All of this state requires availability and persistence. It should be done in a trust minimized fashion and decentralized, which requires some form of incentivization for keeping data around. If it isn't, you are relying on social cohesion which breaks down at very large scales.

Since data will be spread out across multiple nodes, you need a way to sync data and transfer it in the network. As well as being able to add and query data from it. All of this requires a routing component.

To make it more censorship resistant it might be better to keep it as a general-purpose store, i.e. individuals don't need to know what they are storing. Otherwise, you naturally end up in a situation where individual nodes can be pressured to not store certain content.

Messaging - from me to you to all of us (not them)

This builds on top of routing, but it has a slightly different focus. The goal is to allow for individuals and groups to communicate in a private, secure and censorship-resistant manner.

It also needs to provide a decent interface to the end user, in terms of dealing seamlessly with offline messages, providing reliable and timely messaging.

In order to get closer to the ideal of total anonymity, it is useful to be able to hide metadata of who is talking to whom. This applies to both normal communication as well as for transactions. Ideally, no one but the parties involved can see who is taking part in a conversation. This can be achieved through various techniques such as mixnets, anonymous credentials, private information retrieval, and so on. Many of these techniques have a fundamental trade-off with latency and bandwidth, something that is a big concern for mobile phones. Being able to do some form of tuning, in an adaptive node manner, depending on your threat model and current capabilities is useful here.

The baseline here is pseudonymity, and having tools to allow individuals to "cut off" ties to their real world identity and transactions. People act differently in different circles in the real world, and this should be mimicked online as well. Your company, family or government shouldn't be able to know what exactly you use your paycheck for, and who you are talking to.

Compute - transact, contract and settle

The most immediate need here is transaction from A to B. Direct exchange. There is also a more indirect need for private lawmaking and contracting.

We talked about routing and storage and how they likely need to be incentivized to work properly. How are they going to be compensated? While this could in theory work via existing banking system and so on, this would be rather heavy. It'd also very likely require tying your identifier to your legal name, something that goes against what we want to achieve. What we want is something that acts more as right-to-access, similar to the way cash functions in a society 6. I pay for a fruit with something that is valuable to you and then I'm on my way.

While there might be other candidates, such as pre-paid debit cards and so on, this transaction mode pretty much requires a cryptocurrency component. The alternative is to do it on a reputation basis, which might work for small communities, due to social cohesion, but quickly deteriorates for large ones 7. Ad hoc models like private Bittorrent trackers are centralized and easy to censor.

Now, none of the existing cryptocurrency models are ideal. They also all suffer from lack of widespread use, and it is difficult to get onboarded to them in the first place. Transactions in Bitcoin are slow. Ethereum is faster and has more capabilities, but it still suffers from linking payments over time, which makes the privacy part of this more difficult. Zcash, Monero and similar are interesting, but also require more use. For Zcash, shielded transactions appear to only account for less than 2% of all transactions in 2019 8 9.

Another dimension is what sets general purpose cryptocurrencies like Ethereum apart. Aside from just paying from A to B, you can encode rules about when something should be paid out and not. This is very useful for doing a form of private lawmaking, contracting, for setting up service agreements with these nodes. If there's no trivial recourse as in the meatspace world, where you know someone's name and you can sue them, you need a different kind of model.

What makes something like Zcash interesting is that it works more like digital cash. Instead of leaving a public trail for everyone, where someone can see where you got the initial money from and then trace you across various usage, for Zcash every hop is privacy preserving.

To fulfill the general goals of being censorship resistant and secure, it is also vital that the system being used stays online and can't be easily disrupted. That points to disintermediation, as opposed to using gateways and exchanges. This is a case where something like cash, or gold, is more direct, since no one can censor this transaction without being physically present where this direct exchange is taking place. However, like before, this doesn't work over distance.

Secure chat - just our business

Similar to the messaging module above. The distinction here is that we assume the network part has already taken place. Here we are interested in keeping the contents of messages private, so that means confidentiality/end-to-end encryption, integrity, authentication, as well as forward secrecy and plausible deniability. This means that even if there's some actor that gets some private key material, or confiscated your phone, there is some level of...ephemerality to your conversations. Another issue here in terms of scalable private group chat.

Extensible mini apps

This relates to the compute and storage module above. Essentially we want to provide mini apps as in WeChat, but to do so in a way that is compatible with what we want to achieve more generally. This allows individuals and small businesses to create small tools for various purposes, and coordinate with strangers. E.g. booking a cab or getting an insurance, and so on.

This has a higher dependency on the contracting/general computation aspect. I.e. often it isn't only a transaction, but you might want to encode some specific rules here that strangers can abide by without having too high trust requirements. As a simple example: escrows.

This also needs an open API that anyone can use. It should be properly secured, so using one doesn't compromise the rest of the system it is operating in. To be censorship resistant it requires the routing and storage component to work properly.

Where are we now?

Let's look back at some of desirable properties we set out in the beginning and see how close we are to building out the necessary components. Is it realistic at all or just a pipe dream? We'll see that there are many building blocks in place, and there's reason for hope.

Self-sovereign identity. Public key crypto and web of trust like constructs make this possible.

Pseudonymity, and ideally total anonymity. Pseudonymity can largely be achieved with public key crypto and open systems that allow for permissionless participation. For transactions, pseudonymity exists in most cryptocurrencies. The challenge is linkage across time, especially when interfacing with other "legacy" system. There are stronger constructs that are actively being worked on and are promising here, such as mixnets (Nym), mixers (Wasabi Wallet, Tornado.Cash) and zero knowledge proofs (Zcash, Ethereum, Starkware). This area of applied research has exploded over the last few years.

Private and secure communication. Signal has pioneered a lot of this, following OTR. Double Ratchet, X3DH. E2EE is minimum these days, and properties like PFS and PD are getting better. For metadata protection, you have Tor, with its faults, and more active research on mixnets and private information retrieval, etc.

Censorship-resistance. This covers a lot of ground across the spectrum. Technologies like Bittorrent, Bitcoin/Ethereum, Tor obfuscated transports, E2EE by default, partial mesh networks in production, and the ability to move/replicate host machines more quickly have all made this more of a reality than it used to be. Of course, techniques such as deep packet inspection and internet shutdowns have increased.

Decentralization. Cryptocurrencies, projects like libp2p and IPFS. Need to be mindful here of many projects that claim decentralization but are still vulnerable to single points of failures, such as relying on gateways.

Built for mass adoption. This one is more subjective. There's definitely a lot of work to be done here, both when it comes to fundamental performance, key management and things like social discoverability. Directionally these things are improving and becoming easier for the average person, but there is a lot to be done here.

Scalability. With projects like Ethereum 2.0 and IPFS, more and more resources are being put into this, both at the consensus/compute layer as well as the networking (gossip, scalable Kademlia) layer. Also various layer 2 solutions for transactions.

Fundamentals in place to support great user experience. Similar to built for mass adoption. As scalability becomes more important, more applied research is being done in the p2p area to improve things like latency, bandwidth.

Works for resource-restricted devices, including smartphones. Work in progress and not enough focus here; generally an afterthought. Also have stateless clients etc.

Adaptive nodes. See above. With subprotocols and capabilities in Ethereum and libp2p, this is getting easier.

Sustainable. Token economics is a thing. While a lot of it won't stay around, there are many more projects working on making themselves dispensable. Being open source, having an engaged community and enabling users run their own infrastructure. Users as stakeholders.

Spam resistant. Tricky problem if you want to be pseudonymous, but some signs of hope with incentivization mechanisms, zero knowledge based signaling, etc. Together with various forms of rate limiting and better controlling of topology and network amplification. And just generally being battle-tested by real world attacks, such as historical Ethereum DDoS attacks.

Trust minimized. Bitcoin. Zero knowledge provable computation. Open source. Reproducible builds. Signed binaries. Incentive compatible structures. Independent audits. Still a lot of work, but getting better.

Open source. Big and only getting bigger. Including mainstream companies.

What's next?

We've looked at what WeChat provides and what we'd like an alternative to look like. We've also seen a few principal modules that are necessary to achieve those goals. To achieve all of this is a daunting task, and one might call it overly ambitious. We've also seen how far we've come with some of the goals, and how a lot of the pieces are there, in one form or another. Then it is a question of putting them all together in the right mix.

The good news is that a lot of people are working on all these building blocks and thinking about these problems. Compared to a few years ago we've come quite far when it comes to p2p infrastructure, privacy, security, scalability, and general developer mass and mindshare. If you want to join us in building some of these building blocks, and assembling them, check out our forum.

PS. We are hiring protocol engineers. DS

Acknowledgements

Corey, Dean, Jacek.

References

+ + + + \ No newline at end of file diff --git a/scripts/draco-1.4.3/draco_decoder.wasm b/scripts/draco-1.4.3/draco_decoder.wasm new file mode 100644 index 00000000..c24ca541 Binary files /dev/null and b/scripts/draco-1.4.3/draco_decoder.wasm differ diff --git a/scripts/draco-1.4.3/draco_wasm_wrapper.js b/scripts/draco-1.4.3/draco_wasm_wrapper.js new file mode 100644 index 00000000..e74212b7 --- /dev/null +++ b/scripts/draco-1.4.3/draco_wasm_wrapper.js @@ -0,0 +1,2534 @@ +var $jscomp = $jscomp || {} +$jscomp.scope = {} +$jscomp.arrayIteratorImpl = function (m) { + var p = 0 + return function () { + return p < m.length ? { done: !1, value: m[p++] } : { done: !0 } + } +} +$jscomp.arrayIterator = function (m) { + return { next: $jscomp.arrayIteratorImpl(m) } +} +$jscomp.makeIterator = function (m) { + var p = 'undefined' != typeof Symbol && Symbol.iterator && m[Symbol.iterator] + return p ? p.call(m) : $jscomp.arrayIterator(m) +} +$jscomp.ASSUME_ES5 = !1 +$jscomp.ASSUME_NO_NATIVE_MAP = !1 +$jscomp.ASSUME_NO_NATIVE_SET = !1 +$jscomp.SIMPLE_FROUND_POLYFILL = !1 +$jscomp.ISOLATE_POLYFILLS = !1 +$jscomp.FORCE_POLYFILL_PROMISE = !1 +$jscomp.FORCE_POLYFILL_PROMISE_WHEN_NO_UNHANDLED_REJECTION = !1 +$jscomp.getGlobal = function (m) { + m = [ + 'object' == typeof globalThis && globalThis, + m, + 'object' == typeof window && window, + 'object' == typeof self && self, + 'object' == typeof global && global, + ] + for (var p = 0; p < m.length; ++p) { + var l = m[p] + if (l && l.Math == Math) return l + } + throw Error('Cannot find global object') +} +$jscomp.global = $jscomp.getGlobal(this) +$jscomp.defineProperty = + $jscomp.ASSUME_ES5 || 'function' == typeof Object.defineProperties + ? 
Object.defineProperty + : function (m, p, l) { + if (m == Array.prototype || m == Object.prototype) return m + m[p] = l.value + return m + } +$jscomp.IS_SYMBOL_NATIVE = + 'function' === typeof Symbol && 'symbol' === typeof Symbol('x') +$jscomp.TRUST_ES6_POLYFILLS = + !$jscomp.ISOLATE_POLYFILLS || $jscomp.IS_SYMBOL_NATIVE +$jscomp.polyfills = {} +$jscomp.propertyToPolyfillSymbol = {} +$jscomp.POLYFILL_PREFIX = '$jscp$' +var $jscomp$lookupPolyfilledValue = function (m, p) { + var l = $jscomp.propertyToPolyfillSymbol[p] + if (null == l) return m[p] + l = m[l] + return void 0 !== l ? l : m[p] +} +$jscomp.polyfill = function (m, p, l, q) { + p && + ($jscomp.ISOLATE_POLYFILLS + ? $jscomp.polyfillIsolated(m, p, l, q) + : $jscomp.polyfillUnisolated(m, p, l, q)) +} +$jscomp.polyfillUnisolated = function (m, p, l, q) { + l = $jscomp.global + m = m.split('.') + for (q = 0; q < m.length - 1; q++) { + var k = m[q] + if (!(k in l)) return + l = l[k] + } + m = m[m.length - 1] + q = l[m] + p = p(q) + p != q && + null != p && + $jscomp.defineProperty(l, m, { configurable: !0, writable: !0, value: p }) +} +$jscomp.polyfillIsolated = function (m, p, l, q) { + var k = m.split('.') + m = 1 === k.length + q = k[0] + q = !m && q in $jscomp.polyfills ? $jscomp.polyfills : $jscomp.global + for (var A = 0; A < k.length - 1; A++) { + var g = k[A] + if (!(g in q)) return + q = q[g] + } + k = k[k.length - 1] + l = $jscomp.IS_SYMBOL_NATIVE && 'es6' === l ? q[k] : null + p = p(l) + null != p && + (m + ? $jscomp.defineProperty($jscomp.polyfills, k, { + configurable: !0, + writable: !0, + value: p, + }) + : p !== l && + (void 0 === $jscomp.propertyToPolyfillSymbol[k] && + ((l = (1e9 * Math.random()) >>> 0), + ($jscomp.propertyToPolyfillSymbol[k] = $jscomp.IS_SYMBOL_NATIVE + ? 
$jscomp.global.Symbol(k) + : $jscomp.POLYFILL_PREFIX + l + '$' + k)), + $jscomp.defineProperty(q, $jscomp.propertyToPolyfillSymbol[k], { + configurable: !0, + writable: !0, + value: p, + }))) +} +$jscomp.polyfill( + 'Promise', + function (m) { + function p() { + this.batch_ = null + } + function l(g) { + return g instanceof k + ? g + : new k(function (n, u) { + n(g) + }) + } + if ( + m && + (!( + $jscomp.FORCE_POLYFILL_PROMISE || + ($jscomp.FORCE_POLYFILL_PROMISE_WHEN_NO_UNHANDLED_REJECTION && + 'undefined' === typeof $jscomp.global.PromiseRejectionEvent) + ) || + !$jscomp.global.Promise || + -1 === $jscomp.global.Promise.toString().indexOf('[native code]')) + ) + return m + p.prototype.asyncExecute = function (g) { + if (null == this.batch_) { + this.batch_ = [] + var n = this + this.asyncExecuteFunction(function () { + n.executeBatch_() + }) + } + this.batch_.push(g) + } + var q = $jscomp.global.setTimeout + p.prototype.asyncExecuteFunction = function (g) { + q(g, 0) + } + p.prototype.executeBatch_ = function () { + for (; this.batch_ && this.batch_.length; ) { + var g = this.batch_ + this.batch_ = [] + for (var n = 0; n < g.length; ++n) { + var u = g[n] + g[n] = null + try { + u() + } catch (z) { + this.asyncThrow_(z) + } + } + } + this.batch_ = null + } + p.prototype.asyncThrow_ = function (g) { + this.asyncExecuteFunction(function () { + throw g + }) + } + var k = function (g) { + this.state_ = 0 + this.result_ = void 0 + this.onSettledCallbacks_ = [] + this.isRejectionHandled_ = !1 + var n = this.createResolveAndReject_() + try { + g(n.resolve, n.reject) + } catch (u) { + n.reject(u) + } + } + k.prototype.createResolveAndReject_ = function () { + function g(z) { + return function (P) { + u || ((u = !0), z.call(n, P)) + } + } + var n = this, + u = !1 + return { resolve: g(this.resolveTo_), reject: g(this.reject_) } + } + k.prototype.resolveTo_ = function (g) { + if (g === this) + this.reject_(new TypeError('A Promise cannot resolve to itself')) + else if (g 
instanceof k) this.settleSameAsPromise_(g) + else { + a: switch (typeof g) { + case 'object': + var n = null != g + break a + case 'function': + n = !0 + break a + default: + n = !1 + } + n ? this.resolveToNonPromiseObj_(g) : this.fulfill_(g) + } + } + k.prototype.resolveToNonPromiseObj_ = function (g) { + var n = void 0 + try { + n = g.then + } catch (u) { + this.reject_(u) + return + } + 'function' == typeof n + ? this.settleSameAsThenable_(n, g) + : this.fulfill_(g) + } + k.prototype.reject_ = function (g) { + this.settle_(2, g) + } + k.prototype.fulfill_ = function (g) { + this.settle_(1, g) + } + k.prototype.settle_ = function (g, n) { + if (0 != this.state_) + throw Error( + 'Cannot settle(' + + g + + ', ' + + n + + '): Promise already settled in state' + + this.state_, + ) + this.state_ = g + this.result_ = n + 2 === this.state_ && this.scheduleUnhandledRejectionCheck_() + this.executeOnSettledCallbacks_() + } + k.prototype.scheduleUnhandledRejectionCheck_ = function () { + var g = this + q(function () { + if (g.notifyUnhandledRejection_()) { + var n = $jscomp.global.console + 'undefined' !== typeof n && n.error(g.result_) + } + }, 1) + } + k.prototype.notifyUnhandledRejection_ = function () { + if (this.isRejectionHandled_) return !1 + var g = $jscomp.global.CustomEvent, + n = $jscomp.global.Event, + u = $jscomp.global.dispatchEvent + if ('undefined' === typeof u) return !0 + 'function' === typeof g + ? (g = new g('unhandledrejection', { cancelable: !0 })) + : 'function' === typeof n + ? 
(g = new n('unhandledrejection', { cancelable: !0 })) + : ((g = $jscomp.global.document.createEvent('CustomEvent')), + g.initCustomEvent('unhandledrejection', !1, !0, g)) + g.promise = this + g.reason = this.result_ + return u(g) + } + k.prototype.executeOnSettledCallbacks_ = function () { + if (null != this.onSettledCallbacks_) { + for (var g = 0; g < this.onSettledCallbacks_.length; ++g) + A.asyncExecute(this.onSettledCallbacks_[g]) + this.onSettledCallbacks_ = null + } + } + var A = new p() + k.prototype.settleSameAsPromise_ = function (g) { + var n = this.createResolveAndReject_() + g.callWhenSettled_(n.resolve, n.reject) + } + k.prototype.settleSameAsThenable_ = function (g, n) { + var u = this.createResolveAndReject_() + try { + g.call(n, u.resolve, u.reject) + } catch (z) { + u.reject(z) + } + } + k.prototype.then = function (g, n) { + function u(V, W) { + return 'function' == typeof V + ? function (v) { + try { + z(V(v)) + } catch (x) { + P(x) + } + } + : W + } + var z, + P, + aa = new k(function (V, W) { + z = V + P = W + }) + this.callWhenSettled_(u(g, z), u(n, P)) + return aa + } + k.prototype.catch = function (g) { + return this.then(void 0, g) + } + k.prototype.callWhenSettled_ = function (g, n) { + function u() { + switch (z.state_) { + case 1: + g(z.result_) + break + case 2: + n(z.result_) + break + default: + throw Error('Unexpected state: ' + z.state_) + } + } + var z = this + null == this.onSettledCallbacks_ + ? A.asyncExecute(u) + : this.onSettledCallbacks_.push(u) + this.isRejectionHandled_ = !0 + } + k.resolve = l + k.reject = function (g) { + return new k(function (n, u) { + u(g) + }) + } + k.race = function (g) { + return new k(function (n, u) { + for ( + var z = $jscomp.makeIterator(g), P = z.next(); + !P.done; + P = z.next() + ) + l(P.value).callWhenSettled_(n, u) + }) + } + k.all = function (g) { + var n = $jscomp.makeIterator(g), + u = n.next() + return u.done + ? 
l([]) + : new k(function (z, P) { + function aa(v) { + return function (x) { + V[v] = x + W-- + 0 == W && z(V) + } + } + var V = [], + W = 0 + do + V.push(void 0), + W++, + l(u.value).callWhenSettled_(aa(V.length - 1), P), + (u = n.next()) + while (!u.done) + }) + } + return k + }, + 'es6', + 'es3', +) +$jscomp.polyfill( + 'Array.prototype.copyWithin', + function (m) { + function p(l) { + l = Number(l) + return Infinity === l || -Infinity === l ? l : l | 0 + } + return m + ? m + : function (l, q, k) { + var A = this.length + l = p(l) + q = p(q) + k = void 0 === k ? A : p(k) + l = 0 > l ? Math.max(A + l, 0) : Math.min(l, A) + q = 0 > q ? Math.max(A + q, 0) : Math.min(q, A) + k = 0 > k ? Math.max(A + k, 0) : Math.min(k, A) + if (l < q) + for (; q < k; ) + q in this ? (this[l++] = this[q++]) : (delete this[l++], q++) + else + for (k = Math.min(k, A + q - l), l += k - q; k > q; ) + --k in this ? (this[--l] = this[k]) : delete this[--l] + return this + } + }, + 'es6', + 'es3', +) +$jscomp.typedArrayCopyWithin = function (m) { + return m ? 
m : Array.prototype.copyWithin +} +$jscomp.polyfill( + 'Int8Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Uint8Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Uint8ClampedArray.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Int16Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Uint16Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Int32Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Uint32Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Float32Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +$jscomp.polyfill( + 'Float64Array.prototype.copyWithin', + $jscomp.typedArrayCopyWithin, + 'es6', + 'es5', +) +var DracoDecoderModule = (function () { + var m = + 'undefined' !== typeof document && document.currentScript + ? document.currentScript.src + : void 0 + 'undefined' !== typeof __filename && (m = m || __filename) + return function (p) { + function l(e) { + return a.locateFile ? a.locateFile(e, X) : X + e + } + function q(e, b) { + e || n('Assertion failed: ' + b) + } + function k(e, b, c) { + var d = b + c + for (c = b; e[c] && !(c >= d); ) ++c + if (16 < c - b && e.subarray && Da) return Da.decode(e.subarray(b, c)) + for (d = ''; b < c; ) { + var f = e[b++] + if (f & 128) { + var t = e[b++] & 63 + if (192 == (f & 224)) d += String.fromCharCode(((f & 31) << 6) | t) + else { + var Y = e[b++] & 63 + f = + 224 == (f & 240) + ? ((f & 15) << 12) | (t << 6) | Y + : ((f & 7) << 18) | (t << 12) | (Y << 6) | (e[b++] & 63) + 65536 > f + ? 
(d += String.fromCharCode(f)) + : ((f -= 65536), + (d += String.fromCharCode( + 55296 | (f >> 10), + 56320 | (f & 1023), + ))) + } + } else d += String.fromCharCode(f) + } + return d + } + function A(e, b) { + return e ? k(ja, e, b) : '' + } + function g(e) { + Ea = e + a.HEAP8 = ba = new Int8Array(e) + a.HEAP16 = new Int16Array(e) + a.HEAP32 = F = new Int32Array(e) + a.HEAPU8 = ja = new Uint8Array(e) + a.HEAPU16 = new Uint16Array(e) + a.HEAPU32 = new Uint32Array(e) + a.HEAPF32 = new Float32Array(e) + a.HEAPF64 = new Float64Array(e) + } + function n(e) { + if (a.onAbort) a.onAbort(e) + e += '' + ia(e) + Fa = !0 + e = new WebAssembly.RuntimeError( + 'abort(' + e + '). Build with -s ASSERTIONS=1 for more info.', + ) + sa(e) + throw e + } + function u(e, b) { + return String.prototype.startsWith ? e.startsWith(b) : 0 === e.indexOf(b) + } + function z(e) { + try { + if (e == Q && ka) return new Uint8Array(ka) + if (oa) return oa(e) + throw 'both async and sync fetching of the wasm failed' + } catch (b) { + n(b) + } + } + function P() { + if (!ka && (pa || fa)) { + if ('function' === typeof fetch && !u(Q, 'file://')) + return fetch(Q, { credentials: 'same-origin' }) + .then(function (e) { + if (!e.ok) throw "failed to load wasm binary file at '" + Q + "'" + return e.arrayBuffer() + }) + .catch(function () { + return z(Q) + }) + if (Ga) + return new Promise(function (e, b) { + Ga( + Q, + function (c) { + e(new Uint8Array(c)) + }, + b, + ) + }) + } + return Promise.resolve().then(function () { + return z(Q) + }) + } + function aa(e) { + for (; 0 < e.length; ) { + var b = e.shift() + if ('function' == typeof b) b(a) + else { + var c = b.func + 'number' === typeof c + ? void 0 === b.arg + ? ta.get(c)() + : ta.get(c)(b.arg) + : c(void 0 === b.arg ? 
null : b.arg) + } + } + } + function V(e) { + this.excPtr = e + this.ptr = e - D.SIZE + this.set_type = function (b) { + F[(this.ptr + D.TYPE_OFFSET) >> 2] = b + } + this.get_type = function () { + return F[(this.ptr + D.TYPE_OFFSET) >> 2] + } + this.set_destructor = function (b) { + F[(this.ptr + D.DESTRUCTOR_OFFSET) >> 2] = b + } + this.get_destructor = function () { + return F[(this.ptr + D.DESTRUCTOR_OFFSET) >> 2] + } + this.set_refcount = function (b) { + F[(this.ptr + D.REFCOUNT_OFFSET) >> 2] = b + } + this.set_caught = function (b) { + ba[(this.ptr + D.CAUGHT_OFFSET) >> 0] = b ? 1 : 0 + } + this.get_caught = function () { + return 0 != ba[(this.ptr + D.CAUGHT_OFFSET) >> 0] + } + this.set_rethrown = function (b) { + ba[(this.ptr + D.RETHROWN_OFFSET) >> 0] = b ? 1 : 0 + } + this.get_rethrown = function () { + return 0 != ba[(this.ptr + D.RETHROWN_OFFSET) >> 0] + } + this.init = function (b, c) { + this.set_type(b) + this.set_destructor(c) + this.set_refcount(0) + this.set_caught(!1) + this.set_rethrown(!1) + } + this.add_ref = function () { + F[(this.ptr + D.REFCOUNT_OFFSET) >> 2] += 1 + } + this.release_ref = function () { + var b = F[(this.ptr + D.REFCOUNT_OFFSET) >> 2] + F[(this.ptr + D.REFCOUNT_OFFSET) >> 2] = b - 1 + return 1 === b + } + } + function W(e) { + function b() { + if (!qa && ((qa = !0), (a.calledRun = !0), !Fa)) { + Ha = !0 + aa(Ia) + aa(ua) + Ja(a) + if (a.onRuntimeInitialized) a.onRuntimeInitialized() + if (a.postRun) + for ( + 'function' == typeof a.postRun && (a.postRun = [a.postRun]); + a.postRun.length; + + ) + Ka.unshift(a.postRun.shift()) + aa(Ka) + } + } + if (!(0 < ha)) { + if (a.preRun) + for ( + 'function' == typeof a.preRun && (a.preRun = [a.preRun]); + a.preRun.length; + + ) + La.unshift(a.preRun.shift()) + aa(La) + 0 < ha || + (a.setStatus + ? 
(a.setStatus('Running...'), + setTimeout(function () { + setTimeout(function () { + a.setStatus('') + }, 1) + b() + }, 1)) + : b()) + } + } + function v() {} + function x(e) { + return (e || v).__cache__ + } + function S(e, b) { + var c = x(b), + d = c[e] + if (d) return d + d = Object.create((b || v).prototype) + d.ptr = e + return (c[e] = d) + } + function da(e) { + if ('string' === typeof e) { + for (var b = 0, c = 0; c < e.length; ++c) { + var d = e.charCodeAt(c) + 55296 <= d && + 57343 >= d && + (d = (65536 + ((d & 1023) << 10)) | (e.charCodeAt(++c) & 1023)) + 127 >= d ? ++b : (b = 2047 >= d ? b + 2 : 65535 >= d ? b + 3 : b + 4) + } + b = Array(b + 1) + c = 0 + d = b.length + if (0 < d) { + d = c + d - 1 + for (var f = 0; f < e.length; ++f) { + var t = e.charCodeAt(f) + if (55296 <= t && 57343 >= t) { + var Y = e.charCodeAt(++f) + t = (65536 + ((t & 1023) << 10)) | (Y & 1023) + } + if (127 >= t) { + if (c >= d) break + b[c++] = t + } else { + if (2047 >= t) { + if (c + 1 >= d) break + b[c++] = 192 | (t >> 6) + } else { + if (65535 >= t) { + if (c + 2 >= d) break + b[c++] = 224 | (t >> 12) + } else { + if (c + 3 >= d) break + b[c++] = 240 | (t >> 18) + b[c++] = 128 | ((t >> 12) & 63) + } + b[c++] = 128 | ((t >> 6) & 63) + } + b[c++] = 128 | (t & 63) + } + } + b[c] = 0 + } + e = r.alloc(b, ba) + r.copy(b, ba, e) + return e + } + return e + } + function va(e) { + if ('object' === typeof e) { + var b = r.alloc(e, ba) + r.copy(e, ba, b) + return b + } + return e + } + function ca() { + throw 'cannot construct a VoidPtr, no constructor in IDL' + } + function T() { + this.ptr = Ma() + x(T)[this.ptr] = this + } + function R() { + this.ptr = Na() + x(R)[this.ptr] = this + } + function Z() { + this.ptr = Oa() + x(Z)[this.ptr] = this + } + function w() { + this.ptr = Pa() + x(w)[this.ptr] = this + } + function C() { + this.ptr = Qa() + x(C)[this.ptr] = this + } + function G() { + this.ptr = Ra() + x(G)[this.ptr] = this + } + function H() { + this.ptr = Sa() + 
x(H)[this.ptr] = this + } + function E() { + this.ptr = Ta() + x(E)[this.ptr] = this + } + function U() { + this.ptr = Ua() + x(U)[this.ptr] = this + } + function B() { + throw 'cannot construct a Status, no constructor in IDL' + } + function I() { + this.ptr = Va() + x(I)[this.ptr] = this + } + function J() { + this.ptr = Wa() + x(J)[this.ptr] = this + } + function K() { + this.ptr = Xa() + x(K)[this.ptr] = this + } + function L() { + this.ptr = Ya() + x(L)[this.ptr] = this + } + function M() { + this.ptr = Za() + x(M)[this.ptr] = this + } + function N() { + this.ptr = $a() + x(N)[this.ptr] = this + } + function O() { + this.ptr = ab() + x(O)[this.ptr] = this + } + function y() { + this.ptr = bb() + x(y)[this.ptr] = this + } + function h() { + this.ptr = cb() + x(h)[this.ptr] = this + } + p = p || {} + var a = 'undefined' !== typeof p ? p : {}, + Ja, + sa + a.ready = new Promise(function (e, b) { + Ja = e + sa = b + }) + var db = !1, + eb = !1 + a.onRuntimeInitialized = function () { + db = !0 + a.callRuntimeCallbacks(a.mainCallbacks) + if (eb && 'function' === typeof a.onModuleLoaded) a.onModuleLoaded(a) + } + a.onModuleParsed = function () { + eb = !0 + if (db && 'function' === typeof a.onModuleLoaded) a.onModuleLoaded(a) + } + a.isVersionSupported = function (e) { + if ('string' !== typeof e) return !1 + e = e.split('.') + return 2 > e.length || 3 < e.length + ? !1 + : 1 == e[0] && 0 <= e[1] && 4 >= e[1] + ? !0 + : 0 != e[0] || 10 < e[1] + ? !1 + : !0 + } + var la = {}, + ea + for (ea in a) a.hasOwnProperty(ea) && (la[ea] = a[ea]) + var pa = !1, + fa = !1, + wa = !1, + fb = !1 + pa = 'object' === typeof window + fa = 'function' === typeof importScripts + wa = + 'object' === typeof process && + 'object' === typeof process.versions && + 'string' === typeof process.versions.node + fb = !pa && !wa && !fa + var X = '', + xa, + ya + if (wa) { + X = fa ? 
require('path').dirname(X) + '/' : __dirname + '/' + var za = function (e, b) { + xa || (xa = require('fs')) + ya || (ya = require('path')) + e = ya.normalize(e) + return xa.readFileSync(e, b ? null : 'utf8') + } + var oa = function (e) { + e = za(e, !0) + e.buffer || (e = new Uint8Array(e)) + q(e.buffer) + return e + } + 1 < process.argv.length && process.argv[1].replace(/\\/g, '/') + process.argv.slice(2) + a.inspect = function () { + return '[Emscripten Module object]' + } + } else if (fb) + 'undefined' != typeof read && + (za = function (e) { + return read(e) + }), + (oa = function (e) { + if ('function' === typeof readbuffer) + return new Uint8Array(readbuffer(e)) + e = read(e, 'binary') + q('object' === typeof e) + return e + }), + 'undefined' !== typeof print && + ('undefined' === typeof console && (console = {}), + (console.log = print), + (console.warn = console.error = + 'undefined' !== typeof printErr ? printErr : print)) + else if (pa || fa) { + fa + ? (X = self.location.href) + : 'undefined' !== typeof document && + document.currentScript && + (X = document.currentScript.src) + m && (X = m) + X = 0 !== X.indexOf('blob:') ? X.substr(0, X.lastIndexOf('/') + 1) : '' + za = function (e) { + var b = new XMLHttpRequest() + b.open('GET', e, !1) + b.send(null) + return b.responseText + } + fa && + (oa = function (e) { + var b = new XMLHttpRequest() + b.open('GET', e, !1) + b.responseType = 'arraybuffer' + b.send(null) + return new Uint8Array(b.response) + }) + var Ga = function (e, b, c) { + var d = new XMLHttpRequest() + d.open('GET', e, !0) + d.responseType = 'arraybuffer' + d.onload = function () { + 200 == d.status || (0 == d.status && d.response) ? 
b(d.response) : c() + } + d.onerror = c + d.send(null) + } + } + var Ad = a.print || console.log.bind(console), + ia = a.printErr || console.warn.bind(console) + for (ea in la) la.hasOwnProperty(ea) && (a[ea] = la[ea]) + la = null + var ka + a.wasmBinary && (ka = a.wasmBinary) + 'object' !== typeof WebAssembly && n('no native wasm support detected') + var ra, + Fa = !1, + Da = + 'undefined' !== typeof TextDecoder ? new TextDecoder('utf8') : void 0, + Ea, + ba, + ja, + F, + ta, + La = [], + Ia = [], + ua = [], + Ka = [], + Ha = !1, + ha = 0, + Aa = null, + ma = null + a.preloadedImages = {} + a.preloadedAudios = {} + var Q = 'draco_decoder.wasm' + u(Q, 'data:application/octet-stream;base64,') || (Q = l(Q)) + var D = { + DESTRUCTOR_OFFSET: 0, + REFCOUNT_OFFSET: 4, + TYPE_OFFSET: 8, + CAUGHT_OFFSET: 12, + RETHROWN_OFFSET: 13, + SIZE: 16, + }, + Bd = 0, + na = { + mappings: {}, + buffers: [null, [], []], + printChar: function (e, b) { + var c = na.buffers[e] + 0 === b || 10 === b + ? ((1 === e ? 
Ad : ia)(k(c, 0)), (c.length = 0)) + : c.push(b) + }, + varargs: void 0, + get: function () { + na.varargs += 4 + return F[(na.varargs - 4) >> 2] + }, + getStr: function (e) { + return A(e) + }, + get64: function (e, b) { + return e + }, + }, + Cd = { + h: function (e) { + return gb(e + D.SIZE) + D.SIZE + }, + g: function (e, b, c) { + new V(e).init(b, c) + Bd++ + throw e + }, + a: function () { + n() + }, + d: function (e, b, c) { + ja.copyWithin(e, b, b + c) + }, + e: function (e) { + var b = ja.length + e >>>= 0 + if (2147483648 < e) return !1 + for (var c = 1; 4 >= c; c *= 2) { + var d = b * (1 + 0.2 / c) + d = Math.min(d, e + 100663296) + var f = Math, + t = f.min + d = Math.max(e, d) + 0 < d % 65536 && (d += 65536 - (d % 65536)) + f = t.call(f, 2147483648, d) + a: { + try { + ra.grow((f - Ea.byteLength + 65535) >>> 16) + g(ra.buffer) + var Y = 1 + break a + } catch (Ba) {} + Y = void 0 + } + if (Y) return !0 + } + return !1 + }, + f: function (e) { + return 0 + }, + c: function (e, b, c, d, f) {}, + b: function (e, b, c, d) { + for (var f = 0, t = 0; t < c; t++) { + for ( + var Y = F[(b + 8 * t) >> 2], + Ba = F[(b + (8 * t + 4)) >> 2], + Ca = 0; + Ca < Ba; + Ca++ + ) + na.printChar(e, ja[Y + Ca]) + f += Ba + } + F[d >> 2] = f + return 0 + }, + } + ;(function () { + function e(f, t) { + a.asm = f.exports + ra = a.asm.i + g(ra.buffer) + ta = a.asm.k + Ia.unshift(a.asm.j) + ha-- + a.monitorRunDependencies && a.monitorRunDependencies(ha) + 0 == ha && + (null !== Aa && (clearInterval(Aa), (Aa = null)), + ma && ((f = ma), (ma = null), f())) + } + function b(f) { + e(f.instance) + } + function c(f) { + return P() + .then(function (t) { + return WebAssembly.instantiate(t, d) + }) + .then(f, function (t) { + ia('failed to asynchronously prepare wasm: ' + t) + n(t) + }) + } + var d = { a: Cd } + ha++ + a.monitorRunDependencies && a.monitorRunDependencies(ha) + if (a.instantiateWasm) + try { + return a.instantiateWasm(d, e) + } catch (f) { + return ( + 
ia('Module.instantiateWasm callback failed with error: ' + f), !1 + ) + } + ;(function () { + return ka || + 'function' !== typeof WebAssembly.instantiateStreaming || + u(Q, 'data:application/octet-stream;base64,') || + u(Q, 'file://') || + 'function' !== typeof fetch + ? c(b) + : fetch(Q, { credentials: 'same-origin' }).then(function (f) { + return WebAssembly.instantiateStreaming(f, d).then( + b, + function (t) { + ia('wasm streaming compile failed: ' + t) + ia('falling back to ArrayBuffer instantiation') + return c(b) + }, + ) + }) + })().catch(sa) + return {} + })() + a.___wasm_call_ctors = function () { + return (a.___wasm_call_ctors = a.asm.j).apply(null, arguments) + } + var hb = (a._emscripten_bind_VoidPtr___destroy___0 = function () { + return (hb = a._emscripten_bind_VoidPtr___destroy___0 = a.asm.l).apply( + null, + arguments, + ) + }), + Ma = (a._emscripten_bind_DecoderBuffer_DecoderBuffer_0 = function () { + return (Ma = a._emscripten_bind_DecoderBuffer_DecoderBuffer_0 = + a.asm.m).apply(null, arguments) + }), + ib = (a._emscripten_bind_DecoderBuffer_Init_2 = function () { + return (ib = a._emscripten_bind_DecoderBuffer_Init_2 = a.asm.n).apply( + null, + arguments, + ) + }), + jb = (a._emscripten_bind_DecoderBuffer___destroy___0 = function () { + return (jb = a._emscripten_bind_DecoderBuffer___destroy___0 = + a.asm.o).apply(null, arguments) + }), + Na = (a._emscripten_bind_AttributeTransformData_AttributeTransformData_0 = + function () { + return (Na = + a._emscripten_bind_AttributeTransformData_AttributeTransformData_0 = + a.asm.p).apply(null, arguments) + }), + kb = (a._emscripten_bind_AttributeTransformData_transform_type_0 = + function () { + return (kb = + a._emscripten_bind_AttributeTransformData_transform_type_0 = + a.asm.q).apply(null, arguments) + }), + lb = (a._emscripten_bind_AttributeTransformData___destroy___0 = + function () { + return (lb = a._emscripten_bind_AttributeTransformData___destroy___0 = + a.asm.r).apply(null, arguments) + }), + 
Oa = (a._emscripten_bind_GeometryAttribute_GeometryAttribute_0 = + function () { + return (Oa = + a._emscripten_bind_GeometryAttribute_GeometryAttribute_0 = + a.asm.s).apply(null, arguments) + }), + mb = (a._emscripten_bind_GeometryAttribute___destroy___0 = function () { + return (mb = a._emscripten_bind_GeometryAttribute___destroy___0 = + a.asm.t).apply(null, arguments) + }), + Pa = (a._emscripten_bind_PointAttribute_PointAttribute_0 = function () { + return (Pa = a._emscripten_bind_PointAttribute_PointAttribute_0 = + a.asm.u).apply(null, arguments) + }), + nb = (a._emscripten_bind_PointAttribute_size_0 = function () { + return (nb = a._emscripten_bind_PointAttribute_size_0 = a.asm.v).apply( + null, + arguments, + ) + }), + ob = (a._emscripten_bind_PointAttribute_GetAttributeTransformData_0 = + function () { + return (ob = + a._emscripten_bind_PointAttribute_GetAttributeTransformData_0 = + a.asm.w).apply(null, arguments) + }), + pb = (a._emscripten_bind_PointAttribute_attribute_type_0 = function () { + return (pb = a._emscripten_bind_PointAttribute_attribute_type_0 = + a.asm.x).apply(null, arguments) + }), + qb = (a._emscripten_bind_PointAttribute_data_type_0 = function () { + return (qb = a._emscripten_bind_PointAttribute_data_type_0 = + a.asm.y).apply(null, arguments) + }), + rb = (a._emscripten_bind_PointAttribute_num_components_0 = function () { + return (rb = a._emscripten_bind_PointAttribute_num_components_0 = + a.asm.z).apply(null, arguments) + }), + sb = (a._emscripten_bind_PointAttribute_normalized_0 = function () { + return (sb = a._emscripten_bind_PointAttribute_normalized_0 = + a.asm.A).apply(null, arguments) + }), + tb = (a._emscripten_bind_PointAttribute_byte_stride_0 = function () { + return (tb = a._emscripten_bind_PointAttribute_byte_stride_0 = + a.asm.B).apply(null, arguments) + }), + ub = (a._emscripten_bind_PointAttribute_byte_offset_0 = function () { + return (ub = a._emscripten_bind_PointAttribute_byte_offset_0 = + a.asm.C).apply(null, 
arguments) + }), + vb = (a._emscripten_bind_PointAttribute_unique_id_0 = function () { + return (vb = a._emscripten_bind_PointAttribute_unique_id_0 = + a.asm.D).apply(null, arguments) + }), + wb = (a._emscripten_bind_PointAttribute___destroy___0 = function () { + return (wb = a._emscripten_bind_PointAttribute___destroy___0 = + a.asm.E).apply(null, arguments) + }), + Qa = + (a._emscripten_bind_AttributeQuantizationTransform_AttributeQuantizationTransform_0 = + function () { + return (Qa = + a._emscripten_bind_AttributeQuantizationTransform_AttributeQuantizationTransform_0 = + a.asm.F).apply(null, arguments) + }), + xb = + (a._emscripten_bind_AttributeQuantizationTransform_InitFromAttribute_1 = + function () { + return (xb = + a._emscripten_bind_AttributeQuantizationTransform_InitFromAttribute_1 = + a.asm.G).apply(null, arguments) + }), + yb = + (a._emscripten_bind_AttributeQuantizationTransform_quantization_bits_0 = + function () { + return (yb = + a._emscripten_bind_AttributeQuantizationTransform_quantization_bits_0 = + a.asm.H).apply(null, arguments) + }), + zb = (a._emscripten_bind_AttributeQuantizationTransform_min_value_1 = + function () { + return (zb = + a._emscripten_bind_AttributeQuantizationTransform_min_value_1 = + a.asm.I).apply(null, arguments) + }), + Ab = (a._emscripten_bind_AttributeQuantizationTransform_range_0 = + function () { + return (Ab = + a._emscripten_bind_AttributeQuantizationTransform_range_0 = + a.asm.J).apply(null, arguments) + }), + Bb = (a._emscripten_bind_AttributeQuantizationTransform___destroy___0 = + function () { + return (Bb = + a._emscripten_bind_AttributeQuantizationTransform___destroy___0 = + a.asm.K).apply(null, arguments) + }), + Ra = + (a._emscripten_bind_AttributeOctahedronTransform_AttributeOctahedronTransform_0 = + function () { + return (Ra = + a._emscripten_bind_AttributeOctahedronTransform_AttributeOctahedronTransform_0 = + a.asm.L).apply(null, arguments) + }), + Cb = + 
(a._emscripten_bind_AttributeOctahedronTransform_InitFromAttribute_1 = + function () { + return (Cb = + a._emscripten_bind_AttributeOctahedronTransform_InitFromAttribute_1 = + a.asm.M).apply(null, arguments) + }), + Db = + (a._emscripten_bind_AttributeOctahedronTransform_quantization_bits_0 = + function () { + return (Db = + a._emscripten_bind_AttributeOctahedronTransform_quantization_bits_0 = + a.asm.N).apply(null, arguments) + }), + Eb = (a._emscripten_bind_AttributeOctahedronTransform___destroy___0 = + function () { + return (Eb = + a._emscripten_bind_AttributeOctahedronTransform___destroy___0 = + a.asm.O).apply(null, arguments) + }), + Sa = (a._emscripten_bind_PointCloud_PointCloud_0 = function () { + return (Sa = a._emscripten_bind_PointCloud_PointCloud_0 = + a.asm.P).apply(null, arguments) + }), + Fb = (a._emscripten_bind_PointCloud_num_attributes_0 = function () { + return (Fb = a._emscripten_bind_PointCloud_num_attributes_0 = + a.asm.Q).apply(null, arguments) + }), + Gb = (a._emscripten_bind_PointCloud_num_points_0 = function () { + return (Gb = a._emscripten_bind_PointCloud_num_points_0 = + a.asm.R).apply(null, arguments) + }), + Hb = (a._emscripten_bind_PointCloud___destroy___0 = function () { + return (Hb = a._emscripten_bind_PointCloud___destroy___0 = + a.asm.S).apply(null, arguments) + }), + Ta = (a._emscripten_bind_Mesh_Mesh_0 = function () { + return (Ta = a._emscripten_bind_Mesh_Mesh_0 = a.asm.T).apply( + null, + arguments, + ) + }), + Ib = (a._emscripten_bind_Mesh_num_faces_0 = function () { + return (Ib = a._emscripten_bind_Mesh_num_faces_0 = a.asm.U).apply( + null, + arguments, + ) + }), + Jb = (a._emscripten_bind_Mesh_num_attributes_0 = function () { + return (Jb = a._emscripten_bind_Mesh_num_attributes_0 = a.asm.V).apply( + null, + arguments, + ) + }), + Kb = (a._emscripten_bind_Mesh_num_points_0 = function () { + return (Kb = a._emscripten_bind_Mesh_num_points_0 = a.asm.W).apply( + null, + arguments, + ) + }), + Lb = 
(a._emscripten_bind_Mesh___destroy___0 = function () { + return (Lb = a._emscripten_bind_Mesh___destroy___0 = a.asm.X).apply( + null, + arguments, + ) + }), + Ua = (a._emscripten_bind_Metadata_Metadata_0 = function () { + return (Ua = a._emscripten_bind_Metadata_Metadata_0 = a.asm.Y).apply( + null, + arguments, + ) + }), + Mb = (a._emscripten_bind_Metadata___destroy___0 = function () { + return (Mb = a._emscripten_bind_Metadata___destroy___0 = a.asm.Z).apply( + null, + arguments, + ) + }), + Nb = (a._emscripten_bind_Status_code_0 = function () { + return (Nb = a._emscripten_bind_Status_code_0 = a.asm._).apply( + null, + arguments, + ) + }), + Ob = (a._emscripten_bind_Status_ok_0 = function () { + return (Ob = a._emscripten_bind_Status_ok_0 = a.asm.$).apply( + null, + arguments, + ) + }), + Pb = (a._emscripten_bind_Status_error_msg_0 = function () { + return (Pb = a._emscripten_bind_Status_error_msg_0 = a.asm.aa).apply( + null, + arguments, + ) + }), + Qb = (a._emscripten_bind_Status___destroy___0 = function () { + return (Qb = a._emscripten_bind_Status___destroy___0 = a.asm.ba).apply( + null, + arguments, + ) + }), + Va = (a._emscripten_bind_DracoFloat32Array_DracoFloat32Array_0 = + function () { + return (Va = + a._emscripten_bind_DracoFloat32Array_DracoFloat32Array_0 = + a.asm.ca).apply(null, arguments) + }), + Rb = (a._emscripten_bind_DracoFloat32Array_GetValue_1 = function () { + return (Rb = a._emscripten_bind_DracoFloat32Array_GetValue_1 = + a.asm.da).apply(null, arguments) + }), + Sb = (a._emscripten_bind_DracoFloat32Array_size_0 = function () { + return (Sb = a._emscripten_bind_DracoFloat32Array_size_0 = + a.asm.ea).apply(null, arguments) + }), + Tb = (a._emscripten_bind_DracoFloat32Array___destroy___0 = function () { + return (Tb = a._emscripten_bind_DracoFloat32Array___destroy___0 = + a.asm.fa).apply(null, arguments) + }), + Wa = (a._emscripten_bind_DracoInt8Array_DracoInt8Array_0 = function () { + return (Wa = 
a._emscripten_bind_DracoInt8Array_DracoInt8Array_0 = + a.asm.ga).apply(null, arguments) + }), + Ub = (a._emscripten_bind_DracoInt8Array_GetValue_1 = function () { + return (Ub = a._emscripten_bind_DracoInt8Array_GetValue_1 = + a.asm.ha).apply(null, arguments) + }), + Vb = (a._emscripten_bind_DracoInt8Array_size_0 = function () { + return (Vb = a._emscripten_bind_DracoInt8Array_size_0 = a.asm.ia).apply( + null, + arguments, + ) + }), + Wb = (a._emscripten_bind_DracoInt8Array___destroy___0 = function () { + return (Wb = a._emscripten_bind_DracoInt8Array___destroy___0 = + a.asm.ja).apply(null, arguments) + }), + Xa = (a._emscripten_bind_DracoUInt8Array_DracoUInt8Array_0 = function () { + return (Xa = a._emscripten_bind_DracoUInt8Array_DracoUInt8Array_0 = + a.asm.ka).apply(null, arguments) + }), + Xb = (a._emscripten_bind_DracoUInt8Array_GetValue_1 = function () { + return (Xb = a._emscripten_bind_DracoUInt8Array_GetValue_1 = + a.asm.la).apply(null, arguments) + }), + Yb = (a._emscripten_bind_DracoUInt8Array_size_0 = function () { + return (Yb = a._emscripten_bind_DracoUInt8Array_size_0 = + a.asm.ma).apply(null, arguments) + }), + Zb = (a._emscripten_bind_DracoUInt8Array___destroy___0 = function () { + return (Zb = a._emscripten_bind_DracoUInt8Array___destroy___0 = + a.asm.na).apply(null, arguments) + }), + Ya = (a._emscripten_bind_DracoInt16Array_DracoInt16Array_0 = function () { + return (Ya = a._emscripten_bind_DracoInt16Array_DracoInt16Array_0 = + a.asm.oa).apply(null, arguments) + }), + $b = (a._emscripten_bind_DracoInt16Array_GetValue_1 = function () { + return ($b = a._emscripten_bind_DracoInt16Array_GetValue_1 = + a.asm.pa).apply(null, arguments) + }), + ac = (a._emscripten_bind_DracoInt16Array_size_0 = function () { + return (ac = a._emscripten_bind_DracoInt16Array_size_0 = + a.asm.qa).apply(null, arguments) + }), + bc = (a._emscripten_bind_DracoInt16Array___destroy___0 = function () { + return (bc = a._emscripten_bind_DracoInt16Array___destroy___0 = + 
a.asm.ra).apply(null, arguments) + }), + Za = (a._emscripten_bind_DracoUInt16Array_DracoUInt16Array_0 = + function () { + return (Za = a._emscripten_bind_DracoUInt16Array_DracoUInt16Array_0 = + a.asm.sa).apply(null, arguments) + }), + cc = (a._emscripten_bind_DracoUInt16Array_GetValue_1 = function () { + return (cc = a._emscripten_bind_DracoUInt16Array_GetValue_1 = + a.asm.ta).apply(null, arguments) + }), + dc = (a._emscripten_bind_DracoUInt16Array_size_0 = function () { + return (dc = a._emscripten_bind_DracoUInt16Array_size_0 = + a.asm.ua).apply(null, arguments) + }), + ec = (a._emscripten_bind_DracoUInt16Array___destroy___0 = function () { + return (ec = a._emscripten_bind_DracoUInt16Array___destroy___0 = + a.asm.va).apply(null, arguments) + }), + $a = (a._emscripten_bind_DracoInt32Array_DracoInt32Array_0 = function () { + return ($a = a._emscripten_bind_DracoInt32Array_DracoInt32Array_0 = + a.asm.wa).apply(null, arguments) + }), + fc = (a._emscripten_bind_DracoInt32Array_GetValue_1 = function () { + return (fc = a._emscripten_bind_DracoInt32Array_GetValue_1 = + a.asm.xa).apply(null, arguments) + }), + gc = (a._emscripten_bind_DracoInt32Array_size_0 = function () { + return (gc = a._emscripten_bind_DracoInt32Array_size_0 = + a.asm.ya).apply(null, arguments) + }), + hc = (a._emscripten_bind_DracoInt32Array___destroy___0 = function () { + return (hc = a._emscripten_bind_DracoInt32Array___destroy___0 = + a.asm.za).apply(null, arguments) + }), + ab = (a._emscripten_bind_DracoUInt32Array_DracoUInt32Array_0 = + function () { + return (ab = a._emscripten_bind_DracoUInt32Array_DracoUInt32Array_0 = + a.asm.Aa).apply(null, arguments) + }), + ic = (a._emscripten_bind_DracoUInt32Array_GetValue_1 = function () { + return (ic = a._emscripten_bind_DracoUInt32Array_GetValue_1 = + a.asm.Ba).apply(null, arguments) + }), + jc = (a._emscripten_bind_DracoUInt32Array_size_0 = function () { + return (jc = a._emscripten_bind_DracoUInt32Array_size_0 = + a.asm.Ca).apply(null, arguments) 
+ }), + kc = (a._emscripten_bind_DracoUInt32Array___destroy___0 = function () { + return (kc = a._emscripten_bind_DracoUInt32Array___destroy___0 = + a.asm.Da).apply(null, arguments) + }), + bb = (a._emscripten_bind_MetadataQuerier_MetadataQuerier_0 = function () { + return (bb = a._emscripten_bind_MetadataQuerier_MetadataQuerier_0 = + a.asm.Ea).apply(null, arguments) + }), + lc = (a._emscripten_bind_MetadataQuerier_HasEntry_2 = function () { + return (lc = a._emscripten_bind_MetadataQuerier_HasEntry_2 = + a.asm.Fa).apply(null, arguments) + }), + mc = (a._emscripten_bind_MetadataQuerier_GetIntEntry_2 = function () { + return (mc = a._emscripten_bind_MetadataQuerier_GetIntEntry_2 = + a.asm.Ga).apply(null, arguments) + }), + nc = (a._emscripten_bind_MetadataQuerier_GetIntEntryArray_3 = + function () { + return (nc = a._emscripten_bind_MetadataQuerier_GetIntEntryArray_3 = + a.asm.Ha).apply(null, arguments) + }), + oc = (a._emscripten_bind_MetadataQuerier_GetDoubleEntry_2 = function () { + return (oc = a._emscripten_bind_MetadataQuerier_GetDoubleEntry_2 = + a.asm.Ia).apply(null, arguments) + }), + pc = (a._emscripten_bind_MetadataQuerier_GetStringEntry_2 = function () { + return (pc = a._emscripten_bind_MetadataQuerier_GetStringEntry_2 = + a.asm.Ja).apply(null, arguments) + }), + qc = (a._emscripten_bind_MetadataQuerier_NumEntries_1 = function () { + return (qc = a._emscripten_bind_MetadataQuerier_NumEntries_1 = + a.asm.Ka).apply(null, arguments) + }), + rc = (a._emscripten_bind_MetadataQuerier_GetEntryName_2 = function () { + return (rc = a._emscripten_bind_MetadataQuerier_GetEntryName_2 = + a.asm.La).apply(null, arguments) + }), + sc = (a._emscripten_bind_MetadataQuerier___destroy___0 = function () { + return (sc = a._emscripten_bind_MetadataQuerier___destroy___0 = + a.asm.Ma).apply(null, arguments) + }), + cb = (a._emscripten_bind_Decoder_Decoder_0 = function () { + return (cb = a._emscripten_bind_Decoder_Decoder_0 = a.asm.Na).apply( + null, + arguments, + ) + }), + 
tc = (a._emscripten_bind_Decoder_DecodeArrayToPointCloud_3 = function () { + return (tc = a._emscripten_bind_Decoder_DecodeArrayToPointCloud_3 = + a.asm.Oa).apply(null, arguments) + }), + uc = (a._emscripten_bind_Decoder_DecodeArrayToMesh_3 = function () { + return (uc = a._emscripten_bind_Decoder_DecodeArrayToMesh_3 = + a.asm.Pa).apply(null, arguments) + }), + vc = (a._emscripten_bind_Decoder_GetAttributeId_2 = function () { + return (vc = a._emscripten_bind_Decoder_GetAttributeId_2 = + a.asm.Qa).apply(null, arguments) + }), + wc = (a._emscripten_bind_Decoder_GetAttributeIdByName_2 = function () { + return (wc = a._emscripten_bind_Decoder_GetAttributeIdByName_2 = + a.asm.Ra).apply(null, arguments) + }), + xc = (a._emscripten_bind_Decoder_GetAttributeIdByMetadataEntry_3 = + function () { + return (xc = + a._emscripten_bind_Decoder_GetAttributeIdByMetadataEntry_3 = + a.asm.Sa).apply(null, arguments) + }), + yc = (a._emscripten_bind_Decoder_GetAttribute_2 = function () { + return (yc = a._emscripten_bind_Decoder_GetAttribute_2 = + a.asm.Ta).apply(null, arguments) + }), + zc = (a._emscripten_bind_Decoder_GetAttributeByUniqueId_2 = function () { + return (zc = a._emscripten_bind_Decoder_GetAttributeByUniqueId_2 = + a.asm.Ua).apply(null, arguments) + }), + Ac = (a._emscripten_bind_Decoder_GetMetadata_1 = function () { + return (Ac = a._emscripten_bind_Decoder_GetMetadata_1 = a.asm.Va).apply( + null, + arguments, + ) + }), + Bc = (a._emscripten_bind_Decoder_GetAttributeMetadata_2 = function () { + return (Bc = a._emscripten_bind_Decoder_GetAttributeMetadata_2 = + a.asm.Wa).apply(null, arguments) + }), + Cc = (a._emscripten_bind_Decoder_GetFaceFromMesh_3 = function () { + return (Cc = a._emscripten_bind_Decoder_GetFaceFromMesh_3 = + a.asm.Xa).apply(null, arguments) + }), + Dc = (a._emscripten_bind_Decoder_GetTriangleStripsFromMesh_2 = + function () { + return (Dc = a._emscripten_bind_Decoder_GetTriangleStripsFromMesh_2 = + a.asm.Ya).apply(null, arguments) + }), + Ec = 
(a._emscripten_bind_Decoder_GetTrianglesUInt16Array_3 = function () { + return (Ec = a._emscripten_bind_Decoder_GetTrianglesUInt16Array_3 = + a.asm.Za).apply(null, arguments) + }), + Fc = (a._emscripten_bind_Decoder_GetTrianglesUInt32Array_3 = function () { + return (Fc = a._emscripten_bind_Decoder_GetTrianglesUInt32Array_3 = + a.asm._a).apply(null, arguments) + }), + Gc = (a._emscripten_bind_Decoder_GetAttributeFloat_3 = function () { + return (Gc = a._emscripten_bind_Decoder_GetAttributeFloat_3 = + a.asm.$a).apply(null, arguments) + }), + Hc = (a._emscripten_bind_Decoder_GetAttributeFloatForAllPoints_3 = + function () { + return (Hc = + a._emscripten_bind_Decoder_GetAttributeFloatForAllPoints_3 = + a.asm.ab).apply(null, arguments) + }), + Ic = (a._emscripten_bind_Decoder_GetAttributeIntForAllPoints_3 = + function () { + return (Ic = + a._emscripten_bind_Decoder_GetAttributeIntForAllPoints_3 = + a.asm.bb).apply(null, arguments) + }), + Jc = (a._emscripten_bind_Decoder_GetAttributeInt8ForAllPoints_3 = + function () { + return (Jc = + a._emscripten_bind_Decoder_GetAttributeInt8ForAllPoints_3 = + a.asm.cb).apply(null, arguments) + }), + Kc = (a._emscripten_bind_Decoder_GetAttributeUInt8ForAllPoints_3 = + function () { + return (Kc = + a._emscripten_bind_Decoder_GetAttributeUInt8ForAllPoints_3 = + a.asm.db).apply(null, arguments) + }), + Lc = (a._emscripten_bind_Decoder_GetAttributeInt16ForAllPoints_3 = + function () { + return (Lc = + a._emscripten_bind_Decoder_GetAttributeInt16ForAllPoints_3 = + a.asm.eb).apply(null, arguments) + }), + Mc = (a._emscripten_bind_Decoder_GetAttributeUInt16ForAllPoints_3 = + function () { + return (Mc = + a._emscripten_bind_Decoder_GetAttributeUInt16ForAllPoints_3 = + a.asm.fb).apply(null, arguments) + }), + Nc = (a._emscripten_bind_Decoder_GetAttributeInt32ForAllPoints_3 = + function () { + return (Nc = + a._emscripten_bind_Decoder_GetAttributeInt32ForAllPoints_3 = + a.asm.gb).apply(null, arguments) + }), + Oc = 
(a._emscripten_bind_Decoder_GetAttributeUInt32ForAllPoints_3 = + function () { + return (Oc = + a._emscripten_bind_Decoder_GetAttributeUInt32ForAllPoints_3 = + a.asm.hb).apply(null, arguments) + }), + Pc = (a._emscripten_bind_Decoder_GetAttributeDataArrayForAllPoints_5 = + function () { + return (Pc = + a._emscripten_bind_Decoder_GetAttributeDataArrayForAllPoints_5 = + a.asm.ib).apply(null, arguments) + }), + Qc = (a._emscripten_bind_Decoder_SkipAttributeTransform_1 = function () { + return (Qc = a._emscripten_bind_Decoder_SkipAttributeTransform_1 = + a.asm.jb).apply(null, arguments) + }), + Rc = (a._emscripten_bind_Decoder_GetEncodedGeometryType_Deprecated_1 = + function () { + return (Rc = + a._emscripten_bind_Decoder_GetEncodedGeometryType_Deprecated_1 = + a.asm.kb).apply(null, arguments) + }), + Sc = (a._emscripten_bind_Decoder_DecodeBufferToPointCloud_2 = + function () { + return (Sc = a._emscripten_bind_Decoder_DecodeBufferToPointCloud_2 = + a.asm.lb).apply(null, arguments) + }), + Tc = (a._emscripten_bind_Decoder_DecodeBufferToMesh_2 = function () { + return (Tc = a._emscripten_bind_Decoder_DecodeBufferToMesh_2 = + a.asm.mb).apply(null, arguments) + }), + Uc = (a._emscripten_bind_Decoder___destroy___0 = function () { + return (Uc = a._emscripten_bind_Decoder___destroy___0 = a.asm.nb).apply( + null, + arguments, + ) + }), + Vc = + (a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_INVALID_TRANSFORM = + function () { + return (Vc = + a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_INVALID_TRANSFORM = + a.asm.ob).apply(null, arguments) + }), + Wc = + (a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_NO_TRANSFORM = + function () { + return (Wc = + a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_NO_TRANSFORM = + a.asm.pb).apply(null, arguments) + }), + Xc = + (a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_QUANTIZATION_TRANSFORM = + function () { + return (Xc = + 
a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_QUANTIZATION_TRANSFORM = + a.asm.qb).apply(null, arguments) + }), + Yc = + (a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_OCTAHEDRON_TRANSFORM = + function () { + return (Yc = + a._emscripten_enum_draco_AttributeTransformType_ATTRIBUTE_OCTAHEDRON_TRANSFORM = + a.asm.rb).apply(null, arguments) + }), + Zc = (a._emscripten_enum_draco_GeometryAttribute_Type_INVALID = + function () { + return (Zc = a._emscripten_enum_draco_GeometryAttribute_Type_INVALID = + a.asm.sb).apply(null, arguments) + }), + $c = (a._emscripten_enum_draco_GeometryAttribute_Type_POSITION = + function () { + return ($c = + a._emscripten_enum_draco_GeometryAttribute_Type_POSITION = + a.asm.tb).apply(null, arguments) + }), + ad = (a._emscripten_enum_draco_GeometryAttribute_Type_NORMAL = + function () { + return (ad = a._emscripten_enum_draco_GeometryAttribute_Type_NORMAL = + a.asm.ub).apply(null, arguments) + }), + bd = (a._emscripten_enum_draco_GeometryAttribute_Type_COLOR = + function () { + return (bd = a._emscripten_enum_draco_GeometryAttribute_Type_COLOR = + a.asm.vb).apply(null, arguments) + }), + cd = (a._emscripten_enum_draco_GeometryAttribute_Type_TEX_COORD = + function () { + return (cd = + a._emscripten_enum_draco_GeometryAttribute_Type_TEX_COORD = + a.asm.wb).apply(null, arguments) + }), + dd = (a._emscripten_enum_draco_GeometryAttribute_Type_GENERIC = + function () { + return (dd = a._emscripten_enum_draco_GeometryAttribute_Type_GENERIC = + a.asm.xb).apply(null, arguments) + }), + ed = (a._emscripten_enum_draco_EncodedGeometryType_INVALID_GEOMETRY_TYPE = + function () { + return (ed = + a._emscripten_enum_draco_EncodedGeometryType_INVALID_GEOMETRY_TYPE = + a.asm.yb).apply(null, arguments) + }), + fd = (a._emscripten_enum_draco_EncodedGeometryType_POINT_CLOUD = + function () { + return (fd = + a._emscripten_enum_draco_EncodedGeometryType_POINT_CLOUD = + a.asm.zb).apply(null, arguments) + }), + gd = 
(a._emscripten_enum_draco_EncodedGeometryType_TRIANGULAR_MESH = + function () { + return (gd = + a._emscripten_enum_draco_EncodedGeometryType_TRIANGULAR_MESH = + a.asm.Ab).apply(null, arguments) + }), + hd = (a._emscripten_enum_draco_DataType_DT_INVALID = function () { + return (hd = a._emscripten_enum_draco_DataType_DT_INVALID = + a.asm.Bb).apply(null, arguments) + }), + id = (a._emscripten_enum_draco_DataType_DT_INT8 = function () { + return (id = a._emscripten_enum_draco_DataType_DT_INT8 = + a.asm.Cb).apply(null, arguments) + }), + jd = (a._emscripten_enum_draco_DataType_DT_UINT8 = function () { + return (jd = a._emscripten_enum_draco_DataType_DT_UINT8 = + a.asm.Db).apply(null, arguments) + }), + kd = (a._emscripten_enum_draco_DataType_DT_INT16 = function () { + return (kd = a._emscripten_enum_draco_DataType_DT_INT16 = + a.asm.Eb).apply(null, arguments) + }), + ld = (a._emscripten_enum_draco_DataType_DT_UINT16 = function () { + return (ld = a._emscripten_enum_draco_DataType_DT_UINT16 = + a.asm.Fb).apply(null, arguments) + }), + md = (a._emscripten_enum_draco_DataType_DT_INT32 = function () { + return (md = a._emscripten_enum_draco_DataType_DT_INT32 = + a.asm.Gb).apply(null, arguments) + }), + nd = (a._emscripten_enum_draco_DataType_DT_UINT32 = function () { + return (nd = a._emscripten_enum_draco_DataType_DT_UINT32 = + a.asm.Hb).apply(null, arguments) + }), + od = (a._emscripten_enum_draco_DataType_DT_INT64 = function () { + return (od = a._emscripten_enum_draco_DataType_DT_INT64 = + a.asm.Ib).apply(null, arguments) + }), + pd = (a._emscripten_enum_draco_DataType_DT_UINT64 = function () { + return (pd = a._emscripten_enum_draco_DataType_DT_UINT64 = + a.asm.Jb).apply(null, arguments) + }), + qd = (a._emscripten_enum_draco_DataType_DT_FLOAT32 = function () { + return (qd = a._emscripten_enum_draco_DataType_DT_FLOAT32 = + a.asm.Kb).apply(null, arguments) + }), + rd = (a._emscripten_enum_draco_DataType_DT_FLOAT64 = function () { + return (rd = 
a._emscripten_enum_draco_DataType_DT_FLOAT64 = + a.asm.Lb).apply(null, arguments) + }), + sd = (a._emscripten_enum_draco_DataType_DT_BOOL = function () { + return (sd = a._emscripten_enum_draco_DataType_DT_BOOL = + a.asm.Mb).apply(null, arguments) + }), + td = (a._emscripten_enum_draco_DataType_DT_TYPES_COUNT = function () { + return (td = a._emscripten_enum_draco_DataType_DT_TYPES_COUNT = + a.asm.Nb).apply(null, arguments) + }), + ud = (a._emscripten_enum_draco_StatusCode_OK = function () { + return (ud = a._emscripten_enum_draco_StatusCode_OK = a.asm.Ob).apply( + null, + arguments, + ) + }), + vd = (a._emscripten_enum_draco_StatusCode_DRACO_ERROR = function () { + return (vd = a._emscripten_enum_draco_StatusCode_DRACO_ERROR = + a.asm.Pb).apply(null, arguments) + }), + wd = (a._emscripten_enum_draco_StatusCode_IO_ERROR = function () { + return (wd = a._emscripten_enum_draco_StatusCode_IO_ERROR = + a.asm.Qb).apply(null, arguments) + }), + xd = (a._emscripten_enum_draco_StatusCode_INVALID_PARAMETER = + function () { + return (xd = a._emscripten_enum_draco_StatusCode_INVALID_PARAMETER = + a.asm.Rb).apply(null, arguments) + }), + yd = (a._emscripten_enum_draco_StatusCode_UNSUPPORTED_VERSION = + function () { + return (yd = a._emscripten_enum_draco_StatusCode_UNSUPPORTED_VERSION = + a.asm.Sb).apply(null, arguments) + }), + zd = (a._emscripten_enum_draco_StatusCode_UNKNOWN_VERSION = function () { + return (zd = a._emscripten_enum_draco_StatusCode_UNKNOWN_VERSION = + a.asm.Tb).apply(null, arguments) + }) + a._free = function () { + return (a._free = a.asm.Ub).apply(null, arguments) + } + var gb = (a._malloc = function () { + return (gb = a._malloc = a.asm.Vb).apply(null, arguments) + }) + a.callRuntimeCallbacks = aa + var qa + ma = function b() { + qa || W() + qa || (ma = b) + } + a.run = W + if (a.preInit) + for ( + 'function' == typeof a.preInit && (a.preInit = [a.preInit]); + 0 < a.preInit.length; + + ) + a.preInit.pop()() + W() + v.prototype = 
Object.create(v.prototype) + v.prototype.constructor = v + v.prototype.__class__ = v + v.__cache__ = {} + a.WrapperObject = v + a.getCache = x + a.wrapPointer = S + a.castObject = function (b, c) { + return S(b.ptr, c) + } + a.NULL = S(0) + a.destroy = function (b) { + if (!b.__destroy__) + throw 'Error: Cannot destroy object. (Did you create it yourself?)' + b.__destroy__() + delete x(b.__class__)[b.ptr] + } + a.compare = function (b, c) { + return b.ptr === c.ptr + } + a.getPointer = function (b) { + return b.ptr + } + a.getClass = function (b) { + return b.__class__ + } + var r = { + buffer: 0, + size: 0, + pos: 0, + temps: [], + needed: 0, + prepare: function () { + if (r.needed) { + for (var b = 0; b < r.temps.length; b++) a._free(r.temps[b]) + r.temps.length = 0 + a._free(r.buffer) + r.buffer = 0 + r.size += r.needed + r.needed = 0 + } + r.buffer || + ((r.size += 128), (r.buffer = a._malloc(r.size)), q(r.buffer)) + r.pos = 0 + }, + alloc: function (b, c) { + q(r.buffer) + b = b.length * c.BYTES_PER_ELEMENT + b = (b + 7) & -8 + r.pos + b >= r.size + ? 
(q(0 < b), (r.needed += b), (c = a._malloc(b)), r.temps.push(c)) + : ((c = r.buffer + r.pos), (r.pos += b)) + return c + }, + copy: function (b, c, d) { + d >>>= 0 + switch (c.BYTES_PER_ELEMENT) { + case 2: + d >>>= 1 + break + case 4: + d >>>= 2 + break + case 8: + d >>>= 3 + } + for (var f = 0; f < b.length; f++) c[d + f] = b[f] + }, + } + ca.prototype = Object.create(v.prototype) + ca.prototype.constructor = ca + ca.prototype.__class__ = ca + ca.__cache__ = {} + a.VoidPtr = ca + ca.prototype.__destroy__ = ca.prototype.__destroy__ = function () { + hb(this.ptr) + } + T.prototype = Object.create(v.prototype) + T.prototype.constructor = T + T.prototype.__class__ = T + T.__cache__ = {} + a.DecoderBuffer = T + T.prototype.Init = T.prototype.Init = function (b, c) { + var d = this.ptr + r.prepare() + 'object' == typeof b && (b = va(b)) + c && 'object' === typeof c && (c = c.ptr) + ib(d, b, c) + } + T.prototype.__destroy__ = T.prototype.__destroy__ = function () { + jb(this.ptr) + } + R.prototype = Object.create(v.prototype) + R.prototype.constructor = R + R.prototype.__class__ = R + R.__cache__ = {} + a.AttributeTransformData = R + R.prototype.transform_type = R.prototype.transform_type = function () { + return kb(this.ptr) + } + R.prototype.__destroy__ = R.prototype.__destroy__ = function () { + lb(this.ptr) + } + Z.prototype = Object.create(v.prototype) + Z.prototype.constructor = Z + Z.prototype.__class__ = Z + Z.__cache__ = {} + a.GeometryAttribute = Z + Z.prototype.__destroy__ = Z.prototype.__destroy__ = function () { + mb(this.ptr) + } + w.prototype = Object.create(v.prototype) + w.prototype.constructor = w + w.prototype.__class__ = w + w.__cache__ = {} + a.PointAttribute = w + w.prototype.size = w.prototype.size = function () { + return nb(this.ptr) + } + w.prototype.GetAttributeTransformData = + w.prototype.GetAttributeTransformData = function () { + return S(ob(this.ptr), R) + } + w.prototype.attribute_type = w.prototype.attribute_type = function () { + 
return pb(this.ptr) + } + w.prototype.data_type = w.prototype.data_type = function () { + return qb(this.ptr) + } + w.prototype.num_components = w.prototype.num_components = function () { + return rb(this.ptr) + } + w.prototype.normalized = w.prototype.normalized = function () { + return !!sb(this.ptr) + } + w.prototype.byte_stride = w.prototype.byte_stride = function () { + return tb(this.ptr) + } + w.prototype.byte_offset = w.prototype.byte_offset = function () { + return ub(this.ptr) + } + w.prototype.unique_id = w.prototype.unique_id = function () { + return vb(this.ptr) + } + w.prototype.__destroy__ = w.prototype.__destroy__ = function () { + wb(this.ptr) + } + C.prototype = Object.create(v.prototype) + C.prototype.constructor = C + C.prototype.__class__ = C + C.__cache__ = {} + a.AttributeQuantizationTransform = C + C.prototype.InitFromAttribute = C.prototype.InitFromAttribute = function ( + b, + ) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return !!xb(c, b) + } + C.prototype.quantization_bits = C.prototype.quantization_bits = + function () { + return yb(this.ptr) + } + C.prototype.min_value = C.prototype.min_value = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return zb(c, b) + } + C.prototype.range = C.prototype.range = function () { + return Ab(this.ptr) + } + C.prototype.__destroy__ = C.prototype.__destroy__ = function () { + Bb(this.ptr) + } + G.prototype = Object.create(v.prototype) + G.prototype.constructor = G + G.prototype.__class__ = G + G.__cache__ = {} + a.AttributeOctahedronTransform = G + G.prototype.InitFromAttribute = G.prototype.InitFromAttribute = function ( + b, + ) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return !!Cb(c, b) + } + G.prototype.quantization_bits = G.prototype.quantization_bits = + function () { + return Db(this.ptr) + } + G.prototype.__destroy__ = G.prototype.__destroy__ = function () { + Eb(this.ptr) + } + H.prototype = 
Object.create(v.prototype) + H.prototype.constructor = H + H.prototype.__class__ = H + H.__cache__ = {} + a.PointCloud = H + H.prototype.num_attributes = H.prototype.num_attributes = function () { + return Fb(this.ptr) + } + H.prototype.num_points = H.prototype.num_points = function () { + return Gb(this.ptr) + } + H.prototype.__destroy__ = H.prototype.__destroy__ = function () { + Hb(this.ptr) + } + E.prototype = Object.create(v.prototype) + E.prototype.constructor = E + E.prototype.__class__ = E + E.__cache__ = {} + a.Mesh = E + E.prototype.num_faces = E.prototype.num_faces = function () { + return Ib(this.ptr) + } + E.prototype.num_attributes = E.prototype.num_attributes = function () { + return Jb(this.ptr) + } + E.prototype.num_points = E.prototype.num_points = function () { + return Kb(this.ptr) + } + E.prototype.__destroy__ = E.prototype.__destroy__ = function () { + Lb(this.ptr) + } + U.prototype = Object.create(v.prototype) + U.prototype.constructor = U + U.prototype.__class__ = U + U.__cache__ = {} + a.Metadata = U + U.prototype.__destroy__ = U.prototype.__destroy__ = function () { + Mb(this.ptr) + } + B.prototype = Object.create(v.prototype) + B.prototype.constructor = B + B.prototype.__class__ = B + B.__cache__ = {} + a.Status = B + B.prototype.code = B.prototype.code = function () { + return Nb(this.ptr) + } + B.prototype.ok = B.prototype.ok = function () { + return !!Ob(this.ptr) + } + B.prototype.error_msg = B.prototype.error_msg = function () { + return A(Pb(this.ptr)) + } + B.prototype.__destroy__ = B.prototype.__destroy__ = function () { + Qb(this.ptr) + } + I.prototype = Object.create(v.prototype) + I.prototype.constructor = I + I.prototype.__class__ = I + I.__cache__ = {} + a.DracoFloat32Array = I + I.prototype.GetValue = I.prototype.GetValue = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return Rb(c, b) + } + I.prototype.size = I.prototype.size = function () { + return Sb(this.ptr) + } + 
I.prototype.__destroy__ = I.prototype.__destroy__ = function () { + Tb(this.ptr) + } + J.prototype = Object.create(v.prototype) + J.prototype.constructor = J + J.prototype.__class__ = J + J.__cache__ = {} + a.DracoInt8Array = J + J.prototype.GetValue = J.prototype.GetValue = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return Ub(c, b) + } + J.prototype.size = J.prototype.size = function () { + return Vb(this.ptr) + } + J.prototype.__destroy__ = J.prototype.__destroy__ = function () { + Wb(this.ptr) + } + K.prototype = Object.create(v.prototype) + K.prototype.constructor = K + K.prototype.__class__ = K + K.__cache__ = {} + a.DracoUInt8Array = K + K.prototype.GetValue = K.prototype.GetValue = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return Xb(c, b) + } + K.prototype.size = K.prototype.size = function () { + return Yb(this.ptr) + } + K.prototype.__destroy__ = K.prototype.__destroy__ = function () { + Zb(this.ptr) + } + L.prototype = Object.create(v.prototype) + L.prototype.constructor = L + L.prototype.__class__ = L + L.__cache__ = {} + a.DracoInt16Array = L + L.prototype.GetValue = L.prototype.GetValue = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return $b(c, b) + } + L.prototype.size = L.prototype.size = function () { + return ac(this.ptr) + } + L.prototype.__destroy__ = L.prototype.__destroy__ = function () { + bc(this.ptr) + } + M.prototype = Object.create(v.prototype) + M.prototype.constructor = M + M.prototype.__class__ = M + M.__cache__ = {} + a.DracoUInt16Array = M + M.prototype.GetValue = M.prototype.GetValue = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return cc(c, b) + } + M.prototype.size = M.prototype.size = function () { + return dc(this.ptr) + } + M.prototype.__destroy__ = M.prototype.__destroy__ = function () { + ec(this.ptr) + } + N.prototype = Object.create(v.prototype) + N.prototype.constructor = N + 
N.prototype.__class__ = N + N.__cache__ = {} + a.DracoInt32Array = N + N.prototype.GetValue = N.prototype.GetValue = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return fc(c, b) + } + N.prototype.size = N.prototype.size = function () { + return gc(this.ptr) + } + N.prototype.__destroy__ = N.prototype.__destroy__ = function () { + hc(this.ptr) + } + O.prototype = Object.create(v.prototype) + O.prototype.constructor = O + O.prototype.__class__ = O + O.__cache__ = {} + a.DracoUInt32Array = O + O.prototype.GetValue = O.prototype.GetValue = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return ic(c, b) + } + O.prototype.size = O.prototype.size = function () { + return jc(this.ptr) + } + O.prototype.__destroy__ = O.prototype.__destroy__ = function () { + kc(this.ptr) + } + y.prototype = Object.create(v.prototype) + y.prototype.constructor = y + y.prototype.__class__ = y + y.__cache__ = {} + a.MetadataQuerier = y + y.prototype.HasEntry = y.prototype.HasEntry = function (b, c) { + var d = this.ptr + r.prepare() + b && 'object' === typeof b && (b = b.ptr) + c = c && 'object' === typeof c ? c.ptr : da(c) + return !!lc(d, b, c) + } + y.prototype.GetIntEntry = y.prototype.GetIntEntry = function (b, c) { + var d = this.ptr + r.prepare() + b && 'object' === typeof b && (b = b.ptr) + c = c && 'object' === typeof c ? c.ptr : da(c) + return mc(d, b, c) + } + y.prototype.GetIntEntryArray = y.prototype.GetIntEntryArray = function ( + b, + c, + d, + ) { + var f = this.ptr + r.prepare() + b && 'object' === typeof b && (b = b.ptr) + c = c && 'object' === typeof c ? c.ptr : da(c) + d && 'object' === typeof d && (d = d.ptr) + nc(f, b, c, d) + } + y.prototype.GetDoubleEntry = y.prototype.GetDoubleEntry = function (b, c) { + var d = this.ptr + r.prepare() + b && 'object' === typeof b && (b = b.ptr) + c = c && 'object' === typeof c ? 
c.ptr : da(c) + return oc(d, b, c) + } + y.prototype.GetStringEntry = y.prototype.GetStringEntry = function (b, c) { + var d = this.ptr + r.prepare() + b && 'object' === typeof b && (b = b.ptr) + c = c && 'object' === typeof c ? c.ptr : da(c) + return A(pc(d, b, c)) + } + y.prototype.NumEntries = y.prototype.NumEntries = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return qc(c, b) + } + y.prototype.GetEntryName = y.prototype.GetEntryName = function (b, c) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return A(rc(d, b, c)) + } + y.prototype.__destroy__ = y.prototype.__destroy__ = function () { + sc(this.ptr) + } + h.prototype = Object.create(v.prototype) + h.prototype.constructor = h + h.prototype.__class__ = h + h.__cache__ = {} + a.Decoder = h + h.prototype.DecodeArrayToPointCloud = h.prototype.DecodeArrayToPointCloud = + function (b, c, d) { + var f = this.ptr + r.prepare() + 'object' == typeof b && (b = va(b)) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return S(tc(f, b, c, d), B) + } + h.prototype.DecodeArrayToMesh = h.prototype.DecodeArrayToMesh = function ( + b, + c, + d, + ) { + var f = this.ptr + r.prepare() + 'object' == typeof b && (b = va(b)) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return S(uc(f, b, c, d), B) + } + h.prototype.GetAttributeId = h.prototype.GetAttributeId = function (b, c) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return vc(d, b, c) + } + h.prototype.GetAttributeIdByName = h.prototype.GetAttributeIdByName = + function (b, c) { + var d = this.ptr + r.prepare() + b && 'object' === typeof b && (b = b.ptr) + c = c && 'object' === typeof c ? 
c.ptr : da(c) + return wc(d, b, c) + } + h.prototype.GetAttributeIdByMetadataEntry = + h.prototype.GetAttributeIdByMetadataEntry = function (b, c, d) { + var f = this.ptr + r.prepare() + b && 'object' === typeof b && (b = b.ptr) + c = c && 'object' === typeof c ? c.ptr : da(c) + d = d && 'object' === typeof d ? d.ptr : da(d) + return xc(f, b, c, d) + } + h.prototype.GetAttribute = h.prototype.GetAttribute = function (b, c) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return S(yc(d, b, c), w) + } + h.prototype.GetAttributeByUniqueId = h.prototype.GetAttributeByUniqueId = + function (b, c) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return S(zc(d, b, c), w) + } + h.prototype.GetMetadata = h.prototype.GetMetadata = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return S(Ac(c, b), U) + } + h.prototype.GetAttributeMetadata = h.prototype.GetAttributeMetadata = + function (b, c) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return S(Bc(d, b, c), U) + } + h.prototype.GetFaceFromMesh = h.prototype.GetFaceFromMesh = function ( + b, + c, + d, + ) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Cc(f, b, c, d) + } + h.prototype.GetTriangleStripsFromMesh = + h.prototype.GetTriangleStripsFromMesh = function (b, c) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return Dc(d, b, c) + } + h.prototype.GetTrianglesUInt16Array = h.prototype.GetTrianglesUInt16Array = + function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Ec(f, b, c, d) + } + 
h.prototype.GetTrianglesUInt32Array = h.prototype.GetTrianglesUInt32Array = + function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Fc(f, b, c, d) + } + h.prototype.GetAttributeFloat = h.prototype.GetAttributeFloat = function ( + b, + c, + d, + ) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Gc(f, b, c, d) + } + h.prototype.GetAttributeFloatForAllPoints = + h.prototype.GetAttributeFloatForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Hc(f, b, c, d) + } + h.prototype.GetAttributeIntForAllPoints = + h.prototype.GetAttributeIntForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Ic(f, b, c, d) + } + h.prototype.GetAttributeInt8ForAllPoints = + h.prototype.GetAttributeInt8ForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Jc(f, b, c, d) + } + h.prototype.GetAttributeUInt8ForAllPoints = + h.prototype.GetAttributeUInt8ForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Kc(f, b, c, d) + } + h.prototype.GetAttributeInt16ForAllPoints = + h.prototype.GetAttributeInt16ForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + 
return !!Lc(f, b, c, d) + } + h.prototype.GetAttributeUInt16ForAllPoints = + h.prototype.GetAttributeUInt16ForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Mc(f, b, c, d) + } + h.prototype.GetAttributeInt32ForAllPoints = + h.prototype.GetAttributeInt32ForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Nc(f, b, c, d) + } + h.prototype.GetAttributeUInt32ForAllPoints = + h.prototype.GetAttributeUInt32ForAllPoints = function (b, c, d) { + var f = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + return !!Oc(f, b, c, d) + } + h.prototype.GetAttributeDataArrayForAllPoints = + h.prototype.GetAttributeDataArrayForAllPoints = function (b, c, d, f, t) { + var Y = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + d && 'object' === typeof d && (d = d.ptr) + f && 'object' === typeof f && (f = f.ptr) + t && 'object' === typeof t && (t = t.ptr) + return !!Pc(Y, b, c, d, f, t) + } + h.prototype.SkipAttributeTransform = h.prototype.SkipAttributeTransform = + function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + Qc(c, b) + } + h.prototype.GetEncodedGeometryType_Deprecated = + h.prototype.GetEncodedGeometryType_Deprecated = function (b) { + var c = this.ptr + b && 'object' === typeof b && (b = b.ptr) + return Rc(c, b) + } + h.prototype.DecodeBufferToPointCloud = + h.prototype.DecodeBufferToPointCloud = function (b, c) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return S(Sc(d, b, c), B) + } + h.prototype.DecodeBufferToMesh = h.prototype.DecodeBufferToMesh = 
function ( + b, + c, + ) { + var d = this.ptr + b && 'object' === typeof b && (b = b.ptr) + c && 'object' === typeof c && (c = c.ptr) + return S(Tc(d, b, c), B) + } + h.prototype.__destroy__ = h.prototype.__destroy__ = function () { + Uc(this.ptr) + } + ;(function () { + function b() { + a.ATTRIBUTE_INVALID_TRANSFORM = Vc() + a.ATTRIBUTE_NO_TRANSFORM = Wc() + a.ATTRIBUTE_QUANTIZATION_TRANSFORM = Xc() + a.ATTRIBUTE_OCTAHEDRON_TRANSFORM = Yc() + a.INVALID = Zc() + a.POSITION = $c() + a.NORMAL = ad() + a.COLOR = bd() + a.TEX_COORD = cd() + a.GENERIC = dd() + a.INVALID_GEOMETRY_TYPE = ed() + a.POINT_CLOUD = fd() + a.TRIANGULAR_MESH = gd() + a.DT_INVALID = hd() + a.DT_INT8 = id() + a.DT_UINT8 = jd() + a.DT_INT16 = kd() + a.DT_UINT16 = ld() + a.DT_INT32 = md() + a.DT_UINT32 = nd() + a.DT_INT64 = od() + a.DT_UINT64 = pd() + a.DT_FLOAT32 = qd() + a.DT_FLOAT64 = rd() + a.DT_BOOL = sd() + a.DT_TYPES_COUNT = td() + a.OK = ud() + a.DRACO_ERROR = vd() + a.IO_ERROR = wd() + a.INVALID_PARAMETER = xd() + a.UNSUPPORTED_VERSION = yd() + a.UNKNOWN_VERSION = zd() + } + Ha ? b() : ua.unshift(b) + })() + a.mainCallbacks = ua + if ('function' === typeof a.onModuleParsed) a.onModuleParsed() + a.Decoder.prototype.GetEncodedGeometryType = function (b) { + if (b.__class__ && b.__class__ === a.DecoderBuffer) + return a.Decoder.prototype.GetEncodedGeometryType_Deprecated(b) + if (8 > b.byteLength) return a.INVALID_GEOMETRY_TYPE + switch (b[7]) { + case 0: + return a.POINT_CLOUD + case 1: + return a.TRIANGULAR_MESH + default: + return a.INVALID_GEOMETRY_TYPE + } + } + return p.ready + } +})() +'object' === typeof exports && 'object' === typeof module + ? (module.exports = DracoDecoderModule) + : 'function' === typeof define && define.amd + ? 
define([], function () { + return DracoDecoderModule + }) + : 'object' === typeof exports && + (exports.DracoDecoderModule = DracoDecoderModule) diff --git a/search-index.json b/search-index.json new file mode 100644 index 00000000..65a5b9b0 --- /dev/null +++ b/search-index.json @@ -0,0 +1 @@ +[{"documents":[{"i":1,"t":"About Vac","u":"/","b":["About Vac"]},{"i":3,"t":"Current job openings","u":"/join-us","b":["Join Us"]},{"i":5,"t":"","u":"/media","b":[]},{"i":7,"t":"Join the community","u":"/community","b":["Community"]},{"i":9,"t":"","u":"/principles","b":[]},{"i":33,"t":"Privacy Policy","u":"/privacy-policy","b":[]},{"i":53,"t":"Vac RFC Process","u":"/rfcprocess","b":[]},{"i":55,"t":"Vac Deep Research","u":"/deepresearch","b":[]},{"i":63,"t":"Contribute","u":"/contribute","b":[]},{"i":68,"t":"Security","u":"/security","b":[]},{"i":70,"t":"Vac R&D Service Units","u":"/vsus","b":[]},{"i":88,"t":"Publications","u":"/publications","b":[]},{"i":93,"t":"Vac Incubator Projects","u":"/vips","b":[]},{"i":97,"t":"Terms of 
Use","u":"/terms","b":[]}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/1",[0,1.237]],["t/3",[1,1.839,2,1.839,3,1.839]],["t/5",[]],["t/7",[4,2.232,5,2.232]],["t/9",[]],["t/33",[6,2.232,7,2.232]],["t/53",[0,0.802,8,1.839,9,1.839]],["t/55",[0,0.802,10,1.839,11,1.839]],["t/63",[12,2.839]],["t/68",[13,2.839]],["t/70",[0,0.682,14,1.564,15,1.564,16,1.564]],["t/88",[17,2.839]],["t/93",[0,0.802,18,1.839,19,1.839]],["t/97",[20,2.232,21,2.232]]],"invertedIndex":[["commun",{"_index":5,"t":{"7":{"position":[[9,9]]}}}],["contribut",{"_index":12,"t":{"63":{"position":[[0,10]]}}}],["current",{"_index":1,"t":{"3":{"position":[[0,7]]}}}],["deep",{"_index":10,"t":{"55":{"position":[[4,4]]}}}],["incub",{"_index":18,"t":{"93":{"position":[[4,9]]}}}],["job",{"_index":2,"t":{"3":{"position":[[8,3]]}}}],["join",{"_index":4,"t":{"7":{"position":[[0,4]]}}}],["open",{"_index":3,"t":{"3":{"position":[[12,8]]}}}],["polici",{"_index":7,"t":{"33":{"position":[[8,6]]}}}],["privaci",{"_index":6,"t":{"33":{"position":[[0,7]]}}}],["process",{"_index":9,"t":{"53":{"position":[[8,7]]}}}],["project",{"_index":19,"t":{"93":{"position":[[14,8]]}}}],["public",{"_index":17,"t":{"88":{"position":[[0,12]]}}}],["r&d",{"_index":14,"t":{"70":{"position":[[4,3]]}}}],["research",{"_index":11,"t":{"55":{"position":[[9,8]]}}}],["rfc",{"_index":8,"t":{"53":{"position":[[4,3]]}}}],["secur",{"_index":13,"t":{"68":{"position":[[0,8]]}}}],["servic",{"_index":15,"t":{"70":{"position":[[8,7]]}}}],["term",{"_index":20,"t":{"97":{"position":[[0,5]]}}}],["unit",{"_index":16,"t":{"70":{"position":[[16,5]]}}}],["us",{"_index":21,"t":{"97":{"position":[[9,3]]}}}],["vac",{"_index":0,"t":{"1":{"position":[[6,3]]},"53":{"position":[[0,3]]},"55":{"position":[[0,3]]},"70":{"position":[[0,3]]},"93":{"position":[[0,3]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":11,"t":"Principles","u":"/principles","h":"","p":9},{"i":13,"t":"I. Liberty","u":"/principles","h":"#i-liberty","p":9},{"i":15,"t":"II. 
Censorship resistance","u":"/principles","h":"#ii-censorship-resistance","p":9},{"i":17,"t":"III. Security","u":"/principles","h":"#iii-security","p":9},{"i":19,"t":"IV. Privacy","u":"/principles","h":"#iv-privacy","p":9},{"i":21,"t":"V. Transparency","u":"/principles","h":"#v-transparency","p":9},{"i":23,"t":"VI. Openness","u":"/principles","h":"#vi-openness","p":9},{"i":25,"t":"VII. Decentralisation","u":"/principles","h":"#vii-decentralisation","p":9},{"i":27,"t":"VIII. Inclusivity","u":"/principles","h":"#viii-inclusivity","p":9},{"i":29,"t":"IX. Continuance","u":"/principles","h":"#ix-continuance","p":9},{"i":31,"t":"X. Resourcefulness","u":"/principles","h":"#x-resourcefulness","p":9},{"i":35,"t":"1) Who we are","u":"/privacy-policy","h":"#1-who-we-are","p":33},{"i":37,"t":"2) We limit the collection and processing of personal data from your use of the Website","u":"/privacy-policy","h":"#2-we-limit-the-collection-and-processing-of-personal-data-from-your-use-of-the-website","p":33},{"i":39,"t":"3) Third party processing of personal data","u":"/privacy-policy","h":"#3-third-party-processing-of-personal-data","p":33},{"i":41,"t":"4) Security measures we take in respect of the Website","u":"/privacy-policy","h":"#4-security-measures-we-take-in-respect-of-the-website","p":33},{"i":43,"t":"5) Exporting data outside the European Union and Switzerland","u":"/privacy-policy","h":"#5-exporting-data-outside-the-european-union-and-switzerland","p":33},{"i":45,"t":"6) Your choices and rights","u":"/privacy-policy","h":"#6-your-choices-and-rights","p":33},{"i":47,"t":"7) Third party links","u":"/privacy-policy","h":"#7-third-party-links","p":33},{"i":49,"t":"8) This Privacy Policy might change","u":"/privacy-policy","h":"#8-this-privacy-policy-might-change","p":33},{"i":51,"t":"9) Contact information","u":"/privacy-policy","h":"#9-contact-information","p":33},{"i":57,"t":"Zero-Knowledge Proofs","u":"/deepresearch","h":"#zero-knowledge-proofs","p":55},{"i":59,"t":"Libp2p 
Gossipsub Improvements","u":"/deepresearch","h":"#libp2p-gossipsub-improvements","p":55},{"i":61,"t":"Anonymisation Networks","u":"/deepresearch","h":"#anonymisation-networks","p":55},{"i":64,"t":"How to Contribute","u":"/contribute","h":"#how-to-contribute","p":63},{"i":66,"t":"What to Contribute","u":"/contribute","h":"#what-to-contribute","p":63},{"i":72,"t":"P2P","u":"/vsus","h":"#p2p","p":70},{"i":74,"t":"Token Economics (TKE)","u":"/vsus","h":"#token-economics-tke","p":70},{"i":76,"t":"Distributed Systems Testing (DST)","u":"/vsus","h":"#distributed-systems-testing-dst","p":70},{"i":78,"t":"Quality Assurance (QA)","u":"/vsus","h":"#quality-assurance-qa","p":70},{"i":80,"t":"Smart Contracts (SC)","u":"/vsus","h":"#smart-contracts-sc","p":70},{"i":82,"t":"Nim","u":"/vsus","h":"#nim","p":70},{"i":84,"t":"Applied Cryptography & ZK (ACZ)","u":"/vsus","h":"#applied-cryptography--zk-acz","p":70},{"i":86,"t":"RFC","u":"/vsus","h":"#rfc","p":70},{"i":89,"t":"Papers","u":"/publications","h":"#papers","p":88},{"i":91,"t":"Write-ups","u":"/publications","h":"#write-ups","p":88},{"i":95,"t":"Nescience","u":"/vips","h":"#nescience","p":93},{"i":99,"t":"1) Who we are","u":"/terms","h":"#1-who-we-are","p":97},{"i":101,"t":"2) Disclaimers","u":"/terms","h":"#2-disclaimers","p":97},{"i":103,"t":"3) Forward looking statements","u":"/terms","h":"#3-forward-looking-statements","p":97},{"i":105,"t":"4) Intellectual property rights","u":"/terms","h":"#4-intellectual-property-rights","p":97},{"i":107,"t":"5) Third-party website links","u":"/terms","h":"#5-third-party-website-links","p":97},{"i":109,"t":"6) Limitation of liability","u":"/terms","h":"#6-limitation-of-liability","p":97},{"i":111,"t":"7) Indemnity","u":"/terms","h":"#7-indemnity","p":97},{"i":113,"t":"8) Modifications","u":"/terms","h":"#8-modifications","p":97},{"i":115,"t":"9) Governing law","u":"/terms","h":"#9-governing-law","p":97},{"i":117,"t":"10) 
Disputes","u":"/terms","h":"#10-disputes","p":97},{"i":119,"t":"11) About these Website Terms of Use","u":"/terms","h":"#11-about-these-website-terms-of-use","p":97}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/11",[0,4.691]],["t/13",[1,4.691]],["t/15",[2,3.35,3,3.35,4,3.35]],["t/17",[5,3.909,6,3.332]],["t/19",[7,3.909,8,3.332]],["t/21",[9,3.909,10,3.909]],["t/23",[11,3.909,12,3.909]],["t/25",[13,3.909,14,3.909]],["t/27",[15,3.909,16,3.909]],["t/29",[17,3.909,18,3.909]],["t/31",[19,3.909,20,3.909]],["t/35",[21,4]],["t/37",[22,1.666,23,1.666,24,1.953,25,1.666,26,1.666,27,1.476,28,1.666,29,1.334]],["t/39",[25,1.999,26,1.999,27,1.771,30,1.999,31,1.771,32,1.771]],["t/41",[6,1.999,29,1.601,33,1.999,34,2.344,35,2.344,36,2.344]],["t/43",[27,1.61,37,1.817,38,2.131,39,2.131,40,2.131,41,2.131,42,2.131]],["t/45",[43,2.856,44,3.35,45,2.856]],["t/47",[31,2.214,32,2.214,46,2.499,47,2.499]],["t/49",[8,2.499,48,2.499,49,2.931,50,2.931]],["t/51",[51,2.856,52,3.35,53,3.35]],["t/57",[54,3.35,55,3.35,56,3.35]],["t/59",[57,3.35,58,3.35,59,3.35]],["t/61",[60,3.909,61,3.909]],["t/64",[62,4]],["t/66",[62,4]],["t/72",[63,4.691]],["t/74",[64,3.35,65,3.35,66,3.35]],["t/76",[67,2.931,68,2.931,69,2.931,70,2.931]],["t/78",[71,3.35,72,3.35,73,3.35]],["t/80",[74,3.35,75,3.35,76,3.35]],["t/82",[77,4.691]],["t/84",[78,2.605,79,2.605,80,2.605,81,2.605,82,2.605]],["t/86",[83,4.691]],["t/89",[84,4.691]],["t/91",[85,3.909,86,3.909]],["t/95",[87,4.691]],["t/99",[21,4]],["t/101",[22,3.332,88,3.909]],["t/103",[30,2.499,89,2.931,90,2.931,91,2.931]],["t/105",[33,2.499,45,2.499,92,2.931,93,2.931]],["t/107",[29,1.779,31,1.968,32,1.968,37,2.221,47,2.221]],["t/109",[23,2.856,43,2.856,94,3.35]],["t/111",[46,3.332,95,3.909]],["t/113",[48,3.332,96,3.909]],["t/115",[51,2.856,97,3.35,98,3.35]],["t/117",[99,3.909,100,3.909]],["t/119",[28,2.499,29,2.002,101,2.931,102,2.931]]],"invertedIndex":[["",{"_index":80,"t":{"84":{"position":[[21,1]]}}}],["1",{"_index":21,"t":{"35":{"position":[[0,2]]},"99":{"po
sition":[[0,2]]}}}],["10",{"_index":99,"t":{"117":{"position":[[0,3]]}}}],["11",{"_index":101,"t":{"119":{"position":[[0,3]]}}}],["2",{"_index":22,"t":{"37":{"position":[[0,2]]},"101":{"position":[[0,2]]}}}],["3",{"_index":30,"t":{"39":{"position":[[0,2]]},"103":{"position":[[0,2]]}}}],["4",{"_index":33,"t":{"41":{"position":[[0,2]]},"105":{"position":[[0,2]]}}}],["5",{"_index":37,"t":{"43":{"position":[[0,2]]},"107":{"position":[[0,2]]}}}],["6",{"_index":43,"t":{"45":{"position":[[0,2]]},"109":{"position":[[0,2]]}}}],["7",{"_index":46,"t":{"47":{"position":[[0,2]]},"111":{"position":[[0,2]]}}}],["8",{"_index":48,"t":{"49":{"position":[[0,2]]},"113":{"position":[[0,2]]}}}],["9",{"_index":51,"t":{"51":{"position":[[0,2]]},"115":{"position":[[0,2]]}}}],["acz",{"_index":82,"t":{"84":{"position":[[26,5]]}}}],["anonymis",{"_index":60,"t":{"61":{"position":[[0,13]]}}}],["appli",{"_index":78,"t":{"84":{"position":[[0,7]]}}}],["assur",{"_index":72,"t":{"78":{"position":[[8,9]]}}}],["censorship",{"_index":3,"t":{"15":{"position":[[4,10]]}}}],["chang",{"_index":50,"t":{"49":{"position":[[29,6]]}}}],["choic",{"_index":44,"t":{"45":{"position":[[8,7]]}}}],["collect",{"_index":24,"t":{"37":{"position":[[16,10]]}}}],["contact",{"_index":52,"t":{"51":{"position":[[3,7]]}}}],["continu",{"_index":18,"t":{"29":{"position":[[4,11]]}}}],["contract",{"_index":75,"t":{"80":{"position":[[6,9]]}}}],["contribut",{"_index":62,"t":{"64":{"position":[[7,10]]},"66":{"position":[[8,10]]}}}],["cryptographi",{"_index":79,"t":{"84":{"position":[[8,12]]}}}],["data",{"_index":27,"t":{"37":{"position":[[54,4]]},"39":{"position":[[38,4]]},"43":{"position":[[13,4]]}}}],["decentralis",{"_index":14,"t":{"25":{"position":[[5,16]]}}}],["disclaim",{"_index":88,"t":{"101":{"position":[[3,11]]}}}],["disput",{"_index":100,"t":{"117":{"position":[[4,8]]}}}],["distribut",{"_index":67,"t":{"76":{"position":[[0,11]]}}}],["dst",{"_index":70,"t":{"76":{"position":[[28,5]]}}}],["econom",{"_index":65,"t":{"74":{"positi
on":[[6,9]]}}}],["european",{"_index":40,"t":{"43":{"position":[[30,8]]}}}],["export",{"_index":38,"t":{"43":{"position":[[3,9]]}}}],["forward",{"_index":89,"t":{"103":{"position":[[3,7]]}}}],["gossipsub",{"_index":58,"t":{"59":{"position":[[7,9]]}}}],["govern",{"_index":97,"t":{"115":{"position":[[3,9]]}}}],["ii",{"_index":2,"t":{"15":{"position":[[0,3]]}}}],["iii",{"_index":5,"t":{"17":{"position":[[0,4]]}}}],["improv",{"_index":59,"t":{"59":{"position":[[17,12]]}}}],["inclus",{"_index":16,"t":{"27":{"position":[[6,11]]}}}],["indemn",{"_index":95,"t":{"111":{"position":[[3,9]]}}}],["inform",{"_index":53,"t":{"51":{"position":[[11,11]]}}}],["intellectu",{"_index":92,"t":{"105":{"position":[[3,12]]}}}],["iv",{"_index":7,"t":{"19":{"position":[[0,3]]}}}],["ix",{"_index":17,"t":{"29":{"position":[[0,3]]}}}],["knowledg",{"_index":55,"t":{"57":{"position":[[5,9]]}}}],["law",{"_index":98,"t":{"115":{"position":[[13,3]]}}}],["liabil",{"_index":94,"t":{"109":{"position":[[17,9]]}}}],["liberti",{"_index":1,"t":{"13":{"position":[[3,7]]}}}],["libp2p",{"_index":57,"t":{"59":{"position":[[0,6]]}}}],["limit",{"_index":23,"t":{"37":{"position":[[6,5]]},"109":{"position":[[3,10]]}}}],["link",{"_index":47,"t":{"47":{"position":[[15,5]]},"107":{"position":[[23,5]]}}}],["look",{"_index":90,"t":{"103":{"position":[[11,7]]}}}],["measur",{"_index":34,"t":{"41":{"position":[[12,8]]}}}],["modif",{"_index":96,"t":{"113":{"position":[[3,13]]}}}],["nescienc",{"_index":87,"t":{"95":{"position":[[0,9]]}}}],["network",{"_index":61,"t":{"61":{"position":[[14,8]]}}}],["nim",{"_index":77,"t":{"82":{"position":[[0,3]]}}}],["open",{"_index":12,"t":{"23":{"position":[[4,8]]}}}],["outsid",{"_index":39,"t":{"43":{"position":[[18,7]]}}}],["p2p",{"_index":63,"t":{"72":{"position":[[0,3]]}}}],["paper",{"_index":84,"t":{"89":{"position":[[0,6]]}}}],["parti",{"_index":32,"t":{"39":{"position":[[9,5]]},"47":{"position":[[9,5]]},"107":{"position":[[9,5]]}}}],["person",{"_index":26,"t":{"37":{"position":[[45,
8]]},"39":{"position":[[29,8]]}}}],["polici",{"_index":49,"t":{"49":{"position":[[16,6]]}}}],["principl",{"_index":0,"t":{"11":{"position":[[0,10]]}}}],["privaci",{"_index":8,"t":{"19":{"position":[[4,7]]},"49":{"position":[[8,7]]}}}],["process",{"_index":25,"t":{"37":{"position":[[31,10]]},"39":{"position":[[15,10]]}}}],["proof",{"_index":56,"t":{"57":{"position":[[15,6]]}}}],["properti",{"_index":93,"t":{"105":{"position":[[16,8]]}}}],["qa",{"_index":73,"t":{"78":{"position":[[18,4]]}}}],["qualiti",{"_index":71,"t":{"78":{"position":[[0,7]]}}}],["resist",{"_index":4,"t":{"15":{"position":[[15,10]]}}}],["resourc",{"_index":20,"t":{"31":{"position":[[3,15]]}}}],["respect",{"_index":36,"t":{"41":{"position":[[32,7]]}}}],["rfc",{"_index":83,"t":{"86":{"position":[[0,3]]}}}],["right",{"_index":45,"t":{"45":{"position":[[20,6]]},"105":{"position":[[25,6]]}}}],["sc",{"_index":76,"t":{"80":{"position":[[16,4]]}}}],["secur",{"_index":6,"t":{"17":{"position":[[5,8]]},"41":{"position":[[3,8]]}}}],["smart",{"_index":74,"t":{"80":{"position":[[0,5]]}}}],["statement",{"_index":91,"t":{"103":{"position":[[19,10]]}}}],["switzerland",{"_index":42,"t":{"43":{"position":[[49,11]]}}}],["system",{"_index":68,"t":{"76":{"position":[[12,7]]}}}],["take",{"_index":35,"t":{"41":{"position":[[24,4]]}}}],["term",{"_index":102,"t":{"119":{"position":[[24,5]]}}}],["test",{"_index":69,"t":{"76":{"position":[[20,7]]}}}],["third",{"_index":31,"t":{"39":{"position":[[3,5]]},"47":{"position":[[3,5]]},"107":{"position":[[3,5]]}}}],["tke",{"_index":66,"t":{"74":{"position":[[16,5]]}}}],["token",{"_index":64,"t":{"74":{"position":[[0,5]]}}}],["transpar",{"_index":10,"t":{"21":{"position":[[3,12]]}}}],["union",{"_index":41,"t":{"43":{"position":[[39,5]]}}}],["up",{"_index":86,"t":{"91":{"position":[[6,3]]}}}],["us",{"_index":28,"t":{"37":{"position":[[69,3]]},"119":{"position":[[33,3]]}}}],["v",{"_index":9,"t":{"21":{"position":[[0,2]]}}}],["vi",{"_index":11,"t":{"23":{"position":[[0,3]]}}}],["vii",{"_
index":13,"t":{"25":{"position":[[0,4]]}}}],["viii",{"_index":15,"t":{"27":{"position":[[0,5]]}}}],["websit",{"_index":29,"t":{"37":{"position":[[80,7]]},"41":{"position":[[47,7]]},"107":{"position":[[15,7]]},"119":{"position":[[16,7]]}}}],["write",{"_index":85,"t":{"91":{"position":[[0,5]]}}}],["x",{"_index":19,"t":{"31":{"position":[[0,2]]}}}],["zero",{"_index":54,"t":{"57":{"position":[[0,4]]}}}],["zk",{"_index":81,"t":{"84":{"position":[[23,2]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":2,"t":"Vac is a principle-driven research and development group that provides technical support to each IFT startup. Vac comprises R&D Service Units, Deep Research, and Incubator Projects. We do applied research based on which we build protocols, libraries, specifications, and publications. As custodians of these protocols, our aim is to adhere to a set of principles that ensure their alignment with our core values and objectives.","s":"About Vac","u":"/","h":"","p":1},{"i":4,"t":"Vac Libp2p Networking Engineer Remote (Worldwide) Software Developer in Test (Rust or Go) Remote (Worldwide) Zero Knowledge Research Engineer (ACZ) Remote (Worldwide) Zero Knowledge Researcher (Nescience) Remote (Worldwide)","s":"Current job openings","u":"/join-us","h":"","p":3},{"i":6,"t":"Waku v2 training session Waku: Enabling a New Dimension for dApps Vac, Waku v2 and Ethereum Messaging ZKPodcast: ZKPs for Spam Protection & Decentralized Messaging with Status Franck Royer : DappConnect: Enabling decentralised communications using Waku Oskar Thoren | Vac, Waku v2 and Ethereum Messaging Dean Eigenman & Oskar Thoren: From Whisper to Waku Private and Reliable Data Sync for Messaging Over Whisper by Dean Eigenmann & Oskar Thoren (Devcon5)","s":"","u":"/media","h":"","p":5},{"i":8,"t":"Join the Vac Community! Keep up to date with our latest research by connecting with us on our communities channels. 
Follow us on X Join the community on Discord Share your thoughts on the latest research on the Vac research forum","s":"Join the community","u":"/community","h":"","p":7},{"i":10,"t":"These principles have been inherited from https://our.status.im/our-principles/. Only minor stylistic changes have been made to them.","s":"","u":"/principles","h":"","p":9},{"i":12,"t":"The goal of Vac is widespread adoption of the decentralised web. Our challenge is achieving mass adoption while staying true to the principles outlined below.","s":"Principles","u":"/principles","h":"","p":9},{"i":14,"t":"We believe in the sovereignty of individuals. As a research organisation that stands for the cause of personal liberty, we aim to maximise social, political, and economic freedoms. This includes being coercion resistant.","s":"I. Liberty","u":"/principles","h":"#i-liberty","p":9},{"i":16,"t":"We enable the free flow of information. No content is under surveillance. We abide by the cryptoeconomic design principle of censorship resistance. Even stronger, we design agnostic infrastructures for information.","s":"II. Censorship resistance","u":"/principles","h":"#ii-censorship-resistance","p":9},{"i":18,"t":"We don't compromise on security when building features. We use state-of-the-art technologies, and research new security methods and technologies to make strong security guarantees.","s":"III. Security","u":"/principles","h":"#iii-security","p":9},{"i":20,"t":"Privacy is the power to selectively reveal oneself to the world. For us, it's essential to protect privacy in both communications and transactions, as well as pseudo-anonymity. Additionally, we strive to provide the right of total anonymity.","s":"IV. Privacy","u":"/principles","h":"#iv-privacy","p":9},{"i":22,"t":"We strive for complete openness and symmetry of information within the organisation, and have no border between our core contributors and our community. 
We are frank about our shortcomings, especially when making short-term tradeoffs in service of our long-term goals.","s":"V. Transparency","u":"/principles","h":"#v-transparency","p":9},{"i":24,"t":"The software we create is a public good. It is made available via a free and open-source licence, for anyone to share, modify, and benefit from. We believe in permissionless participation.","s":"VI. Openness","u":"/principles","h":"#vi-openness","p":9},{"i":26,"t":"We minimise centralisation across both the software and the organisation itself. In other words, we maximise the number of physical computers composing the network, and maximise the number of individuals who have control over the system(s) we are building.","s":"VII. Decentralisation","u":"/principles","h":"#vii-decentralisation","p":9},{"i":28,"t":"We believe in fair and widespread access to our software, with an emphasis on ease of use. This also extends to social inclusivity, permissionless participation, interoperability, and investing in educational efforts.","s":"VIII. Inclusivity","u":"/principles","h":"#viii-inclusivity","p":9},{"i":30,"t":"We create software incentivised to continue to exist and improve without the stewardship of a single entity or any of the current team members.","s":"IX. Continuance","u":"/principles","h":"#ix-continuance","p":9},{"i":32,"t":"We are relentlessly resourceful. As we grow and have ready access to capital, it is our obligation to token holders to fight bureaucracy and inefficiencies within the organisation. This means solving problems in the most effective way possible at lower economic costs (in terms of capital, time, and resources).","s":"X. Resourcefulness","u":"/principles","h":"#x-resourcefulness","p":9},{"i":34,"t":"On this page Last updated: 9 February 2024 This Privacy Policy is intended to inform users of our approach to privacy in respect of this website (\"Website\"). 
In this regard, if you are visiting our Website, this Privacy Policy applies to you.","s":"Privacy Policy","u":"/privacy-policy","h":"","p":33},{"i":36,"t":"For the purposes of this Privacy Policy and the collection and processing of personal data as a controller, the relevant entity is the Logos Collective Association, which has its registered office in Zug and its legal domicile address at Logos Collective Association c/o PST Consulting GmbH Baarerstrasse 10 6300 Zug Switzerland Whenever we refer to “Logos”, “we” or other similar references, we are referring to the Logos Collective Association.","s":"1) Who we are","u":"/privacy-policy","h":"#1-who-we-are","p":33},{"i":38,"t":"We aim to limit the collection and collection and processing of personal data from users of the Website. We only collect and process certain personal data for specific purposes and where we have the legal basis to do so under applicable privacy legislation. We will not collect or process any personal data that we don’t need and where we do store any personal data, we will only store it for the least amount of time needed for the indicated purpose. In this regard, we collect and process the following personal data from your use of the Website: IP address: As part of such use of the Website we briefly process your IP address but we have no way of identifying you. We however have a legitimate interest in processing such IP addresses to ensure the technical functionality and enhance the security measures of the Website. This IP address is not stored by us over time.","s":"2) We limit the collection and processing of personal data from your use of the Website","u":"/privacy-policy","h":"#2-we-limit-the-collection-and-processing-of-personal-data-from-your-use-of-the-website","p":33},{"i":40,"t":"In addition to our limited and collection of personal data, third parties may collect or process personal data as a result of the Website making use of certain features or to provide certain content. 
To the extent you interact with such third party content or features, their respective privacy policies will apply.","s":"3) Third party processing of personal data","u":"/privacy-policy","h":"#3-third-party-processing-of-personal-data","p":33},{"i":42,"t":"As a general approach, we take data security seriously and we have implemented a variety of security measures on the Website to maintain the safety of your personal data when you submit such information to us.","s":"4) Security measures we take in respect of the Website","u":"/privacy-policy","h":"#4-security-measures-we-take-in-respect-of-the-website","p":33},{"i":44,"t":"We are obliged to protect the privacy of personal data that you may have submitted in the unlikely event that we export your personal data to places outside the European Union or Switzerland. This means that personal data will only be processed in countries or by parties that provide an adequate level of protection as deemed by Switzerland or the European Commission. Otherwise, we will use other forms of protections, such as specific forms of contractual clauses to ensure such personal data is provided the same protection as required in Switzerland or Europe. In any event, the transmission of personal data outside the European Union and Switzerland will always occur in conformity with applicable privacy legislation.","s":"5) Exporting data outside the European Union and Switzerland","u":"/privacy-policy","h":"#5-exporting-data-outside-the-european-union-and-switzerland","p":33},{"i":46,"t":"As explained in this Privacy Policy, we limit our collection and processing of your personal data wherever possible. Nonetheless, you still have certain choices and rights in respect of the personal data which we do collect and process. 
As laid out in relevant privacy legislation, you have the right to: Ask us to correct or update your personal data (where reasonably possible); Ask us to remove your personal data from our systems; Ask us for a copy of your personal data, which may also be transferred to another data controller at your request; Withdraw your consent to process your personal data (only if consent was asked for a processing activity), which only affects processing activities that are based on your consent and doesn’t affect the validity of such processing activities before you have withdrawn your consent; Object to the processing of your personal data; and File a complaint with the Federal Data Protection and Information Commissioner (FDPIC), if you believe that your personal data has been processed unlawfully.","s":"6) Your choices and rights","u":"/privacy-policy","h":"#6-your-choices-and-rights","p":33},{"i":48,"t":"On this Website, you may come across links to third party websites. These third party sites have separate and independent privacy policies. We therefore have no responsibility or liability for the content and activities of these third party websites.","s":"7) Third party links","u":"/privacy-policy","h":"#7-third-party-links","p":33},{"i":50,"t":"We may modify or replace any part of this Privacy Policy at any time and without notice. Please check the Website periodically for any changes. The new Privacy Policy will be effective immediately upon its posting on our Website.","s":"8) This Privacy Policy might change","u":"/privacy-policy","h":"#8-this-privacy-policy-might-change","p":33},{"i":52,"t":"To the extent that you have any questions about the Privacy Policy, please contact us at legal@free.technology. 
This document is licensed under CC-BY-SA.","s":"9) Contact information","u":"/privacy-policy","h":"#9-contact-information","p":33},{"i":54,"t":"The Vac RFC unit serves as a vital cornerstone in the Logos collective, taking on the responsibility of shepherding and editing specifications for Logos projects and Vac incubator projects. By meticulously crafting and overseeing these specifications, the Vac RFC unit acts as a linchpin for ensuring standardised and interoperable protocols within the Logos ecosystem. Their expertise and attention to detail contribute to a cohesive and collaborative environment, facilitating seamless integration and advancement of decentralised technologies throughout the Logos collective and beyond.","s":"Vac RFC Process","u":"/rfcprocess","h":"","p":53},{"i":56,"t":"Vac Deep Research is at the forefront of exploration and cutting-edge innovation within the IFT. Their work extends beyond scientific publications, actively bridging the gap between theory and practice. The team collaborates with various entities, such as Vac R&D units, incubator projects, and IFT projects, to bring their research findings to fruition. Part of this effort includes identifying opportunities for and spawning new incubator projects, allowing Vac Deep Research to translate their research findings into practical applications within the IFT. Deep Research encompasses several key areas, including zero knowledge (ZK), decentralised privacy-preserving node provider networks, validator privacy, and libp2p gossipsub improvements.","s":"Vac Deep Research","u":"/deepresearch","h":"","p":55},{"i":58,"t":"In the realm of ZKP, Vac Deep Research has made contributions that have given rise to the incubator project Nescience. The team delved into the intricacies of zero-knowledge proofs, exploring their applications and pushing the boundaries of privacy-preserving technologies. 
By advancing the field of ZK, Vac Deep Research strengthens the foundation for secure and confidential interactions within decentralised networks.","s":"Zero-Knowledge Proofs","u":"/deepresearch","h":"#zero-knowledge-proofs","p":55},{"i":60,"t":"Another area of focus for Vac Deep Research is \"libp2p gossipsub improvements\". The team explores ways to enhance the performance, efficiency, and reliability of the libp2p gossipsub protocol. By conducting in-depth research and proposing improvements, Vac Deep Research aims to optimise information sharing and communication within decentralised networks, contributing to the overall robustness and scalability of the P2P layers of IFT projects.","s":"Libp2p Gossipsub Improvements","u":"/deepresearch","h":"#libp2p-gossipsub-improvements","p":55},{"i":62,"t":"Vac also researches anonymisation networks, with the main goal of a libp2p gossipsub anonymisation layer with pluggable project-specific components.","s":"Anonymisation Networks","u":"/deepresearch","h":"#anonymisation-networks","p":55},{"i":65,"t":"Get in touch with us by joining our Discord, opening a thread in our forum, or opening issues / PRs on GitHub. Also, see our current job openings.","s":"How to Contribute","u":"/contribute","h":"#how-to-contribute","p":63},{"i":67,"t":"We are interested in both research and code contributions. For code contributions, see our \"good first issue\" lists for various Vac-related code bases: nim-libp2p zerokit stealth-address-kit","s":"What to Contribute","u":"/contribute","h":"#what-to-contribute","p":63},{"i":69,"t":"We take security seriously at Vac and across the Institute of Free Technology and its affiliates. Please report any security incidents via security@free.technology. 
Please report any discovered vulnerabilities in our bounty programme at HackenProof to help ensure our protocols and software remain secure.","s":"Security","u":"/security","h":"","p":68},{"i":71,"t":"Vac's R&D Service Units play a crucial role in supporting IFT projects. In addition to providing expertise, resources, and technical guidance, they also develop software artefacts, such as nim-libp2p and zerokit.","s":"Vac R&D Service Units","u":"/vsus","h":"","p":70},{"i":73,"t":"The P2P R&D Service Unit is a vital part of Vac, specialising in peer-to-peer (P2P) technologies. The P2P unit develops nim-libp2p, works on improving the libp2p gossipsub protocol, and assists projects with the integration of P2P network layers. The P2P unit collaborates closely with Vac Deep Research to conduct research aimed at enhancing libp2p gossipsub. By focusing on advancing P2P technologies, the P2P unit contributes to the overall improvement and efficiency of decentralised networks, enabling seamless decentralised communication within IFT projects and beyond.","s":"P2P","u":"/vsus","h":"#p2p","p":70},{"i":75,"t":"The Vac Token Economics Unit is dedicated to assisting IFT projects in designing their token economies, incentives, and markets. TKE's collaboration with IFT project teams is intensive and occurs on a day-to-day basis, where TKE not only responds to their needs but also proactively drives the conversation forward by suggesting new ideas and strategies based on TKE's research. The team brings together a broad spectrum of skills and knowledge, ranging from the modelling of dynamic systems to theoretical modelling and general cryptoeconomics.","s":"Token Economics (TKE)","u":"/vsus","h":"#token-economics-tke","p":70},{"i":77,"t":"The Distributed Systems Testing (DST) R&D Service Unit is responsible for developing distributed systems testing software. DST's primary objective is to assist IFT projects in understanding the scaling behaviour of their nodes within larger networks. 
By conducting thorough regression testing, the DST unit helps identify potential bottlenecks and performance issues, ensuring the reliability and stability of the projects. The DST unit's expertise in distributed systems testing enables IFT projects to deliver scalable and resilient solutions that can withstand the demands of real-world decentralised applications.","s":"Distributed Systems Testing (DST)","u":"/vsus","h":"#distributed-systems-testing-dst","p":70},{"i":79,"t":"The QA Service Unit is dedicated to supporting IFT projects through the development and execution of comprehensive test plans. Primary responsibilities include implementing unit tests and interoperability tests to ensure seamless integration and functionality across systems. The QA unit plays a crucial role in the verification of project implementations. By rigorously testing project implementations against defined specifications, QA ensures that all functionalities align with the project's requirements. QA's proactive approach to identifying and reporting bugs ensures that any issues are addressed promptly, enhancing the overall quality and reliability of the software. Through meticulous testing and quality assurance processes, the QA Service Unit ensures that IFT projects deliver robust and high-performing software solutions.","s":"Quality Assurance (QA)","u":"/vsus","h":"#quality-assurance-qa","p":70},{"i":81,"t":"Vac's Smart Contracts Service Unit specialises in the development, maintenance, and auditing of smart contracts for IFT projects. The SC unit ensures that all smart contracts are robust, secure, and aligned with project requirements. SC designs and develops smart contracts tailored to the specific needs of IFT projects, ensuring they function as intended and are up-to-date with any project changes or requirements. 
The unit's thorough auditing process involves meticulously reviewing smart contracts to identify and rectify potential vulnerabilities, ensuring the highest standards of security and reliability.","s":"Smart Contracts (SC)","u":"/vsus","h":"#smart-contracts-sc","p":70},{"i":83,"t":"With the IFT's extensive use of the Nim ecosystem, the Nim Service Unit focuses on the development and maintenance of Nim tooling and core libraries essential for IFT projects. The Nim unit works on critical tools in the Nim ecosystem, including the Nim compiler, Nimble (package manager), and nim-suggest, ensuring they are efficient, reliable, and up-to-date. The Nim unit further plans to maintain core libraries developed by IFT, such as Chronos. The mandate of the Nim unit also includes providing support to IFT projects regarding the use of Nim, identifying and addressing specific project needs and demands related to the Nim ecosystem.","s":"Nim","u":"/vsus","h":"#nim","p":70},{"i":85,"t":"The Applied Cryptography & ZK R&D Service Unit focuses on cryptographic solutions and zero-knowledge (ZK) proofs. ACZ provides valuable assistance to IFT projects by offering expertise in employing ZK proofs and implementing cryptographic techniques. The ACZ unit specialises in areas such as employing noise protocol channels and other cryptographic-related aspects. By leveraging cutting-edge cryptographic technologies, ACZ enhances the security, privacy, and trustworthiness of Logos projects, contributing to the overall integrity and resilience of the decentralised web ecosystem. ACZ develops zerokit and stealth-address-kit.","s":"Applied Cryptography & ZK (ACZ)","u":"/vsus","h":"#applied-cryptography--zk-acz","p":70},{"i":87,"t":"The Vac RFC unit serves as a vital cornerstone in the IFT, taking on the responsibility of shepherding and editing specifications for IFT projects. 
By meticulously crafting and overseeing these specifications, the Vac RFC unit acts as a linchpin for ensuring standardised and interoperable protocols within the IFT ecosystem The RFC unit's expertise and attention to detail contribute to a cohesive and collaborative environment, facilitating seamless integration and advancement of decentralised technologies throughout the IFT and beyond.","s":"RFC","u":"/vsus","h":"#rfc","p":70},{"i":90,"t":"Waku: A Family of Modular P2P Protocols For Secure & Censorship-Resistant Communication (demo) WAKU-RLN-RELAY: Privacy-Preserving Peer-to-Peer Economic Spam Protection (full) Privacy-Preserving Spam-Protected Gossip-Based Routing (poster)","s":"Papers","u":"/publications","h":"#papers","p":88},{"i":92,"t":"See write-ups.","s":"Write-ups","u":"/publications","h":"#write-ups","p":88},{"i":94,"t":"Vac incubator projects are emerging initiatives that stem from Vac's deep research efforts. These projects are envisioned to eventually become separate IFT projects, benefiting from the support and resources of both the Deep Research and R&D Service Units within Vac. The incubator projects operate within Vac, leveraging the expertise and collaboration available to them.","s":"Vac Incubator Projects","u":"/vips","h":"","p":93},{"i":96,"t":"Nescience focuses on developing a privacy-preserving general-purpose zero-knowledge virtual machine. It enables privacy-preserving computing, which holds significant importance for IFT programs, especially Logos.","s":"Nescience","u":"/vips","h":"#nescience","p":93},{"i":98,"t":"On this page Last updated: 14 February 2024 These website terms of use ('Website Terms of Use') are entered into by you and us, and they govern your access and use of this Website, including any content and functionality contained in the Website. 
It is your responsibility to read the Website Terms of Use carefully before your use of the Website and your use of the Website means you have agreed to be bound and comply with these Website Terms of Use. If you do not agree with these Website Terms of Use, you must not access or use the Website.","s":"Terms of Use","u":"/terms","h":"","p":97},{"i":100,"t":"For the purposes of these Website Terms of Use, the relevant entity is the Logos Collective Association, which has its registered office in Zug and its legal domicile address at: Logos Collective Association c/o PST Consulting GmbH Baarerstrasse 10 6300 Zug Switzerland Whenever we refer to “Logos”, “we”, “us” or any other similar references, we are referring to the Logos Collective Association.","s":"1) Who we are","u":"/terms","h":"#1-who-we-are","p":97},{"i":102,"t":"The Website is provided by us on an ‘as is’ basis and you use the Website at your own sole discretion and risk. We disclaim all warranties of any kind, express or implied, including without limitation the warranties of merchantability, fitness for a particular purpose, and non-infringement of intellectual property or other violation of rights. We do not warrant or make any representations concerning the completeness, accuracy, legality, utility, reliability, suitability or availability of the use of the Website, the content on this Website or otherwise relating to the Website, such content or on any sites linked to this site.These disclaimers will apply to the maximum extent permitted by applicable law. We make no claims that the Website or any of its content is accessible, legally compliant or appropriate in your jurisdiction. Your access or use of the Website is at your own sole discretion and you are solely responsible for complying with any applicable local laws. 
The content herein or as accessible through this website is intended to be made available for informational purposes only and should not be considered as creating any expectations or forming the basis of any contract, commitment or binding obligation with us. No information herein shall be considered to contain or be relied upon as a promise, representation, warranty or guarantee, whether express or implied and whether as to the past, present or the future in relation to the projects and matters described herein. The information contained herein does not constitute financial, legal, tax, or other advice and should not be treated as such. Nothing in this Website should be construed by you as an offer to buy or sell, or soliciting any offer to buy or sell any tokens or any security.","s":"2) Disclaimers","u":"/terms","h":"#2-disclaimers","p":97},{"i":104,"t":"The Website may also contain forward-looking statements that are based on current expectations, estimates, forecasts, assumptions and projections about the technology, industry and markets in general. The forward looking statements, which may include statements about the roadmap, project descriptions, technical details, functionalities, features, the development and use of tokens by projects, and any other statements related to such matters or as accessible through this website are subject to a high degree of risk and uncertainty. The forward looking statements are subject to change based on, among other things, market conditions, technical developments, and regulatory environment. The actual development and results, including the order and the timeline, might vary from what’s presented. The information contained herein is a summary and does not purport to be accurate, reliable or complete and we bear no responsibility for the accuracy, reliability or completeness of information contained herein. 
Because of the high degree of risk and uncertainty described above, you should not place undue reliance on any matters described in this website or as accessible through this website. While we aim to update our website regularly, all information, including the timeline and the specifics of each stage, is subject to change and may be amended or supplemented at any time, without notice and at our sole discretion.","s":"3) Forward looking statements","u":"/terms","h":"#3-forward-looking-statements","p":97},{"i":106,"t":"The Website and its contents are made available under Creative Commons Attribution 4.0 International license (CC-BY 4.0). In essence this licence allows users to copy, modify and distribute the content in any format for any purpose, including commercial use, subject to certain requirements such as attributing us. For the full terms of this licence, please refer to the following website: https://creativecommons.org/licenses/by/4.0/.","s":"4) Intellectual property rights","u":"/terms","h":"#4-intellectual-property-rights","p":97},{"i":108,"t":"To the extent the Website provides any links to a third party website, then their terms and conditions, including privacy policies, govern your use of those third party websites. By linking such third party websites, Status does not represent or imply that it endorses or supports such third party websites or content therein, or that it believes such third party websites and content therein to be accurate, useful or non-harmful. We have no control over such third party websites and will not be liable for your use of or activities on any third party websites accessed through the Website. 
If you access such third party websites through the Website, it is at your own risk and you are solely responsible for your activities on such third party websites.","s":"5) Third-party website links","u":"/terms","h":"#5-third-party-website-links","p":97},{"i":110,"t":"We will not be held liable to you under any contract, negligence, strict liability, or other legal or equitable theory for any lost profits, cost of procurement for substitute services, or any special, incidental, or consequential damages related to, arising from, or in any way connected with these Website Terms of Use, the Website, the content on the Website, or your use of the Website, even if we have been advised of the possibility of such damages. In any event, our aggregate liability for such claims is limited to EUR 100 (one hundred Euros). This limitation of liability will apply to the maximum extent permitted by applicable law.","s":"6) Limitation of liability","u":"/terms","h":"#6-limitation-of-liability","p":97},{"i":112,"t":"You shall indemnify us and hold us harmless from and against any and all claims, damages and expenses, including attorneys’ fees, arising from or related to your use of the Website, the content on the Website, including without limitation your violation of these Website Terms of Use.","s":"7) Indemnity","u":"/terms","h":"#7-indemnity","p":97},{"i":114,"t":"We may modify or replace any part of this Website Terms of Use at any time and without notice. You are responsible for checking the Website periodically for any changes. 
The new Website Terms of Use will be effective immediately upon its posting on the Website.","s":"8) Modifications","u":"/terms","h":"#8-modifications","p":97},{"i":116,"t":"Swiss law governs these Website Terms of Use and any disputes between you and us, whether in court or arbitration, without regard to conflict of laws provisions.","s":"9) Governing law","u":"/terms","h":"#9-governing-law","p":97},{"i":118,"t":"In these terms, “dispute” has the broadest meaning enforceable by law and includes any claim you make against or controversy you may have in relation to these Website Terms of Use, the Website, the content on the Website, or your use of the Website. We prefer arbitration over litigation as we believe it meets our principle of resolving disputes in the most effective and cost effective manner. You are bound by the following arbitration clause, which waives your right to litigation and to be heard by a judge. Please note that court review of an arbitration award is limited. You also waive all your rights to a jury trial (if any) in any and all jurisdictions. If a (potential) dispute arises, you must first use your reasonable efforts to resolve it amicably with us. If these efforts do not result in a resolution of such dispute, you shall then send us a written notice of dispute setting out (i) the nature of the dispute, and the claim you are making; and (ii) the remedy you are seeking. If we and you are unable to further resolve this dispute within sixty (60) calendar days of us receiving this notice of dispute, then any such dispute will be referred to and finally resolved by you and us through an arbitration administered by the Swiss Chambers’ Arbitration Institution in accordance with the Swiss Rules of International Arbitration for the time being in force, which rules are deemed to be incorporated herein by reference. The arbitral decision may be enforced in any court. 
The arbitration will be held in Zug, Switzerland, and may be conducted via video conference virtual/online methods if possible. The tribunal will consist of one arbitrator, and all proceedings as well as communications between the parties will be kept confidential. The language of the arbitration will be in English. Payment of all relevant fees in respect of the arbitration, including filing, administration and arbitrator fees will be in accordance with the Swiss Rules of International Arbitration. Regardless of any applicable statute of limitations, you must bring any claims within one year after the claim arose or the time when you should have reasonably known about the claim. You also waive the right to participate in a class action lawsuit or a classwide arbitration against us.","s":"10) Disputes","u":"/terms","h":"#10-disputes","p":97},{"i":120,"t":"These Website Terms of Use cover the entire agreement between you and us regarding the Website and supersede all prior and contemporaneous understandings, agreements, representations and warranties, both written and oral, with respect to the Website. The captions and headings identifying sections and subsections of these Website Terms of Use are for reference only and do not define, modify, expand, limit, or affect the interpretation of any provisions of these Website Terms of Use. If any part of these Website Terms of Use is held invalid or unenforceable, that part will be severable from these Website Terms of Use, and the remaining portions will remain in full force and effect. If we fail to enforce any of these Website Terms of Use, that does not mean that we have waived our right to enforce them. If you have any specific questions about these Website Terms of Use, please contact us at legal@free.technology. 
This document is licensed under CC-BY-SA.","s":"11) About these Website Terms of Use","u":"/terms","h":"#11-about-these-website-terms-of-use","p":97}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/2",[0,1.803,1,3.337,2,3.774,3,2.372,4,1.69,5,3.774,6,1.783,7,2.65,8,2.274,9,3.251,10,1.453,11,3.774,12,3.774,13,2.127,14,1.69,15,1.526,16,2.274,17,2.444,18,1.204,19,2.274,20,2.274,21,2.907,22,2.729,23,3.251,24,1.69,25,2.907,26,3.774,27,2.274,28,3.774,29,3.251,30,1.783,31,2.907,32,2.907,33,3.774,34,2.907]],["t/4",[0,1.533,3,2.301,4,1.961,35,2.469,36,2.32,37,5.689,38,6.689,39,6.689,40,2.188,41,3.374,42,4.38,43,4.38,44,3.685,45,3.428,46,3.773,47,3.374]],["t/6",[0,1.598,48,5.553,49,5.345,50,3.177,51,3.177,52,2.958,53,1.914,54,3.177,55,3.177,56,4.566,57,5.844,58,3.177,59,2.737,60,2.737,61,2.058,62,4.347,63,3.177,64,2.737,65,3.177,66,3.177,67,3.177,68,1.501,69,1.683,70,0.968,71,5.345,72,5.345,73,4.566,74,3.177,75,4.566,76,3.177,77,1.683,78,1.791,79,3.177,80,2.058,81,3.177,82,3.177]],["t/8",[0,2.037,3,2.598,69,3.404,83,5.014,84,4.536,85,3.185,86,3.494,87,5.819,88,3.908,89,3.908,90,3.185,91,4.536,92,3.908,93,3.494,94,4.536,95,3.908]],["t/10",[1,4.257,96,5.52,97,5.52,98,5.52,99,5.52,100,3.576,101,3.576]],["t/12",[0,1.754,1,3.247,68,2.368,102,3.861,103,4.319,104,6.197,105,4.319,106,5.013,107,5.013,108,5.013,109,5.013,110,5.013,111,5.013,112,5.013]],["t/14",[3,1.926,27,2.869,113,2.869,114,4.763,115,4.103,116,3.344,117,4.763,118,4.763,119,2.685,120,4.763,121,4.103,122,4.103,123,4.763,124,3.344,125,4.763,126,2.133,127,4.103,128,4.763,129,3.669]],["t/16",[1,3.085,52,3.085,129,3.669,130,3.669,131,4.763,132,3.18,133,2.25,134,2.869,135,4.763,136,4.763,137,4.103,138,4.624,139,4.103,140,4.103,141,4.763,142,4.763,143,4.763]],["t/18",[3,1.95,21,3.715,53,2.906,70,1.469,144,4.823,145,4.823,146,3.303,147,3.715,148,4.823,149,4.823,150,3.205,151,4.155,152,3.124,153,4.823,154,4.155]],["t/20",[6,2.169,61,2.974,69,2.432,155,2.052,156,4.591,157,4.591,158,4.591,159,4.591,160,3.955,161,4.
591,162,3.955,163,2.974,164,4.591,165,3.955,166,4.591,167,5.864,168,4.591,169,3.955,170,2.974,171,4.591]],["t/22",[14,2.031,32,3.494,69,2.403,102,3.494,116,3.185,132,2.403,152,2.938,169,3.908,172,3.494,173,3.494,174,4.536,175,2.031,176,4.536,177,2.938,178,4.536,179,4.536,180,4.536,181,3.908,182,4.536,183,2.475,184,4.536,185,4.536]],["t/24",[25,3.715,40,2.41,93,3.715,101,3.124,113,2.906,130,3.715,173,3.715,186,3.715,187,4.155,188,3.386,189,3.715,190,4.823,191,4.155,192,4.823,193,3.124,194,4.155,195,4.155,196,3.715]],["t/26",[21,3.623,36,2.492,40,2.35,80,3.047,115,4.052,116,3.303,121,5.131,163,3.047,197,4.704,198,4.704,199,4.704,200,4.704,201,5.956,202,4.704,203,4.052,204,4.704,205,3.303,206,4.704]],["t/28",[40,2.44,70,1.488,103,4.208,113,2.943,122,4.208,195,4.208,196,3.763,207,4.884,208,2.943,209,4.884,210,4.884,211,4.208,212,4.884,213,3.43,214,4.884,215,4.884,216,3.43]],["t/30",[40,2.572,186,3.966,217,5.148,218,5.148,219,5.148,220,3.615,221,2.902,222,5.148,223,5.148,224,3.615,225,3.966,226,3.335,227,5.148]],["t/32",[116,3.041,124,3.041,175,1.939,183,1.841,208,2.609,228,4.33,229,4.35,230,4.33,231,4.33,232,5.647,233,3.336,234,3.041,235,4.33,236,4.33,237,4.33,238,4.33,239,2.805,240,4.33,241,4.33,242,2.805,243,3.041,244,3.041,245,4.33,246,3.336,247,2.609]],["t/34",[19,2.766,132,2.432,155,2.261,248,3.955,249,3.955,250,3.223,251,4.591,252,3.955,253,3.955,254,3.107,255,3.536,256,3.536,257,3.536,258,2.974,259,2.061,260,2.974,261,4.591]],["t/36",[78,2.127,119,2.127,155,1.321,205,2.65,224,2.65,254,1.999,262,2.274,263,3.797,264,2.127,265,2.65,266,4.083,267,5.053,268,3.251,269,3.251,270,3.968,271,2.444,272,3.251,273,2.127,274,3.251,275,3.251,276,3.251,277,3.251,278,3.251,279,3.251,280,3.251,281,2.65,282,3.251,283,3.8,284,3.251]],["t/38",[7,1.968,24,1.255,27,1.689,30,1.325,70,1.27,78,3.317,80,1.816,90,1.968,119,3.317,134,1.689,146,1.401,155,0.981,243,1.968,247,2.511,256,2.159,259,1.756,260,1.816,262,2.511,263,3.546,264,3.477,271,1.816,273,3.104,285,1.485,286,1.968,287,2.159,288,
1.485,289,2.159,290,2.803,291,2.926,292,4.974,293,2.803,294,2.803,295,5.507,296,1.689,297,1.529,298,2.803,299,1.58,300,2.803,301,2.415,302,1.816,303,1.816,304,2.415]],["t/40",[6,1.957,19,2.496,70,1.262,78,3.092,119,3.092,133,2.592,147,4.225,152,2.683,155,1.45,254,2.194,258,2.683,259,1.321,263,3.305,264,2.335,285,2.194,286,3.851,297,1.52,305,3.569,306,4.225,307,3.553,308,3.191,309,2.683,310,3.569]],["t/42",[78,3.41,119,2.719,132,2.555,146,3.023,257,3.715,259,1.538,297,1.77,304,4.155,311,3.386,312,3.386,313,4.155,314,3.715,315,4.823,316,4.155,317,4.823,318,4.155]],["t/44",[6,2.107,24,1.377,30,1.453,61,3.729,70,0.937,78,3.445,119,3.445,155,1.561,233,2.368,239,1.992,264,1.733,281,4.042,288,1.629,289,2.368,297,1.636,307,1.992,318,2.649,319,3.074,320,3.842,321,3.074,322,2.649,323,4.459,324,5.247,325,4.459,326,3.074,327,3.074,328,3.074,329,2.649,330,3.074,331,2.649,332,3.842,333,3.074,334,2.649,335,3.074,336,2.159,337,3.074,338,3.074,339,3.074,340,2.649,341,3.074]],["t/46",[20,1.552,34,1.985,61,1.669,78,3.765,113,1.552,119,3.606,132,1.365,155,1.369,170,2.533,205,1.809,244,2.746,250,1.809,254,1.365,258,1.669,263,2.356,264,3.606,265,1.809,285,1.365,286,1.809,289,1.985,297,0.945,342,2.576,343,2.576,344,2.576,345,2.576,346,2.576,347,2.576,348,2.22,349,5.278,350,2.576,351,2.22,352,2.576,353,1.809,354,2.22,355,2.576,356,2.22,357,2.576,358,2.576,359,5.278,360,3.32,361,3.369,362,2.576,363,2.22,364,2.22,365,2.576,366,2.22,367,2.576,368,2.576,369,2.576,370,2.576,371,2.576]],["t/48",[133,2.195,155,1.626,254,2.462,259,2.073,306,5.006,307,4.21,360,3.263,372,4.647,373,3.579,374,4.003,375,4.003,376,4.647,377,4.647,378,2.195,379,4.003]],["t/50",[53,2.8,100,3.01,155,2.068,193,3.01,221,2.619,242,3.01,247,2.8,254,3.131,259,1.885,296,2.8,380,4.003,381,3.263,382,2.8,383,4.003,384,4.003,385,4.003,386,3.579,387,4.003]],["t/52",[134,3.144,155,1.826,254,2.765,309,3.38,382,3.144,388,4.496,389,4.496,390,4.496,391,4.496,392,4.02,393,4.02,394,4.496]],["t/54",[0,1.943,15,1.939,17,2.205,18,1.53,22,1.803
,24,2.148,30,1.608,68,1.608,150,1.803,175,1.524,213,2.39,263,2.889,266,3.905,312,2.39,378,1.608,395,4.132,396,2.932,397,2.622,398,2.932,399,2.932,400,2.932,401,2.39,402,2.932,403,2.932,404,2.932,405,2.932,406,2.932,407,2.39,408,2.051,409,2.932,410,2.622,411,1.919,412,2.932,413,2.051,414,2.622,415,2.932,416,2.39,417,2.205,418,2.39,419,2.932,420,2.39]],["t/56",[0,1.718,3,2.358,6,1.296,10,1.89,13,1.546,15,1.109,16,2.958,17,2.656,18,1.566,25,2.113,35,1.546,36,1.453,44,1.777,45,1.653,53,1.653,68,1.296,126,1.836,155,1.435,175,1.836,177,1.777,211,2.363,216,1.926,220,1.926,224,1.926,226,1.777,288,1.453,296,1.653,297,1.006,299,1.546,360,1.926,363,2.363,413,1.653,420,1.926,421,2.743,422,2.113,423,2.363,424,2.363,425,2.743,426,2.113,427,2.743,428,2.743,429,2.743,430,2.363,431,4.1,432,2.363,433,2.113,434,4.1,435,2.743,436,2.743,437,2.743,438,2.363,439,2.743,440,2.743,441,2.363,442,2.743,443,2.113,444,2.113,445,1.926,446,2.363,447,1.926]],["t/58",[0,1.815,3,2.097,16,3.125,17,2.469,18,1.216,36,2.019,44,2.469,45,2.296,47,2.936,59,3.283,68,1.801,101,2.469,146,1.904,150,2.019,155,1.334,175,1.707,226,2.469,288,2.019,310,3.283,411,2.148,418,2.676,422,2.936,444,2.936,445,2.676,448,3.811,449,3.811,450,3.811,451,3.811,452,3.811,453,3.283,454,3.811,455,3.811,456,3.811,457,3.811,458,3.811,459,3.283]],["t/60",[0,1.767,3,2.336,10,1.411,16,3.042,18,1.169,22,1.942,27,2.208,35,2.846,36,1.942,68,1.732,69,1.942,77,1.942,93,2.823,132,1.942,175,1.641,220,3.546,226,2.374,243,2.573,303,2.374,356,3.157,411,2.066,422,2.823,443,2.823,447,3.546,460,3.665,461,2.823,462,2.823,463,2.573,464,3.665,465,3.665,466,3.665,467,2.573,468,2.823,469,3.157,470,2.823,471,2.823]],["t/62",[0,1.778,3,2.054,18,1.62,24,2.275,35,2.863,36,2.691,102,3.913,447,3.567,471,3.913,472,6.248,473,5.079,474,5.079,475,5.079]],["t/65",[62,3.52,83,4.319,92,4.319,95,4.319,173,5.182,225,3.861,476,5.013,477,5.013,478,3.52,479,5.013,480,5.013,481,3.861,482,5.013]],["t/67",[0,1.587,3,1.834,20,2.733,35,2.557,163,2.938,187,3.908,273,2.557,301,3.
908,411,3.28,432,3.908,478,3.185,481,3.494,483,6.425,484,3.908,485,4.536,486,2.403,487,3.185,488,3.494,489,3.908,490,3.908]],["t/69",[0,1.515,22,2.294,30,2.046,40,2.163,130,3.336,146,3.14,150,2.294,189,3.336,312,3.041,313,3.731,382,3.402,491,3.731,492,4.33,493,4.865,494,4.33,495,4.33,496,4.33,497,3.731,498,4.33,499,4.33,500,4.33,501,3.731,502,3.731]],["t/71",[4,2.031,6,2.143,7,3.185,8,2.733,10,1.746,13,2.557,14,2.031,15,1.834,18,1.447,35,2.557,40,2.266,229,3.494,297,1.664,305,3.908,408,2.733,487,3.185,488,3.494,503,3.494,504,3.908,505,3.908,506,3.908,507,4.536,508,4.536]],["t/73",[0,1.579,3,1.825,4,1.399,10,1.203,13,1.761,14,1.399,15,2.345,16,1.883,18,1.439,22,1.655,27,1.883,35,2.985,36,2.39,52,2.024,68,2.132,69,1.655,150,2.39,175,1.399,220,3.168,296,1.883,303,2.024,397,2.407,411,1.761,413,1.883,416,2.194,417,2.024,418,2.194,420,2.194,426,2.407,447,3.168,462,2.407,463,2.194,467,2.194,470,5.09,471,2.407,487,2.194,509,2.407,510,3.887,511,2.194,512,3.125,513,2.194]],["t/75",[0,1.181,3,1.364,10,1.835,15,1.364,18,1.52,20,2.032,45,2.032,53,2.032,124,2.369,137,2.906,138,2.598,226,3.087,234,3.346,287,2.598,291,2.369,311,2.369,340,2.906,353,2.369,413,2.032,433,2.598,511,2.369,514,2.906,515,3.373,516,3.373,517,2.906,518,4.766,519,3.373,520,4.106,521,3.373,522,3.373,523,2.906,524,3.373,525,3.373,526,2.906,527,2.906,528,3.373,529,3.373,530,3.373,531,3.373,532,3.373,533,3.373,534,3.373,535,4.766,536,3.373,537,3.373]],["t/77",[4,1.399,10,1.737,13,1.761,14,1.399,15,1.825,18,1.689,30,1.476,34,2.407,36,1.655,40,1.561,41,4.468,52,2.024,68,1.476,77,1.655,160,2.692,175,1.399,288,1.655,299,1.761,353,3.719,378,1.476,408,1.883,446,2.692,461,2.407,463,2.194,469,2.692,478,2.194,501,2.692,511,2.194,538,4.563,539,5.296,540,3.125,541,2.692,542,2.692,543,3.125,544,3.125,545,3.125,546,2.692,547,3.125,548,2.407,549,3.125,550,3.125,551,2.407,552,2.692,553,2.692,554,2.407,555,3.125,556,2.692,557,3.125]],["t/79",[4,1.211,8,1.629,10,1.562,14,1.817,15,2.187,18,1.725,24,1.211,30,2.556,31,2.083,40,2.027
,41,4.465,77,1.433,126,1.211,213,1.899,257,2.083,264,1.524,273,1.524,299,1.524,302,2.628,303,1.752,314,3.75,336,1.899,353,1.899,378,1.278,401,1.899,416,1.899,417,1.752,461,2.083,467,1.899,468,2.083,478,1.899,493,2.33,504,2.33,505,2.33,506,2.33,514,2.33,523,2.33,541,2.33,552,2.33,554,2.083,558,5.409,559,2.628,560,2.704,561,2.704,562,2.33,563,2.704,564,2.704,565,2.083,566,2.33,567,2.704,568,2.704,569,2.704,570,2.704,571,4.057,572,2.704,573,2.33]],["t/81",[4,2.045,10,1.758,14,1.423,15,1.846,18,1.864,24,1.423,30,2.526,31,2.447,77,1.683,85,2.231,86,2.447,100,2.058,138,2.447,146,2.281,255,2.447,264,1.791,291,2.231,299,1.791,302,2.058,336,3.206,401,2.231,468,2.447,497,2.737,503,2.447,509,2.447,546,2.737,548,2.447,551,2.447,574,6.191,575,4.769,576,2.737,577,4.566,578,4.566,579,3.177,580,3.177,581,2.737,582,3.177,583,3.177,584,3.177]],["t/83",[4,1.929,6,1.386,8,1.767,10,1.966,14,1.313,15,2.276,18,1.629,23,3.712,24,1.313,30,1.386,32,3.319,70,1.313,77,1.554,85,2.059,86,2.259,126,1.929,162,2.527,260,1.9,273,1.653,291,2.059,297,1.076,299,1.653,316,2.527,407,3.586,426,2.259,462,2.259,486,1.554,487,4.91,513,2.059,527,2.527,556,2.527,562,2.527,576,2.527,585,2.933,586,2.933,587,4.308,588,2.933,589,2.933,590,2.933,591,2.933,592,2.933,593,2.527,594,2.933,595,2.933]],["t/85",[4,1.355,6,1.43,10,1.165,13,1.706,14,1.355,15,1.782,18,1.406,19,1.823,22,1.603,44,1.96,45,1.823,46,4.922,62,2.125,68,1.43,89,2.607,105,2.607,146,1.512,150,1.603,155,1.059,266,1.96,273,1.706,297,1.11,303,1.96,314,2.331,407,2.125,408,1.823,411,1.706,417,1.96,423,2.607,424,2.607,443,2.331,444,4.005,453,3.798,467,2.125,486,1.603,488,2.331,489,2.607,490,2.607,509,2.331,511,2.125,513,2.125,553,2.607,554,2.331,596,3.026,597,5.713,598,3.026,599,2.607,600,4.408,601,3.026,602,3.026,603,3.026,604,2.607,605,3.026]],["t/87",[0,1.711,10,2.349,15,1.976,18,1.115,22,1.853,24,2.189,30,1.652,68,1.652,150,1.853,175,1.566,213,2.455,312,2.455,378,1.652,395,4.855,396,3.013,397,2.694,398,3.013,399,3.013,400,3.013,401,2.455,402,3.013,403,3
.013,404,3.013,405,3.013,406,3.013,407,2.455,408,2.107,409,3.013,410,2.694,411,1.971,412,3.013,413,2.107,414,2.694,415,3.013,416,2.455,417,2.265,418,2.455,419,3.013,420,2.455,551,2.694]],["t/90",[20,2.523,22,2.219,48,4.76,60,4.76,61,3.579,62,2.941,69,2.219,124,2.941,129,3.226,139,3.608,146,2.092,155,1.933,445,3.879,470,3.226,510,4.76,606,4.188,607,4.188,608,4.188,609,4.188,610,4.188,611,3.226,612,4.188,613,4.188,614,4.188]],["t/92",[85,4.178,481,4.584,615,5.951]],["t/94",[0,2.085,3,2.126,8,2.343,10,1.497,13,2.192,14,1.742,15,1.573,16,3.168,17,3.406,18,2.035,163,2.519,175,2.355,188,2.731,194,3.35,216,2.731,229,2.996,375,3.35,408,2.343,413,2.343,503,2.996,604,3.35,616,3.889,617,3.889,618,3.889,619,3.889,620,3.889,621,3.889,622,3.889]],["t/96",[4,2.056,10,1.767,44,2.974,45,2.766,47,3.536,52,2.974,155,2.052,181,3.955,203,3.955,262,2.766,266,2.974,311,3.223,445,4.118,513,3.223,623,4.591,624,4.591,625,3.955,626,4.591,627,4.591,628,4.591]],["t/98",[70,2.161,126,1.581,133,1.668,183,2.739,208,2.964,239,2.286,248,3.041,249,3.041,250,2.478,252,3.041,253,3.041,259,2.291,302,2.286,364,3.041,378,1.668,629,3.529,630,3.529,631,2.719,632,2.719,633,3.529,634,3.529,635,4.919,636,3.041,637,3.041]],["t/100",[70,1.197,183,1.671,224,2.759,259,1.253,262,2.367,263,3.608,265,2.759,266,4.151,267,5.159,268,3.385,269,3.385,270,4.079,271,2.545,272,3.385,273,2.215,274,3.385,275,3.385,276,3.385,277,3.385,278,3.385,279,3.385,280,3.385,281,2.759,282,3.385,283,3.879,284,3.385]],["t/102",[6,0.892,18,0.602,19,1.138,70,1.178,77,1,101,1.223,126,0.846,132,2.048,133,2.102,146,0.943,152,1.984,154,1.627,170,1.223,172,1.454,186,1.454,188,2.151,208,2.329,221,1.064,233,1.454,234,1.326,255,1.454,259,1.894,262,1.846,271,2.504,285,1,287,2.36,288,1.623,297,1.124,309,1.223,331,1.627,332,1.627,373,1.454,374,1.627,378,0.892,386,1.454,486,1.623,559,1.223,575,1.454,599,2.639,632,2.36,637,1.627,638,2.978,639,2.639,640,1.454,641,3.063,642,3.33,643,1.888,644,3.063,645,2.639,646,1.888,647,1.888,648,1.888,649,1.627,650,1.888
,651,1.888,652,1.888,653,1.627,654,1.888,655,2.639,656,1.888,657,1.627,658,1.888,659,1.888,660,1.888,661,1.627,662,1.627,663,2.151,664,1.326,665,1.888,666,1.888,667,1.627,668,1.888,669,3.426,670,3.063,671,1.627,672,1.888,673,1.888,674,1.454,675,1.888,676,1.888,677,2.639,678,1.888,679,1.627,680,1.888,681,1.627,682,1.627,683,1.888,684,1.888,685,1.888,686,1.888,687,1.888,688,1.888,689,1.888,690,3.063,691,3.063,692,1.888]],["t/104",[4,1.891,7,2.391,9,1.856,18,1.347,20,2.052,24,0.965,27,1.298,70,0.656,77,1.804,100,2.206,126,1.891,132,2.237,147,1.66,150,1.141,172,2.623,208,2.052,221,1.214,225,1.66,234,1.513,247,1.298,250,1.513,259,1.666,297,0.791,302,1.396,308,1.66,311,1.513,322,1.856,378,1.018,381,1.513,410,1.66,414,1.66,486,1.141,517,2.934,526,3.637,559,2.206,573,2.934,632,3.252,638,1.66,639,1.856,640,2.623,657,1.856,669,2.623,671,1.856,679,1.856,681,2.934,682,2.934,693,4.222,694,5.225,695,2.154,696,2.154,697,2.154,698,2.154,699,2.154,700,2.154,701,3.637,702,3.405,703,3.405,704,2.154,705,1.856,706,2.154,707,2.154,708,2.154,709,3.405,710,2.154,711,2.154,712,2.154,713,2.154,714,1.856,715,2.154,716,2.154,717,2.154,718,2.154,719,2.154,720,2.154,721,2.154,722,2.154]],["t/106",[70,1.161,90,2.676,101,2.469,126,1.707,133,2.451,134,2.296,183,1.621,188,2.676,191,4.468,193,2.469,256,2.936,259,1.654,262,2.296,283,2.469,286,2.676,297,1.398,336,2.676,354,3.283,382,2.296,392,2.936,393,2.936,438,3.283,538,3.283,611,2.936,701,3.283,723,3.811,724,3.811,725,5.187,726,5.187,727,3.283,728,3.811,729,3.811,730,3.811,731,3.811]],["t/108",[6,1.325,8,1.689,64,2.415,70,1.515,80,1.816,113,1.689,126,1.255,133,1.969,155,0.981,183,1.192,205,1.968,208,2.511,254,1.485,259,2.235,297,2.263,306,5.164,307,4.343,309,1.816,360,2.926,373,3.21,378,1.325,559,2.699,631,2.159,638,2.159,640,2.159,645,2.415,649,2.415,705,2.415,714,2.415,732,2.803,733,2.803,734,2.803,735,4.167,736,2.803,737,2.415]],["t/110",[14,1.459,19,1.963,70,1.417,88,2.807,133,1.539,134,1.963,140,2.807,183,1.385,243,2.288,244,2.288,246,2.51,259,
1.886,271,2.111,285,2.463,288,1.726,297,1.706,309,2.111,320,2.807,379,4.671,430,2.807,486,1.726,575,2.51,661,2.807,662,2.807,663,2.288,664,2.288,737,2.807,738,2.51,739,3.258,740,3.258,741,3.258,742,3.258,743,3.258,744,3.258,745,3.258,746,3.258,747,3.258,748,3.258,749,4.006,750,2.51,751,3.258,752,3.258,753,3.258,754,3.258,755,2.807,756,3.258,757,3.258]],["t/112",[70,1.76,126,2.586,133,2.118,183,1.906,221,2.527,259,2.038,285,2.375,486,2.375,565,3.453,625,3.862,653,3.862,664,3.148,674,3.453,749,3.862,750,3.453,758,4.483,759,4.483,760,4.483,761,4.483,762,3.862]],["t/114",[53,2.733,70,1.773,100,2.938,183,2.475,193,2.938,221,2.557,242,2.938,247,2.733,259,2.162,296,2.733,378,2.143,380,3.908,381,3.185,383,3.908,384,3.908,385,3.908,386,3.494,387,3.908]],["t/116",[70,1.507,177,3.205,183,2.104,221,2.789,259,1.578,260,3.205,631,3.811,663,4.317,677,4.263,763,4.263,764,4.263,765,4.263,766,4.263,767,4.948,768,4.263]],["t/118",[1,0.996,29,1.325,69,0.815,70,1.019,80,0.996,90,1.08,113,0.927,126,1.158,127,1.325,133,0.727,151,1.325,152,1.675,165,1.325,170,2.167,175,1.158,177,0.996,183,1.1,189,1.185,196,1.185,216,1.816,239,0.996,242,1.675,244,1.08,246,1.185,247,1.558,258,0.996,259,1.251,265,1.08,270,1.185,281,1.08,283,1.675,285,1.37,288,0.815,297,0.949,307,0.996,308,1.185,329,1.325,334,1.325,348,1.325,351,2.228,366,1.325,381,1.816,382,0.927,433,1.185,459,1.325,463,1.08,484,1.325,486,0.815,491,1.325,520,1.325,548,1.185,559,0.996,565,1.992,581,1.325,593,1.325,636,1.325,663,1.08,664,3.071,667,1.325,669,1.185,674,1.185,727,2.228,738,1.185,750,1.185,755,2.228,762,2.228,763,2.883,764,4.74,765,2.228,766,5.356,769,1.538,770,2.228,771,1.538,772,1.538,773,2.586,774,1.538,775,3.922,776,1.538,777,2.883,778,1.538,779,1.538,780,1.538,781,1.538,782,1.538,783,1.538,784,1.538,785,1.538,786,1.538,787,1.325,788,1.538,789,1.538,790,1.538,791,1.538,792,1.538,793,1.538,794,1.538,795,1.538,796,1.538,797,1.538,798,1.538,799,1.538,800,2.586,801,3.346,802,1.325,803,1.538,804,1.538,805,1.538,806,1.538,807,1.538,8
08,1.538,809,1.538,810,1.538,811,1.538,812,1.538,813,1.538,814,1.538,815,1.538,816,1.538,817,1.538,818,1.538,819,1.538,820,1.538,821,1.538,822,1.538,823,1.538,824,1.538]],["t/120",[24,1.211,70,1.923,134,1.629,163,1.752,170,1.752,177,1.752,183,2.684,193,1.752,239,1.752,242,1.752,258,1.752,259,2.118,260,1.752,283,1.752,285,1.433,296,2.444,299,1.524,361,2.33,382,1.629,388,2.33,389,2.33,390,2.33,391,2.33,392,2.083,393,2.083,394,2.33,441,2.33,502,3.495,542,2.33,566,2.33,611,2.083,642,2.33,655,2.33,738,2.083,768,2.33,770,3.495,777,2.33,787,2.33,802,2.33,825,2.704,826,2.704,827,4.057,828,2.704,829,2.704,830,2.704,831,2.704,832,2.704,833,2.704,834,2.704,835,2.704,836,2.704,837,2.704,838,2.704,839,2.704,840,2.704,841,2.704]]],"invertedIndex":[["",{"_index":62,"t":{"6":{"position":[[138,1],[189,1],[266,1],[318,1],[431,1]]},"65":{"position":[[94,1]]},"85":{"position":[[25,1]]},"90":{"position":[[51,1]]}}}],["10",{"_index":279,"t":{"36":{"position":[[305,2]]},"100":{"position":[[246,2]]}}}],["100",{"_index":754,"t":{"110":{"position":[[528,3]]}}}],["14",{"_index":629,"t":{"98":{"position":[[27,2]]}}}],["2024",{"_index":253,"t":{"34":{"position":[[38,4]]},"98":{"position":[[39,4]]}}}],["4.0",{"_index":726,"t":{"106":{"position":[[83,3],[116,5]]}}}],["60",{"_index":794,"t":{"118":{"position":[[1068,4]]}}}],["6300",{"_index":280,"t":{"36":{"position":[[308,4]]},"100":{"position":[[249,4]]}}}],["9",{"_index":251,"t":{"34":{"position":[[27,1]]}}}],["abid",{"_index":136,"t":{"16":{"position":[[77,5]]}}}],["abov",{"_index":716,"t":{"104":{"position":[[1073,6]]}}}],["access",{"_index":208,"t":{"28":{"position":[[34,6]]},"32":{"position":[[59,6]]},"98":{"position":[[149,6],[519,6]]},"102":{"position":[[773,11],[845,6],[1007,10]]},"104":{"position":[[451,10],[1163,10]]},"108":{"position":[[563,8],[600,6]]}}}],["accord",{"_index":800,"t":{"118":{"position":[[1290,10],[1937,10]]}}}],["accur",{"_index":714,"t":{"104":{"position":[[872,9]]},"108":{"position":[[399,9]]}}}],["accuraci",{"_inde
x":657,"t":{"102":{"position":[[421,9]]},"104":{"position":[[941,9]]}}}],["achiev",{"_index":107,"t":{"12":{"position":[[82,9]]}}}],["act",{"_index":404,"t":{"54":{"position":[[269,4]]},"87":{"position":[[227,4]]}}}],["action",{"_index":822,"t":{"118":{"position":[[2234,6]]}}}],["activ",{"_index":360,"t":{"46":{"position":[[646,10],[687,10],[780,10]]},"48":{"position":[[209,10]]},"56":{"position":[[148,8]]},"108":{"position":[[524,10],[717,10]]}}}],["actual",{"_index":707,"t":{"104":{"position":[[695,6]]}}}],["acz",{"_index":46,"t":{"4":{"position":[[142,5]]},"85":{"position":[[114,3],[255,3],[423,3],[587,3]]}}}],["addit",{"_index":305,"t":{"40":{"position":[[3,8]]},"71":{"position":[[75,8]]}}}],["addition",{"_index":168,"t":{"20":{"position":[[177,13]]}}}],["address",{"_index":273,"t":{"36":{"position":[[227,7]]},"38":{"position":[[552,8],[623,7],[730,9],[836,7]]},"67":{"position":[[179,7]]},"79":{"position":[[596,9]]},"83":{"position":[[569,10]]},"85":{"position":[[620,7]]},"100":{"position":[[167,7]]}}}],["adequ",{"_index":327,"t":{"44":{"position":[[288,8]]}}}],["adher",{"_index":28,"t":{"2":{"position":[[333,6]]}}}],["administ",{"_index":798,"t":{"118":{"position":[[1227,12]]}}}],["administr",{"_index":815,"t":{"118":{"position":[[1891,14]]}}}],["adopt",{"_index":104,"t":{"12":{"position":[[30,8],[97,8]]}}}],["advanc",{"_index":418,"t":{"54":{"position":[[504,11]]},"58":{"position":[[277,9]]},"73":{"position":[[376,9]]},"87":{"position":[[468,11]]}}}],["advic",{"_index":686,"t":{"102":{"position":[[1586,6]]}}}],["advis",{"_index":751,"t":{"110":{"position":[[412,7]]}}}],["affect",{"_index":361,"t":{"46":{"position":[[668,7],[741,6]]},"120":{"position":[[412,6]]}}}],["affili",{"_index":492,"t":{"69":{"position":[[86,11]]}}}],["against",{"_index":565,"t":{"79":{"position":[[403,7]]},"112":{"position":[[53,7]]},"118":{"position":[[102,7],[2276,7]]}}}],["aggreg",{"_index":752,"t":{"110":{"position":[[474,9]]}}}],["agnost",{"_index":142,"t":{"16":{"position":[[173,8
]]}}}],["agre",{"_index":635,"t":{"98":{"position":[[390,6],[467,5]]}}}],["agreement",{"_index":827,"t":{"120":{"position":[[44,9],[155,11]]}}}],["aim",{"_index":27,"t":{"2":{"position":[[323,3]]},"14":{"position":[[123,3]]},"38":{"position":[[3,3]]},"60":{"position":[[271,4]]},"73":{"position":[[324,5]]},"104":{"position":[[1205,3]]}}}],["align",{"_index":31,"t":{"2":{"position":[[381,9]]},"79":{"position":[[471,5]]},"81":{"position":[[199,7]]}}}],["allow",{"_index":438,"t":{"56":{"position":[[451,8]]},"106":{"position":[[146,6]]}}}],["alway",{"_index":339,"t":{"44":{"position":[[662,6]]}}}],["amend",{"_index":721,"t":{"104":{"position":[[1347,7]]}}}],["amic",{"_index":784,"t":{"118":{"position":[[755,8]]}}}],["amount",{"_index":293,"t":{"38":{"position":[[403,6]]}}}],["anonym",{"_index":167,"t":{"20":{"position":[[166,10],[231,10]]}}}],["anonymis",{"_index":472,"t":{"62":{"position":[[20,13],[85,13]]}}}],["anoth",{"_index":356,"t":{"46":{"position":[[509,7]]},"60":{"position":[[0,7]]}}}],["anyon",{"_index":192,"t":{"24":{"position":[[102,6]]}}}],["appli",{"_index":19,"t":{"2":{"position":[[188,7]]},"34":{"position":[[227,7]]},"40":{"position":[[309,6]]},"85":{"position":[[4,7]]},"102":{"position":[[656,5]]},"110":{"position":[[587,5]]}}}],["applic",{"_index":288,"t":{"38":{"position":[[226,10]]},"44":{"position":[[694,10]]},"56":{"position":[[530,12]]},"58":{"position":[[198,12]]},"77":{"position":[[604,13]]},"102":{"position":[[697,10],[959,10]]},"110":{"position":[[628,10]]},"118":{"position":[[2017,10]]}}}],["approach",{"_index":257,"t":{"34":{"position":[[98,8]]},"42":{"position":[[13,9]]},"79":{"position":[[525,8]]}}}],["appropri",{"_index":666,"t":{"102":{"position":[[806,11]]}}}],["arbitr",{"_index":766,"t":{"116":{"position":[[102,12]]},"118":{"position":[[260,11],[427,11],[549,11],[1215,11],[1263,11],[1339,11],[1447,8],[1499,11],[1656,11],[1781,11],[1860,12],[1910,10],[1986,12],[2264,11]]}}}],["area",{"_index":443,"t":{"56":{"position":[[597,6]]},"60":{"p
osition":[[8,4]]},"85":{"position":[[279,5]]}}}],["aris",{"_index":750,"t":{"110":{"position":[[251,7]]},"112":{"position":[[130,7]]},"118":{"position":[[690,7]]}}}],["aros",{"_index":819,"t":{"118":{"position":[[2110,5]]}}}],["art",{"_index":149,"t":{"18":{"position":[[76,3]]}}}],["artefact",{"_index":508,"t":{"71":{"position":[[170,10]]}}}],["ask",{"_index":349,"t":{"46":{"position":[[305,3],[381,3],[435,3],[623,5]]}}}],["aspect",{"_index":603,"t":{"85":{"position":[[359,8]]}}}],["assist",{"_index":511,"t":{"73":{"position":[[186,7]]},"75":{"position":[[45,9]]},"77":{"position":[[153,6]]},"85":{"position":[[136,10]]}}}],["associ",{"_index":267,"t":{"36":{"position":[[152,12],[255,11],[434,12]]},"100":{"position":[[92,12],[196,11],[385,12]]}}}],["assumpt",{"_index":697,"t":{"104":{"position":[[118,11]]}}}],["assur",{"_index":572,"t":{"79":{"position":[[718,9]]}}}],["attent",{"_index":409,"t":{"54":{"position":[[390,9]]},"87":{"position":[[354,9]]}}}],["attorney",{"_index":761,"t":{"112":{"position":[[113,10]]}}}],["attribut",{"_index":725,"t":{"106":{"position":[[71,11],[299,11]]}}}],["audit",{"_index":577,"t":{"81":{"position":[[84,8],[438,8]]}}}],["avail",{"_index":188,"t":{"24":{"position":[[52,9]]},"94":{"position":[[354,9]]},"102":{"position":[[478,12],[1062,9]]},"106":{"position":[[38,9]]}}}],["award",{"_index":781,"t":{"118":{"position":[[561,5]]}}}],["baarerstrass",{"_index":278,"t":{"36":{"position":[[291,13]]},"100":{"position":[[232,13]]}}}],["base",{"_index":20,"t":{"2":{"position":[[205,5]]},"46":{"position":[[707,5]]},"67":{"position":[[145,6]]},"75":{"position":[[354,5]]},"90":{"position":[[216,5]]},"104":{"position":[[65,5],[590,5]]}}}],["basi",{"_index":287,"t":{"38":{"position":[[205,5]]},"75":{"position":[[212,6]]},"102":{"position":[[44,5],[1177,5]]}}}],["be",{"_index":127,"t":{"14":{"position":[[195,5]]},"118":{"position":[[1364,5]]}}}],["bear",{"_index":715,"t":{"104":{"position":[[910,4]]}}}],["becom",{"_index":621,"t":{"94":{"position":[[136
,6]]}}}],["befor",{"_index":364,"t":{"46":{"position":[[791,6]]},"98":{"position":[[316,6]]}}}],["behaviour",{"_index":544,"t":{"77":{"position":[[202,9]]}}}],["believ",{"_index":113,"t":{"14":{"position":[[3,7]]},"24":{"position":[[148,7]]},"28":{"position":[[3,7]]},"46":{"position":[[978,7]]},"108":{"position":[[338,8]]},"118":{"position":[[294,7]]}}}],["below",{"_index":112,"t":{"12":{"position":[[152,6]]}}}],["benefit",{"_index":194,"t":{"24":{"position":[[131,7]]},"94":{"position":[[166,10]]}}}],["between",{"_index":177,"t":{"22":{"position":[[104,7]]},"56":{"position":[[174,7]]},"116":{"position":[[62,7]]},"118":{"position":[[1714,7]]},"120":{"position":[[54,7]]}}}],["beyond",{"_index":420,"t":{"54":{"position":[[582,7]]},"56":{"position":[[116,6]]},"73":{"position":[[568,7]]},"87":{"position":[[533,7]]}}}],["bind",{"_index":673,"t":{"102":{"position":[[1214,7]]}}}],["border",{"_index":176,"t":{"22":{"position":[[97,6]]}}}],["both",{"_index":163,"t":{"20":{"position":[[110,4]]},"26":{"position":[[34,4]]},"67":{"position":[[21,4]]},"94":{"position":[[211,4]]},"120":{"position":[[199,4]]}}}],["bottleneck",{"_index":549,"t":{"77":{"position":[[332,11]]}}}],["bound",{"_index":636,"t":{"98":{"position":[[403,5]]},"118":{"position":[[404,5]]}}}],["boundari",{"_index":455,"t":{"58":{"position":[[227,10]]}}}],["bounti",{"_index":498,"t":{"69":{"position":[[217,6]]}}}],["bridg",{"_index":428,"t":{"56":{"position":[[157,8]]}}}],["briefli",{"_index":298,"t":{"38":{"position":[[599,7]]}}}],["bring",{"_index":433,"t":{"56":{"position":[[312,5]]},"75":{"position":[[388,6]]},"118":{"position":[[2061,5]]}}}],["broad",{"_index":531,"t":{"75":{"position":[[406,5]]}}}],["broadest",{"_index":769,"t":{"118":{"position":[[34,8]]}}}],["bug",{"_index":569,"t":{"79":{"position":[[563,4]]}}}],["build",{"_index":21,"t":{"2":{"position":[[223,5]]},"18":{"position":[[37,8]]},"26":{"position":[[247,9]]}}}],["bureaucraci",{"_index":237,"t":{"32":{"position":[[125,11]]}}}],["buy",{"_index":6
90,"t":{"102":{"position":[[1694,3],[1734,3]]}}}],["c/o",{"_index":274,"t":{"36":{"position":[[267,3]]},"100":{"position":[[208,3]]}}}],["calendar",{"_index":795,"t":{"118":{"position":[[1073,8]]}}}],["capit",{"_index":232,"t":{"32":{"position":[[69,8],[281,8]]}}}],["caption",{"_index":832,"t":{"120":{"position":[[255,8]]}}}],["carefulli",{"_index":634,"t":{"98":{"position":[[306,9]]}}}],["caus",{"_index":118,"t":{"14":{"position":[[93,5]]}}}],["cc",{"_index":393,"t":{"52":{"position":[[144,2]]},"106":{"position":[[109,3]]},"120":{"position":[[957,2]]}}}],["censorship",{"_index":139,"t":{"16":{"position":[[125,10]]},"90":{"position":[[53,10]]}}}],["centralis",{"_index":198,"t":{"26":{"position":[[12,14]]}}}],["certain",{"_index":286,"t":{"38":{"position":[[133,7]]},"40":{"position":[[152,7],[183,7]]},"46":{"position":[[145,7]]},"106":{"position":[[270,7]]}}}],["challeng",{"_index":106,"t":{"12":{"position":[[69,9]]}}}],["chamber",{"_index":799,"t":{"118":{"position":[[1253,9]]}}}],["chang",{"_index":100,"t":{"10":{"position":[[102,7]]},"50":{"position":[[135,8]]},"81":{"position":[[393,7]]},"104":{"position":[[583,6],[1329,6]]},"114":{"position":[[161,8]]}}}],["channel",{"_index":89,"t":{"8":{"position":[[106,9]]},"85":{"position":[[318,8]]}}}],["check",{"_index":383,"t":{"50":{"position":[[96,5]]},"114":{"position":[[119,8]]}}}],["choic",{"_index":346,"t":{"46":{"position":[[153,7]]}}}],["chrono",{"_index":594,"t":{"83":{"position":[[442,8]]}}}],["claim",{"_index":664,"t":{"102":{"position":[[724,6]]},"110":{"position":[[503,6]]},"112":{"position":[[73,7]]},"118":{"position":[[87,5],[939,5],[2071,6],[2104,5],[2176,6]]}}}],["class",{"_index":821,"t":{"118":{"position":[[2228,5]]}}}],["classwid",{"_index":824,"t":{"118":{"position":[[2254,9]]}}}],["claus",{"_index":334,"t":{"44":{"position":[[459,7]]},"118":{"position":[[439,7]]}}}],["close",{"_index":512,"t":{"73":{"position":[[273,7]]}}}],["code",{"_index":483,"t":{"67":{"position":[[39,4],[63,4],[140,4]]}}}],["coe
rcion",{"_index":128,"t":{"14":{"position":[[201,8]]}}}],["cohes",{"_index":412,"t":{"54":{"position":[[426,8]]},"87":{"position":[[390,8]]}}}],["collabor",{"_index":413,"t":{"54":{"position":[[439,13]]},"56":{"position":[[212,12]]},"73":{"position":[[260,12]]},"75":{"position":[[135,13]]},"87":{"position":[[403,13]]},"94":{"position":[[340,13]]}}}],["collect",{"_index":263,"t":{"36":{"position":[[48,10],[141,10],[244,10],[423,10]]},"38":{"position":[[20,10],[35,10],[113,7],[270,7],[471,7]]},"40":{"position":[[31,10],[78,7]]},"46":{"position":[[50,10],[216,7]]},"54":{"position":[[60,11],[567,10]]},"100":{"position":[[81,10],[185,10],[374,10]]}}}],["come",{"_index":372,"t":{"48":{"position":[[25,4]]}}}],["commerci",{"_index":730,"t":{"106":{"position":[[243,10]]}}}],["commiss",{"_index":330,"t":{"44":{"position":[[358,11]]}}}],["commission",{"_index":369,"t":{"46":{"position":[[949,12]]}}}],["commit",{"_index":672,"t":{"102":{"position":[[1200,10]]}}}],["common",{"_index":724,"t":{"106":{"position":[[63,7]]}}}],["commun",{"_index":69,"t":{"6":{"position":[[227,14]]},"8":{"position":[[13,10],[94,11],[140,9]]},"20":{"position":[[115,14]]},"22":{"position":[[142,10]]},"60":{"position":[[312,13]]},"73":{"position":[[530,13]]},"90":{"position":[[74,13]]},"118":{"position":[[1699,14]]}}}],["compil",{"_index":589,"t":{"83":{"position":[[254,9]]}}}],["complaint",{"_index":367,"t":{"46":{"position":[[890,9]]}}}],["complet",{"_index":172,"t":{"22":{"position":[[14,8]]},"102":{"position":[[407,13]]},"104":{"position":[[894,8],[966,12]]}}}],["compli",{"_index":637,"t":{"98":{"position":[[413,6]]},"102":{"position":[[940,9]]}}}],["compliant",{"_index":665,"t":{"102":{"position":[[793,9]]}}}],["compon",{"_index":475,"t":{"62":{"position":[[137,11]]}}}],["compos",{"_index":204,"t":{"26":{"position":[[142,9]]}}}],["comprehens",{"_index":561,"t":{"79":{"position":[[101,13]]}}}],["compris",{"_index":12,"t":{"2":{"position":[[114,9]]}}}],["compromis",{"_index":145,"t":{"18":{"position"
:[[9,10]]}}}],["comput",{"_index":203,"t":{"26":{"position":[[132,9]]},"96":{"position":[[131,10]]}}}],["concern",{"_index":656,"t":{"102":{"position":[[392,10]]}}}],["condit",{"_index":705,"t":{"104":{"position":[[627,11]]},"108":{"position":[[92,11]]}}}],["conduct",{"_index":463,"t":{"60":{"position":[[196,10]]},"73":{"position":[[307,7]]},"77":{"position":[[254,10]]},"118":{"position":[[1556,9]]}}}],["confer",{"_index":806,"t":{"118":{"position":[[1576,10]]}}}],["confidenti",{"_index":459,"t":{"58":{"position":[[364,12]]},"118":{"position":[[1747,13]]}}}],["conflict",{"_index":767,"t":{"116":{"position":[[133,8]]}}}],["conform",{"_index":341,"t":{"44":{"position":[[678,10]]}}}],["connect",{"_index":88,"t":{"8":{"position":[[68,10]]},"110":{"position":[[279,9]]}}}],["consent",{"_index":359,"t":{"46":{"position":[[564,7],[611,7],[721,7],[822,8]]}}}],["consequenti",{"_index":748,"t":{"110":{"position":[[217,13]]}}}],["consid",{"_index":670,"t":{"102":{"position":[[1122,10],[1273,10]]}}}],["consist",{"_index":809,"t":{"118":{"position":[[1641,7]]}}}],["constitut",{"_index":683,"t":{"102":{"position":[[1543,10]]}}}],["constru",{"_index":689,"t":{"102":{"position":[[1662,9]]}}}],["consult",{"_index":276,"t":{"36":{"position":[[275,10]]},"100":{"position":[[216,10]]}}}],["contact",{"_index":389,"t":{"52":{"position":[[75,7]]},"120":{"position":[[888,7]]}}}],["contain",{"_index":632,"t":{"98":{"position":[[221,9]]},"102":{"position":[[1287,7],[1517,9]]},"104":{"position":[[21,7],[815,9],[994,9]]}}}],["contemporan",{"_index":830,"t":{"120":{"position":[[123,15]]}}}],["content",{"_index":133,"t":{"16":{"position":[[43,7]]},"40":{"position":[[191,8],[249,7]]},"48":{"position":[[197,7]]},"98":{"position":[[195,7]]},"102":{"position":[[522,7],[589,7],[762,7],[986,7]]},"106":{"position":[[20,8],[194,7]]},"108":{"position":[[310,7],[377,7]]},"110":{"position":[[339,7]]},"112":{"position":[[186,7]]},"118":{"position":[[198,7]]}}}],["continu",{"_index":218,"t":{"30":{"position":[
[35,8]]}}}],["contract",{"_index":575,"t":{"81":{"position":[[12,9],[102,9],[165,9],[264,9],[493,9]]},"102":{"position":[[1190,9]]},"110":{"position":[[44,9]]}}}],["contractu",{"_index":333,"t":{"44":{"position":[[447,11]]}}}],["contribut",{"_index":411,"t":{"54":{"position":[[410,10]]},"58":{"position":[[48,13]]},"60":{"position":[[357,12]]},"67":{"position":[[44,14],[68,14]]},"73":{"position":[[417,11]]},"85":{"position":[[498,12]]},"87":{"position":[[374,10]]}}}],["contributor",{"_index":178,"t":{"22":{"position":[[121,12]]}}}],["control",{"_index":205,"t":{"26":{"position":[[213,7]]},"36":{"position":[[96,11]]},"46":{"position":[[522,10]]},"108":{"position":[[443,7]]}}}],["controversi",{"_index":771,"t":{"118":{"position":[[113,11]]}}}],["convers",{"_index":525,"t":{"75":{"position":[[294,12]]}}}],["copi",{"_index":354,"t":{"46":{"position":[[448,4]]},"106":{"position":[[162,5]]}}}],["core",{"_index":32,"t":{"2":{"position":[[400,4]]},"22":{"position":[[116,4]]},"83":{"position":[[134,4],[401,4]]}}}],["cornerston",{"_index":398,"t":{"54":{"position":[[35,11]]},"87":{"position":[[35,11]]}}}],["correct",{"_index":350,"t":{"46":{"position":[[315,7]]}}}],["cost",{"_index":246,"t":{"32":{"position":[[262,5]]},"110":{"position":[[141,4]]},"118":{"position":[[373,4]]}}}],["countri",{"_index":326,"t":{"44":{"position":[[248,9]]}}}],["court",{"_index":765,"t":{"116":{"position":[[93,5]]},"118":{"position":[[530,5],[1488,6]]}}}],["cover",{"_index":825,"t":{"120":{"position":[[27,5]]}}}],["craft",{"_index":402,"t":{"54":{"position":[[206,8]]},"87":{"position":[[164,8]]}}}],["creat",{"_index":186,"t":{"24":{"position":[[16,6]]},"30":{"position":[[3,6]]},"102":{"position":[[1136,8]]}}}],["creativ",{"_index":723,"t":{"106":{"position":[[54,8]]}}}],["critic",{"_index":588,"t":{"83":{"position":[[199,8]]}}}],["crucial",{"_index":505,"t":{"71":{"position":[[31,7]]},"79":{"position":[[296,7]]}}}],["cryptoeconom",{"_index":137,"t":{"16":{"position":[[90,14]]},"75":{"position":[[52
9,16]]}}}],["cryptograph",{"_index":597,"t":{"85":{"position":[[58,13],[225,13],[337,13],[395,13]]}}}],["cryptographi",{"_index":596,"t":{"85":{"position":[[12,12]]}}}],["current",{"_index":225,"t":{"30":{"position":[[122,7]]},"65":{"position":[[125,7]]},"104":{"position":[[74,7]]}}}],["custodian",{"_index":26,"t":{"2":{"position":[[288,10]]}}}],["cut",{"_index":423,"t":{"56":{"position":[[57,7]]},"85":{"position":[[382,7]]}}}],["damag",{"_index":749,"t":{"110":{"position":[[231,7],[447,8]]},"112":{"position":[[81,7]]}}}],["dapp",{"_index":55,"t":{"6":{"position":[[60,5]]}}}],["dappconnect",{"_index":67,"t":{"6":{"position":[[191,12]]}}}],["data",{"_index":78,"t":{"6":{"position":[[376,4]]},"36":{"position":[[86,4]]},"38":{"position":[[73,4],[150,4],[302,4],[361,5],[514,4]]},"40":{"position":[[54,5],[106,4]]},"42":{"position":[[31,4],[165,4]]},"44":{"position":[[50,4],[134,4],[217,4],[491,4],[609,4]]},"46":{"position":[[93,4],[199,4],[347,4],[412,4],[470,5],[517,4],[597,4],[873,5],[917,4],[1005,4]]}}}],["date",{"_index":86,"t":{"8":{"position":[[35,4]]},"81":{"position":[[371,4]]},"83":{"position":[[356,5]]}}}],["day",{"_index":520,"t":{"75":{"position":[[201,3],[208,3]]},"118":{"position":[[1082,4]]}}}],["dean",{"_index":73,"t":{"6":{"position":[[304,4],[416,4]]}}}],["decentr",{"_index":63,"t":{"6":{"position":[[140,13]]}}}],["decentralis",{"_index":68,"t":{"6":{"position":[[213,13]]},"12":{"position":[[46,13]]},"54":{"position":[[519,13]]},"56":{"position":[[635,13]]},"58":{"position":[[397,13]]},"60":{"position":[[333,13]]},"73":{"position":[[474,13],[516,13]]},"77":{"position":[[590,13]]},"85":{"position":[[558,13]]},"87":{"position":[[483,13]]}}}],["decis",{"_index":804,"t":{"118":{"position":[[1456,8]]}}}],["dedic",{"_index":514,"t":{"75":{"position":[[32,9]]},"79":{"position":[[23,9]]}}}],["deem",{"_index":329,"t":{"44":{"position":[[320,6]]},"118":{"position":[[1396,6]]}}}],["deep",{"_index":16,"t":{"2":{"position":[[143,4]]},"56":{"position":[[4,4],[464,4],
[559,4]]},"58":{"position":[[25,4],[308,4]]},"60":{"position":[[30,4],[257,4]]},"73":{"position":[[290,4]]},"94":{"position":[[69,4],[220,4]]}}}],["defin",{"_index":566,"t":{"79":{"position":[[411,7]]},"120":{"position":[[378,7]]}}}],["degre",{"_index":702,"t":{"104":{"position":[[505,6],[1032,6]]}}}],["deliv",{"_index":552,"t":{"77":{"position":[[504,7]]},"79":{"position":[[785,7]]}}}],["delv",{"_index":451,"t":{"58":{"position":[[128,6]]}}}],["demand",{"_index":556,"t":{"77":{"position":[[568,7]]},"83":{"position":[[607,7]]}}}],["demo",{"_index":608,"t":{"90":{"position":[[88,6]]}}}],["depth",{"_index":464,"t":{"60":{"position":[[210,5]]}}}],["describ",{"_index":682,"t":{"102":{"position":[[1483,9]]},"104":{"position":[[1063,9],[1131,9]]}}}],["descript",{"_index":700,"t":{"104":{"position":[[289,13]]}}}],["design",{"_index":138,"t":{"16":{"position":[[105,6],[166,6]]},"75":{"position":[[71,9]]},"81":{"position":[[237,7]]}}}],["detail",{"_index":410,"t":{"54":{"position":[[403,6]]},"87":{"position":[[367,6]]},"104":{"position":[[313,8]]}}}],["devcon5",{"_index":82,"t":{"6":{"position":[[446,9]]}}}],["develop",{"_index":4,"t":{"2":{"position":[[39,11]]},"4":{"position":[[59,9]]},"71":{"position":[[153,7]]},"73":{"position":[[111,8]]},"77":{"position":[[74,10]]},"79":{"position":[[72,11]]},"81":{"position":[[54,12],[249,8]]},"83":{"position":[[87,11],[416,9]]},"85":{"position":[[591,8]]},"96":{"position":[[21,10]]},"104":{"position":[[353,11],[649,13],[702,11]]}}}],["dimens",{"_index":54,"t":{"6":{"position":[[46,9]]}}}],["disclaim",{"_index":641,"t":{"102":{"position":[[115,8],[639,11]]}}}],["discord",{"_index":92,"t":{"8":{"position":[[153,7]]},"65":{"position":[[36,8]]}}}],["discov",{"_index":496,"t":{"69":{"position":[[183,10]]}}}],["discret",{"_index":639,"t":{"102":{"position":[[91,10],[894,10]]},"104":{"position":[[1415,11]]}}}],["disput",{"_index":764,"t":{"116":{"position":[[53,8]]},"118":{"position":[[16,9],[338,8],[682,7],[828,8],[880,7],[922,8],[1047,7],[
1118,8],[1141,7]]}}}],["distribut",{"_index":538,"t":{"77":{"position":[[4,11],[85,11],[452,11]]},"106":{"position":[[179,10]]}}}],["document",{"_index":391,"t":{"52":{"position":[[117,8]]},"120":{"position":[[930,8]]}}}],["doesn’t",{"_index":362,"t":{"46":{"position":[[733,7]]}}}],["domicil",{"_index":272,"t":{"36":{"position":[[218,8]]},"100":{"position":[[158,8]]}}}],["don't",{"_index":144,"t":{"18":{"position":[[3,5]]}}}],["don’t",{"_index":290,"t":{"38":{"position":[[315,5]]}}}],["drive",{"_index":524,"t":{"75":{"position":[[283,6]]}}}],["driven",{"_index":2,"t":{"2":{"position":[[19,6]]}}}],["dst",{"_index":539,"t":{"77":{"position":[[32,5],[298,3],[428,3]]}}}],["dst'",{"_index":540,"t":{"77":{"position":[[123,5]]}}}],["dynam",{"_index":536,"t":{"75":{"position":[[476,7]]}}}],["each",{"_index":9,"t":{"2":{"position":[[92,4]]},"104":{"position":[[1303,4]]}}}],["eas",{"_index":210,"t":{"28":{"position":[[78,4]]}}}],["econom",{"_index":124,"t":{"14":{"position":[[162,8]]},"32":{"position":[[253,8]]},"75":{"position":[[14,9]]},"90":{"position":[[143,8]]}}}],["economi",{"_index":515,"t":{"75":{"position":[[93,10]]}}}],["ecosystem",{"_index":407,"t":{"54":{"position":[[359,10]]},"83":{"position":[[40,10],[225,10],[634,10]]},"85":{"position":[[576,10]]},"87":{"position":[[315,9]]}}}],["edg",{"_index":424,"t":{"56":{"position":[[65,4]]},"85":{"position":[[390,4]]}}}],["edit",{"_index":400,"t":{"54":{"position":[[120,7]]},"87":{"position":[[107,7]]}}}],["educ",{"_index":215,"t":{"28":{"position":[[197,11]]}}}],["effect",{"_index":242,"t":{"32":{"position":[[221,9]]},"50":{"position":[[175,9]]},"114":{"position":[[207,9]]},"118":{"position":[[359,9],[378,9]]},"120":{"position":[[681,7]]}}}],["effici",{"_index":462,"t":{"60":{"position":[[131,11]]},"73":{"position":[[460,10]]},"83":{"position":[[325,10]]}}}],["effort",{"_index":216,"t":{"28":{"position":[[209,8]]},"56":{"position":[[368,6]]},"94":{"position":[[83,8]]},"118":{"position":[[733,7],[782,7]]}}}],["eigenman",{
"_index":74,"t":{"6":{"position":[[309,8]]}}}],["eigenmann",{"_index":81,"t":{"6":{"position":[[421,9]]}}}],["emerg",{"_index":616,"t":{"94":{"position":[[27,8]]}}}],["emphasi",{"_index":209,"t":{"28":{"position":[[66,8]]}}}],["employ",{"_index":600,"t":{"85":{"position":[[188,9],[293,9]]}}}],["enabl",{"_index":52,"t":{"6":{"position":[[31,8],[204,8]]},"16":{"position":[[3,6]]},"73":{"position":[[498,8]]},"77":{"position":[[480,7]]},"96":{"position":[[104,7]]}}}],["encompass",{"_index":440,"t":{"56":{"position":[[573,11]]}}}],["endors",{"_index":734,"t":{"108":{"position":[[260,8]]}}}],["enforc",{"_index":770,"t":{"118":{"position":[[51,11],[1472,8]]},"120":{"position":[[703,7],[798,7]]}}}],["engin",{"_index":37,"t":{"4":{"position":[[22,8],[133,8]]}}}],["english",{"_index":813,"t":{"118":{"position":[[1804,8]]}}}],["enhanc",{"_index":303,"t":{"38":{"position":[[782,7]]},"60":{"position":[[106,7]]},"73":{"position":[[333,9]]},"79":{"position":[[616,9]]},"85":{"position":[[427,8]]}}}],["ensur",{"_index":30,"t":{"2":{"position":[[368,6]]},"38":{"position":[[743,6]]},"44":{"position":[[470,6]]},"54":{"position":[[292,8]]},"69":{"position":[[257,6]]},"77":{"position":[[368,8]]},"79":{"position":[[214,6],[438,7],[568,7],[759,7]]},"81":{"position":[[142,7],[322,8],[554,8]]},"83":{"position":[[307,8]]},"87":{"position":[[250,8]]}}}],["enter",{"_index":630,"t":{"98":{"position":[[100,7]]}}}],["entir",{"_index":826,"t":{"120":{"position":[[37,6]]}}}],["entiti",{"_index":224,"t":{"30":{"position":[[101,6]]},"36":{"position":[[121,6]]},"56":{"position":[[238,9]]},"100":{"position":[[61,6]]}}}],["environ",{"_index":414,"t":{"54":{"position":[[453,12]]},"87":{"position":[[417,12]]},"104":{"position":[[678,12]]}}}],["envis",{"_index":619,"t":{"94":{"position":[[111,10]]}}}],["equit",{"_index":741,"t":{"110":{"position":[[102,9]]}}}],["especi",{"_index":181,"t":{"22":{"position":[[190,10]]},"96":{"position":[[195,10]]}}}],["essenc",{"_index":728,"t":{"106":{"position":[[125,7]]}}}
],["essenti",{"_index":162,"t":{"20":{"position":[[78,9]]},"83":{"position":[[149,9]]}}}],["estim",{"_index":695,"t":{"104":{"position":[[96,10]]}}}],["ethereum",{"_index":56,"t":{"6":{"position":[[83,8],[285,8]]}}}],["eur",{"_index":753,"t":{"110":{"position":[[524,3]]}}}],["euro",{"_index":757,"t":{"110":{"position":[[545,7]]}}}],["europ",{"_index":337,"t":{"44":{"position":[[558,7]]}}}],["european",{"_index":324,"t":{"44":{"position":[[161,8],[349,8],[626,8]]}}}],["even",{"_index":140,"t":{"16":{"position":[[148,4]]},"110":{"position":[[391,4]]}}}],["event",{"_index":320,"t":{"44":{"position":[[99,5],[573,6]]},"110":{"position":[[463,6]]}}}],["eventu",{"_index":620,"t":{"94":{"position":[[125,10]]}}}],["execut",{"_index":560,"t":{"79":{"position":[[88,9]]}}}],["exist",{"_index":219,"t":{"30":{"position":[[47,5]]}}}],["expand",{"_index":836,"t":{"120":{"position":[[394,7]]}}}],["expect",{"_index":671,"t":{"102":{"position":[[1149,12]]},"104":{"position":[[82,13]]}}}],["expens",{"_index":760,"t":{"112":{"position":[[93,9]]}}}],["expertis",{"_index":408,"t":{"54":{"position":[[376,9]]},"71":{"position":[[97,10]]},"77":{"position":[[439,9]]},"85":{"position":[[175,9]]},"87":{"position":[[340,9]]},"94":{"position":[[326,9]]}}}],["explain",{"_index":342,"t":{"46":{"position":[[3,9]]}}}],["explor",{"_index":422,"t":{"56":{"position":[[41,11]]},"58":{"position":[[182,9]]},"60":{"position":[[89,8]]}}}],["export",{"_index":321,"t":{"44":{"position":[[113,6]]}}}],["express",{"_index":644,"t":{"102":{"position":[[152,7],[1374,7]]}}}],["extend",{"_index":211,"t":{"28":{"position":[[101,7]]},"56":{"position":[[108,7]]}}}],["extens",{"_index":586,"t":{"83":{"position":[[15,9]]}}}],["extent",{"_index":309,"t":{"40":{"position":[[207,6]]},"52":{"position":[[7,6]]},"102":{"position":[[677,6]]},"108":{"position":[[7,6]]},"110":{"position":[[608,6]]}}}],["facilit",{"_index":415,"t":{"54":{"position":[[466,12]]},"87":{"position":[[430,12]]}}}],["fail",{"_index":841,"t":{"120":{"posit
ion":[[695,4]]}}}],["fair",{"_index":207,"t":{"28":{"position":[[14,4]]}}}],["famili",{"_index":606,"t":{"90":{"position":[[8,6]]}}}],["fdpic",{"_index":370,"t":{"46":{"position":[[962,8]]}}}],["featur",{"_index":147,"t":{"18":{"position":[[46,9]]},"40":{"position":[[160,8],[260,9]]},"104":{"position":[[339,9]]}}}],["februari",{"_index":252,"t":{"34":{"position":[[29,8]]},"98":{"position":[[30,8]]}}}],["feder",{"_index":368,"t":{"46":{"position":[[909,7]]}}}],["fee",{"_index":762,"t":{"112":{"position":[[124,5]]},"118":{"position":[[1837,4],[1921,4]]}}}],["field",{"_index":456,"t":{"58":{"position":[[291,5]]}}}],["fight",{"_index":236,"t":{"32":{"position":[[119,5]]}}}],["file",{"_index":366,"t":{"46":{"position":[[883,4]]},"118":{"position":[[1883,7]]}}}],["final",{"_index":797,"t":{"118":{"position":[[1173,7]]}}}],["financi",{"_index":684,"t":{"102":{"position":[[1554,10]]}}}],["find",{"_index":434,"t":{"56":{"position":[[333,8],[506,8]]}}}],["first",{"_index":484,"t":{"67":{"position":[[97,5]]},"118":{"position":[[707,5]]}}}],["fit",{"_index":647,"t":{"102":{"position":[[236,7]]}}}],["flow",{"_index":131,"t":{"16":{"position":[[19,4]]}}}],["focu",{"_index":460,"t":{"60":{"position":[[16,5]]}}}],["focus",{"_index":513,"t":{"73":{"position":[[364,8]]},"83":{"position":[[72,7]]},"85":{"position":[[47,7]]},"96":{"position":[[10,7]]}}}],["follow",{"_index":90,"t":{"8":{"position":[[116,6]]},"38":{"position":[[495,9]]},"106":{"position":[[371,9]]},"118":{"position":[[417,9]]}}}],["forc",{"_index":802,"t":{"118":{"position":[[1373,6]]},"120":{"position":[[671,5]]}}}],["forecast",{"_index":696,"t":{"104":{"position":[[107,10]]}}}],["forefront",{"_index":421,"t":{"56":{"position":[[28,9]]}}}],["form",{"_index":332,"t":{"44":{"position":[[399,5],[438,5]]},"102":{"position":[[1165,7]]}}}],["format",{"_index":729,"t":{"106":{"position":[[209,6]]}}}],["forum",{"_index":95,"t":{"8":{"position":[[224,5]]},"65":{"position":[[69,6]]}}}],["forward",{"_index":526,"t":{"75":{"positi
on":[[307,7]]},"104":{"position":[[29,7],[205,7],[541,7]]}}}],["foundat",{"_index":458,"t":{"58":{"position":[[338,10]]}}}],["franck",{"_index":65,"t":{"6":{"position":[[176,6]]}}}],["frank",{"_index":179,"t":{"22":{"position":[[160,5]]}}}],["free",{"_index":130,"t":{"16":{"position":[[14,4]]},"24":{"position":[[68,4]]},"69":{"position":[[62,4]]}}}],["freedom",{"_index":125,"t":{"14":{"position":[[171,9]]}}}],["fruition",{"_index":435,"t":{"56":{"position":[[345,9]]}}}],["full",{"_index":611,"t":{"90":{"position":[[168,6]]},"106":{"position":[[323,4]]},"120":{"position":[[666,4]]}}}],["function",{"_index":302,"t":{"38":{"position":[[764,13]]},"79":{"position":[[246,13],[455,15]]},"81":{"position":[[336,8]]},"98":{"position":[[207,13]]},"104":{"position":[[322,16]]}}}],["further",{"_index":593,"t":{"83":{"position":[[375,7]]},"118":{"position":[[1026,7]]}}}],["futur",{"_index":680,"t":{"102":{"position":[[1436,6]]}}}],["gap",{"_index":429,"t":{"56":{"position":[[170,3]]}}}],["gener",{"_index":311,"t":{"42":{"position":[[5,7]]},"75":{"position":[[521,7]]},"96":{"position":[[53,7]]},"104":{"position":[[192,8]]}}}],["github",{"_index":480,"t":{"65":{"position":[[103,7]]}}}],["given",{"_index":449,"t":{"58":{"position":[[72,5]]}}}],["gmbh",{"_index":277,"t":{"36":{"position":[[286,4]]},"100":{"position":[[227,4]]}}}],["go",{"_index":43,"t":{"4":{"position":[[86,3]]}}}],["goal",{"_index":102,"t":{"12":{"position":[[4,4]]},"22":{"position":[[262,6]]},"62":{"position":[[58,4]]}}}],["good",{"_index":187,"t":{"24":{"position":[[35,5]]},"67":{"position":[[91,5]]}}}],["gossip",{"_index":612,"t":{"90":{"position":[[209,6]]}}}],["gossipsub",{"_index":447,"t":{"56":{"position":[[722,9]]},"60":{"position":[[55,9],[173,9]]},"62":{"position":[[75,9]]},"73":{"position":[[162,9],[350,10]]}}}],["govern",{"_index":631,"t":{"98":{"position":[[137,6]]},"108":{"position":[[132,6]]},"116":{"position":[[10,7]]}}}],["group",{"_index":5,"t":{"2":{"position":[[51,5]]}}}],["grow",{"_index":230,"t
":{"32":{"position":[[39,4]]}}}],["guarante",{"_index":154,"t":{"18":{"position":[[169,11]]},"102":{"position":[[1355,10]]}}}],["guidanc",{"_index":507,"t":{"71":{"position":[[133,9]]}}}],["hackenproof",{"_index":500,"t":{"69":{"position":[[237,11]]}}}],["harm",{"_index":736,"t":{"108":{"position":[[423,8]]}}}],["harmless",{"_index":759,"t":{"112":{"position":[[35,8]]}}}],["head",{"_index":833,"t":{"120":{"position":[[268,8]]}}}],["heard",{"_index":778,"t":{"118":{"position":[[495,5]]}}}],["held",{"_index":738,"t":{"110":{"position":[[15,4]]},"118":{"position":[[1519,4]]},"120":{"position":[[532,4]]}}}],["help",{"_index":501,"t":{"69":{"position":[[252,4]]},"77":{"position":[[307,5]]}}}],["herein",{"_index":669,"t":{"102":{"position":[[994,6],[1257,6],[1493,7],[1527,6]]},"104":{"position":[[825,6],[1004,7]]},"118":{"position":[[1422,6]]}}}],["high",{"_index":573,"t":{"79":{"position":[[804,4]]},"104":{"position":[[500,4],[1027,4]]}}}],["highest",{"_index":583,"t":{"81":{"position":[[567,7]]}}}],["hold",{"_index":625,"t":{"96":{"position":[[148,5]]},"112":{"position":[[27,4]]}}}],["holder",{"_index":235,"t":{"32":{"position":[[108,7]]}}}],["https://creativecommons.org/licenses/by/4.0",{"_index":731,"t":{"106":{"position":[[390,45]]}}}],["https://our.status.im/our",{"_index":97,"t":{"10":{"position":[[42,25]]}}}],["hundr",{"_index":756,"t":{"110":{"position":[[537,7]]}}}],["idea",{"_index":528,"t":{"75":{"position":[[333,5]]}}}],["identifi",{"_index":299,"t":{"38":{"position":[[653,11]]},"56":{"position":[[384,11]]},"77":{"position":[[313,8]]},"79":{"position":[[537,11]]},"81":{"position":[[506,8]]},"83":{"position":[[553,11]]},"120":{"position":[[277,11]]}}}],["ift",{"_index":10,"t":{"2":{"position":[[97,3]]},"56":{"position":[[92,4],[295,3],[554,4]]},"60":{"position":[[433,3]]},"71":{"position":[[58,3]]},"73":{"position":[[551,3]]},"75":{"position":[[55,3],[154,3]]},"77":{"position":[[160,3],[488,3]]},"79":{"position":[[47,3],[772,3]]},"81":{"position":[[116,3],[308
,3]]},"83":{"position":[[163,3],[429,4],[514,3]]},"85":{"position":[[150,3]]},"87":{"position":[[54,4],[134,3],[311,3],[525,3]]},"94":{"position":[[152,3]]},"96":{"position":[[181,3]]}}}],["ift'",{"_index":585,"t":{"83":{"position":[[9,5]]}}}],["ii",{"_index":789,"t":{"118":{"position":[[965,4]]}}}],["immedi",{"_index":385,"t":{"50":{"position":[[185,11]]},"114":{"position":[[217,11]]}}}],["implement",{"_index":314,"t":{"42":{"position":[[67,11]]},"79":{"position":[[160,12],[340,16],[387,15]]},"85":{"position":[[212,12]]}}}],["impli",{"_index":645,"t":{"102":{"position":[[163,8],[1385,7]]},"108":{"position":[[246,5]]}}}],["import",{"_index":627,"t":{"96":{"position":[[166,10]]}}}],["improv",{"_index":220,"t":{"30":{"position":[[57,7]]},"56":{"position":[[732,13]]},"60":{"position":[[65,14],[239,13]]},"73":{"position":[[141,9],[444,11]]}}}],["incent",{"_index":516,"t":{"75":{"position":[[104,11]]}}}],["incentivis",{"_index":217,"t":{"30":{"position":[[19,12]]}}}],["incid",{"_index":494,"t":{"69":{"position":[[125,9]]}}}],["incident",{"_index":747,"t":{"110":{"position":[[202,11]]}}}],["includ",{"_index":126,"t":{"14":{"position":[[186,8]]},"56":{"position":[[375,8],[604,9]]},"79":{"position":[[152,7]]},"83":{"position":[[236,9],[484,8]]},"98":{"position":[[181,9]]},"102":{"position":[[172,9]]},"104":{"position":[[243,7],[727,9],[1259,9]]},"106":{"position":[[233,9]]},"108":{"position":[[104,9]]},"112":{"position":[[103,9],[210,9]]},"118":{"position":[[74,8],[1873,9]]}}}],["inclus",{"_index":212,"t":{"28":{"position":[[119,12]]}}}],["incorpor",{"_index":803,"t":{"118":{"position":[[1409,12]]}}}],["incub",{"_index":17,"t":{"2":{"position":[[162,9]]},"54":{"position":[[170,9]]},"56":{"position":[[271,9],[431,9]]},"58":{"position":[[90,9]]},"94":{"position":[[4,9],[272,9]]}}}],["indemnifi",{"_index":758,"t":{"112":{"position":[[10,9]]}}}],["independ",{"_index":376,"t":{"48":{"position":[[110,11]]}}}],["indic",{"_index":294,"t":{"38":{"position":[[433,9]]}}}],["individu",
{"_index":115,"t":{"14":{"position":[[33,12]]},"26":{"position":[[192,11]]}}}],["industri",{"_index":698,"t":{"104":{"position":[[168,8]]}}}],["ineffici",{"_index":238,"t":{"32":{"position":[[141,14]]}}}],["inform",{"_index":132,"t":{"16":{"position":[[27,12],[202,12]]},"22":{"position":[[48,11]]},"34":{"position":[[78,6]]},"42":{"position":[[191,11]]},"46":{"position":[[937,11]]},"60":{"position":[[288,11]]},"102":{"position":[[1076,13],[1245,11],[1505,11]]},"104":{"position":[[803,11],[982,11],[1246,12]]}}}],["infrastructur",{"_index":143,"t":{"16":{"position":[[182,15]]}}}],["infring",{"_index":650,"t":{"102":{"position":[[278,12]]}}}],["inherit",{"_index":96,"t":{"10":{"position":[[27,9]]}}}],["initi",{"_index":617,"t":{"94":{"position":[[36,11]]}}}],["innov",{"_index":425,"t":{"56":{"position":[[70,10]]}}}],["institut",{"_index":491,"t":{"69":{"position":[[49,9]]},"118":{"position":[[1275,11]]}}}],["integr",{"_index":417,"t":{"54":{"position":[[488,11]]},"73":{"position":[[212,11]]},"79":{"position":[[230,11]]},"85":{"position":[[526,9]]},"87":{"position":[[452,11]]}}}],["intellectu",{"_index":651,"t":{"102":{"position":[[294,12]]}}}],["intend",{"_index":255,"t":{"34":{"position":[[66,8]]},"81":{"position":[[348,8]]},"102":{"position":[[1042,8]]}}}],["intens",{"_index":519,"t":{"75":{"position":[[175,9]]}}}],["interact",{"_index":310,"t":{"40":{"position":[[218,8]]},"58":{"position":[[377,12]]}}}],["interest",{"_index":301,"t":{"38":{"position":[[699,8]]},"67":{"position":[[7,10]]}}}],["intern",{"_index":727,"t":{"106":{"position":[[87,13]]},"118":{"position":[[1325,13],[1972,13]]}}}],["interoper",{"_index":213,"t":{"28":{"position":[[162,17]]},"54":{"position":[[318,13]]},"79":{"position":[[188,16]]},"87":{"position":[[276,13]]}}}],["interpret",{"_index":837,"t":{"120":{"position":[[423,14]]}}}],["intricaci",{"_index":452,"t":{"58":{"position":[[144,11]]}}}],["invalid",{"_index":838,"t":{"120":{"position":[[537,7]]}}}],["invest",{"_index":214,"t":{"28":{"posit
ion":[[184,9]]}}}],["involv",{"_index":580,"t":{"81":{"position":[[455,8]]}}}],["ip",{"_index":295,"t":{"38":{"position":[[549,2],[620,2],[727,2],[833,2]]}}}],["issu",{"_index":478,"t":{"65":{"position":[[87,6]]},"67":{"position":[[103,6]]},"77":{"position":[[360,7]]},"79":{"position":[[585,6]]}}}],["it'",{"_index":161,"t":{"20":{"position":[[73,4]]}}}],["itself",{"_index":199,"t":{"26":{"position":[[73,7]]}}}],["job",{"_index":482,"t":{"65":{"position":[[133,3]]}}}],["join",{"_index":83,"t":{"8":{"position":[[0,4],[131,4]]},"65":{"position":[[24,7]]}}}],["judg",{"_index":779,"t":{"118":{"position":[[506,6]]}}}],["juri",{"_index":782,"t":{"118":{"position":[[615,4]]}}}],["jurisdict",{"_index":667,"t":{"102":{"position":[[826,13]]},"118":{"position":[[650,14]]}}}],["keep",{"_index":84,"t":{"8":{"position":[[24,4]]}}}],["kept",{"_index":811,"t":{"118":{"position":[[1742,4]]}}}],["key",{"_index":442,"t":{"56":{"position":[[593,3]]}}}],["kind",{"_index":643,"t":{"102":{"position":[[146,5]]}}}],["kit",{"_index":490,"t":{"67":{"position":[[187,3]]},"85":{"position":[[628,4]]}}}],["knowledg",{"_index":45,"t":{"4":{"position":[[114,9],[172,9]]},"56":{"position":[[619,9]]},"58":{"position":[[164,9]]},"75":{"position":[[435,10]]},"85":{"position":[[91,9]]},"96":{"position":[[74,9]]}}}],["known",{"_index":820,"t":{"118":{"position":[[2160,5]]}}}],["laid",{"_index":347,"t":{"46":{"position":[[240,4]]}}}],["languag",{"_index":812,"t":{"118":{"position":[[1765,8]]}}}],["larger",{"_index":545,"t":{"77":{"position":[[234,6]]}}}],["last",{"_index":249,"t":{"34":{"position":[[13,4]]},"98":{"position":[[13,4]]}}}],["latest",{"_index":87,"t":{"8":{"position":[[49,6],[188,6]]}}}],["law",{"_index":663,"t":{"102":{"position":[[708,4],[976,5]]},"110":{"position":[[639,4]]},"116":{"position":[[6,3],[145,4]]},"118":{"position":[[66,3]]}}}],["lawsuit",{"_index":823,"t":{"118":{"position":[[2241,7]]}}}],["layer",{"_index":471,"t":{"60":{"position":[[423,6]]},"62":{"position":[[99,5]]},"73":{"p
osition":[[239,7]]}}}],["legal",{"_index":271,"t":{"36":{"position":[[212,5]]},"38":{"position":[[199,5]]},"100":{"position":[[152,5]]},"102":{"position":[[431,9],[785,7],[1565,6]]},"110":{"position":[[93,5]]}}}],["legal@free.technolog",{"_index":390,"t":{"52":{"position":[[89,22]]},"120":{"position":[[902,22]]}}}],["legisl",{"_index":289,"t":{"38":{"position":[[245,12]]},"44":{"position":[[713,12]]},"46":{"position":[[269,12]]}}}],["legitim",{"_index":300,"t":{"38":{"position":[[688,10]]}}}],["level",{"_index":328,"t":{"44":{"position":[[297,5]]}}}],["leverag",{"_index":604,"t":{"85":{"position":[[371,10]]},"94":{"position":[[311,10]]}}}],["liabil",{"_index":379,"t":{"48":{"position":[[179,9]]},"110":{"position":[[73,10],[484,9],[572,9]]}}}],["liabl",{"_index":737,"t":{"108":{"position":[[498,6]]},"110":{"position":[[20,6]]}}}],["liberti",{"_index":120,"t":{"14":{"position":[[111,8]]}}}],["libp2p",{"_index":35,"t":{"4":{"position":[[4,6]]},"56":{"position":[[715,6]]},"60":{"position":[[47,7],[166,6]]},"62":{"position":[[68,6]]},"67":{"position":[[156,6]]},"71":{"position":[[193,6]]},"73":{"position":[[124,7],[155,6],[343,6]]}}}],["librari",{"_index":23,"t":{"2":{"position":[[240,10]]},"83":{"position":[[139,9],[406,9]]}}}],["licenc",{"_index":191,"t":{"24":{"position":[[89,8]]},"106":{"position":[[138,7],[342,8]]}}}],["licens",{"_index":392,"t":{"52":{"position":[[129,8]]},"106":{"position":[[101,7]]},"120":{"position":[[942,8]]}}}],["limit",{"_index":285,"t":{"38":{"position":[[10,5]]},"40":{"position":[[19,7]]},"46":{"position":[[40,5]]},"102":{"position":[[190,10]]},"110":{"position":[[513,7],[558,10]]},"112":{"position":[[228,10]]},"118":{"position":[[570,8],[2039,12]]},"120":{"position":[[402,6]]}}}],["linchpin",{"_index":405,"t":{"54":{"position":[[279,8]]},"87":{"position":[[237,8]]}}}],["link",{"_index":373,"t":{"48":{"position":[[37,5]]},"102":{"position":[[613,6]]},"108":{"position":[[39,5],[182,7]]}}}],["list",{"_index":485,"t":{"67":{"position":[[110,5]
]}}}],["litig",{"_index":773,"t":{"118":{"position":[[277,10],[474,10]]}}}],["local",{"_index":668,"t":{"102":{"position":[[970,5]]}}}],["logo",{"_index":266,"t":{"36":{"position":[[135,5],[238,5],[350,8],[417,5]]},"54":{"position":[[54,5],[147,5],[353,5],[561,5]]},"85":{"position":[[482,5]]},"96":{"position":[[206,6]]},"100":{"position":[[75,5],[179,5],[291,8],[368,5]]}}}],["long",{"_index":185,"t":{"22":{"position":[[252,4]]}}}],["look",{"_index":693,"t":{"104":{"position":[[37,7],[213,7],[549,7]]}}}],["lost",{"_index":742,"t":{"110":{"position":[[127,4]]}}}],["lower",{"_index":245,"t":{"32":{"position":[[247,5]]}}}],["machin",{"_index":624,"t":{"96":{"position":[[92,8]]}}}],["made",{"_index":101,"t":{"10":{"position":[[120,4]]},"24":{"position":[[47,4]]},"58":{"position":[[43,4]]},"102":{"position":[[1057,4]]},"106":{"position":[[33,4]]}}}],["main",{"_index":473,"t":{"62":{"position":[[53,4]]}}}],["maintain",{"_index":316,"t":{"42":{"position":[[128,8]]},"83":{"position":[[392,8]]}}}],["mainten",{"_index":576,"t":{"81":{"position":[[67,12]]},"83":{"position":[[103,11]]}}}],["make",{"_index":152,"t":{"18":{"position":[[148,4]]},"22":{"position":[[206,6]]},"40":{"position":[[138,6]]},"102":{"position":[[367,4],[716,4]]},"118":{"position":[[97,4],[953,7]]}}}],["manag",{"_index":592,"t":{"83":{"position":[[280,9]]}}}],["mandat",{"_index":595,"t":{"83":{"position":[[455,7]]}}}],["manner",{"_index":776,"t":{"118":{"position":[[388,7]]}}}],["market",{"_index":517,"t":{"75":{"position":[[120,8]]},"104":{"position":[[181,7],[620,6]]}}}],["mass",{"_index":108,"t":{"12":{"position":[[92,4]]}}}],["matter",{"_index":681,"t":{"102":{"position":[[1475,7]]},"104":{"position":[[437,7],[1123,7]]}}}],["maximis",{"_index":121,"t":{"14":{"position":[[130,8]]},"26":{"position":[[100,8],[169,8]]}}}],["maximum",{"_index":661,"t":{"102":{"position":[[669,7]]},"110":{"position":[[600,7]]}}}],["mean",{"_index":239,"t":{"32":{"position":[[186,5]]},"44":{"position":[[197,5]]},"98":{"position
":[[375,5]]},"118":{"position":[[43,7]]},"120":{"position":[[760,4]]}}}],["measur",{"_index":304,"t":{"38":{"position":[[803,8]]},"42":{"position":[[101,8]]}}}],["meet",{"_index":774,"t":{"118":{"position":[[305,5]]}}}],["member",{"_index":227,"t":{"30":{"position":[[135,8]]}}}],["merchant",{"_index":646,"t":{"102":{"position":[[219,16]]}}}],["messag",{"_index":57,"t":{"6":{"position":[[92,9],[154,9],[294,9],[390,9]]}}}],["method",{"_index":151,"t":{"18":{"position":[[120,7]]},"118":{"position":[[1602,7]]}}}],["meticul",{"_index":401,"t":{"54":{"position":[[193,12]]},"79":{"position":[[687,10]]},"81":{"position":[[464,12]]},"87":{"position":[[151,12]]}}}],["minimis",{"_index":197,"t":{"26":{"position":[[3,8]]}}}],["minor",{"_index":98,"t":{"10":{"position":[[86,5]]}}}],["model",{"_index":535,"t":{"75":{"position":[[463,9],[507,9]]}}}],["modifi",{"_index":193,"t":{"24":{"position":[[119,7]]},"50":{"position":[[7,6]]},"106":{"position":[[168,6]]},"114":{"position":[[7,6]]},"120":{"position":[[386,7]]}}}],["modular",{"_index":607,"t":{"90":{"position":[[18,7]]}}}],["natur",{"_index":788,"t":{"118":{"position":[[908,6]]}}}],["need",{"_index":291,"t":{"38":{"position":[[321,4],[418,6]]},"75":{"position":[[256,5]]},"81":{"position":[[299,5]]},"83":{"position":[[597,5]]}}}],["neglig",{"_index":739,"t":{"110":{"position":[[54,11]]}}}],["nescienc",{"_index":47,"t":{"4":{"position":[[193,11]]},"58":{"position":[[108,10]]},"96":{"position":[[0,9]]}}}],["network",{"_index":36,"t":{"4":{"position":[[11,10]]},"26":{"position":[[156,8]]},"56":{"position":[[682,9]]},"58":{"position":[[411,9]]},"60":{"position":[[347,9]]},"62":{"position":[[34,9]]},"73":{"position":[[231,7],[488,9]]},"77":{"position":[[241,9]]}}}],["new",{"_index":53,"t":{"6":{"position":[[42,3]]},"18":{"position":[[107,3]]},"50":{"position":[[148,3]]},"56":{"position":[[427,3]]},"75":{"position":[[329,3]]},"114":{"position":[[174,3]]}}}],["nim",{"_index":487,"t":{"67":{"position":[[152,3]]},"71":{"position":[[189,3
]]},"73":{"position":[[120,3]]},"83":{"position":[[36,3],[55,3],[118,3],[181,3],[221,3],[250,3],[294,3],[366,3],[470,3],[548,4],[630,3]]}}}],["nimbl",{"_index":590,"t":{"83":{"position":[[264,6]]}}}],["node",{"_index":446,"t":{"56":{"position":[[668,4]]},"77":{"position":[[221,5]]}}}],["nois",{"_index":602,"t":{"85":{"position":[[303,5]]}}}],["non",{"_index":649,"t":{"102":{"position":[[274,3]]},"108":{"position":[[419,3]]}}}],["nonetheless",{"_index":344,"t":{"46":{"position":[[117,12]]}}}],["note",{"_index":780,"t":{"118":{"position":[[520,4]]}}}],["noth",{"_index":688,"t":{"102":{"position":[[1628,7]]}}}],["notic",{"_index":381,"t":{"50":{"position":[[81,7]]},"104":{"position":[[1392,6]]},"114":{"position":[[87,7]]},"118":{"position":[[870,6],[1108,6]]}}}],["number",{"_index":201,"t":{"26":{"position":[[113,6],[182,6]]}}}],["object",{"_index":34,"t":{"2":{"position":[[416,11]]},"46":{"position":[[831,6]]},"77":{"position":[[137,9]]}}}],["oblig",{"_index":233,"t":{"32":{"position":[[88,10]]},"44":{"position":[[7,7]]},"102":{"position":[[1222,10]]}}}],["occur",{"_index":340,"t":{"44":{"position":[[669,5]]},"75":{"position":[[189,6]]}}}],["offer",{"_index":599,"t":{"85":{"position":[[166,8]]},"102":{"position":[[1685,5],[1725,5]]}}}],["offic",{"_index":269,"t":{"36":{"position":[[190,6]]},"100":{"position":[[130,6]]}}}],["on",{"_index":755,"t":{"110":{"position":[[532,4]]},"118":{"position":[[1652,3],[2085,3]]}}}],["oneself",{"_index":159,"t":{"20":{"position":[[43,7]]}}}],["open",{"_index":173,"t":{"22":{"position":[[23,8]]},"24":{"position":[[77,4]]},"65":{"position":[[45,7],[79,7],[137,9]]}}}],["oper",{"_index":622,"t":{"94":{"position":[[291,7]]}}}],["opportun",{"_index":436,"t":{"56":{"position":[[396,13]]}}}],["optimis",{"_index":466,"t":{"60":{"position":[[279,8]]}}}],["oral",{"_index":831,"t":{"120":{"position":[[216,5]]}}}],["order",{"_index":708,"t":{"104":{"position":[[741,5]]}}}],["organis",{"_index":116,"t":{"14":{"position":[[60,12]]},"22":{"position":
[[71,13]]},"26":{"position":[[60,12]]},"32":{"position":[[167,13]]}}}],["oskar",{"_index":71,"t":{"6":{"position":[[253,5],[320,5],[433,5]]}}}],["otherwis",{"_index":331,"t":{"44":{"position":[[370,10]]},"102":{"position":[[549,9]]}}}],["out",{"_index":348,"t":{"46":{"position":[[245,3]]},"118":{"position":[[896,3]]}}}],["outlin",{"_index":111,"t":{"12":{"position":[[143,8]]}}}],["outsid",{"_index":323,"t":{"44":{"position":[[149,7],[614,7]]}}}],["over",{"_index":80,"t":{"6":{"position":[[400,4]]},"26":{"position":[[221,4]]},"38":{"position":[[864,4]]},"108":{"position":[[451,4]]},"118":{"position":[[272,4]]}}}],["overal",{"_index":467,"t":{"60":{"position":[[377,7]]},"73":{"position":[[436,7]]},"79":{"position":[[630,7]]},"85":{"position":[[518,7]]}}}],["overse",{"_index":403,"t":{"54":{"position":[[219,10]]},"87":{"position":[[177,10]]}}}],["p2p",{"_index":470,"t":{"60":{"position":[[419,3]]},"73":{"position":[[4,3],[78,5],[102,3],[227,3],[251,3],[386,3],[408,3]]},"90":{"position":[[26,3]]}}}],["packag",{"_index":591,"t":{"83":{"position":[[271,8]]}}}],["page",{"_index":248,"t":{"34":{"position":[[8,4]]},"98":{"position":[[8,4]]}}}],["part",{"_index":296,"t":{"38":{"position":[[564,4]]},"50":{"position":[[29,4]]},"56":{"position":[[355,4]]},"73":{"position":[[36,4]]},"114":{"position":[[29,4]]},"120":{"position":[[494,4],[568,4]]}}}],["parti",{"_index":307,"t":{"40":{"position":[[66,7],[243,5]]},"44":{"position":[[264,7]]},"48":{"position":[[52,5],[80,5],[235,5]]},"108":{"position":[[56,5],[163,5],[201,5],[292,5],[358,5],[467,5],[548,5],[618,5],[742,5]]},"118":{"position":[[1726,7]]}}}],["particip",{"_index":196,"t":{"24":{"position":[[174,14]]},"28":{"position":[[147,14]]},"118":{"position":[[2211,11]]}}}],["particular",{"_index":648,"t":{"102":{"position":[[250,10]]}}}],["past",{"_index":678,"t":{"102":{"position":[[1415,5]]}}}],["payment",{"_index":814,"t":{"118":{"position":[[1813,7]]}}}],["peer",{"_index":510,"t":{"73":{"position":[[65,4],[73,4]]},"90":{"posi
tion":[[130,4],[138,4]]}}}],["perform",{"_index":461,"t":{"60":{"position":[[118,12]]},"77":{"position":[[348,11]]},"79":{"position":[[809,10]]}}}],["period",{"_index":384,"t":{"50":{"position":[[114,12]]},"114":{"position":[[140,12]]}}}],["permissionless",{"_index":195,"t":{"24":{"position":[[159,14]]},"28":{"position":[[132,14]]}}}],["permit",{"_index":662,"t":{"102":{"position":[[684,9]]},"110":{"position":[[615,9]]}}}],["person",{"_index":119,"t":{"14":{"position":[[102,8]]},"36":{"position":[[77,8]]},"38":{"position":[[64,8],[141,8],[293,8],[352,8],[505,8]]},"40":{"position":[[45,8],[97,8]]},"42":{"position":[[156,8]]},"44":{"position":[[41,8],[125,8],[208,8],[482,8],[600,8]]},"46":{"position":[[84,8],[190,8],[338,8],[403,8],[461,8],[588,8],[864,8],[996,8]]}}}],["physic",{"_index":202,"t":{"26":{"position":[[123,8]]}}}],["place",{"_index":322,"t":{"44":{"position":[[142,6]]},"104":{"position":[[1095,5]]}}}],["plan",{"_index":562,"t":{"79":{"position":[[120,6]]},"83":{"position":[[383,5]]}}}],["play",{"_index":504,"t":{"71":{"position":[[24,4]]},"79":{"position":[[288,5]]}}}],["pleas",{"_index":382,"t":{"50":{"position":[[89,6]]},"52":{"position":[[68,6]]},"69":{"position":[[98,6],[165,6]]},"106":{"position":[[351,6]]},"118":{"position":[[513,6]]},"120":{"position":[[881,6]]}}}],["pluggabl",{"_index":474,"t":{"62":{"position":[[110,9]]}}}],["polici",{"_index":254,"t":{"34":{"position":[[56,6],[220,6]]},"36":{"position":[[33,6]]},"40":{"position":[[295,8]]},"46":{"position":[[29,7]]},"48":{"position":[[130,9]]},"50":{"position":[[50,6],[160,6]]},"52":{"position":[[60,7]]},"108":{"position":[[122,9]]}}}],["polit",{"_index":123,"t":{"14":{"position":[[147,10]]}}}],["portion",{"_index":840,"t":{"120":{"position":[[642,8]]}}}],["possibl",{"_index":244,"t":{"32":{"position":[[235,8]]},"46":{"position":[[107,9],[370,10]]},"110":{"position":[[427,11]]},"118":{"position":[[1613,9]]}}}],["post",{"_index":387,"t":{"50":{"position":[[206,7]]},"114":{"position":[[238,7]]}}}]
,["poster",{"_index":614,"t":{"90":{"position":[[230,8]]}}}],["potenti",{"_index":548,"t":{"77":{"position":[[322,9]]},"81":{"position":[[527,9]]},"118":{"position":[[670,11]]}}}],["power",{"_index":156,"t":{"20":{"position":[[15,5]]}}}],["pr",{"_index":479,"t":{"65":{"position":[[96,3]]}}}],["practic",{"_index":431,"t":{"56":{"position":[[193,9],[520,9]]}}}],["prefer",{"_index":772,"t":{"118":{"position":[[253,6]]}}}],["present",{"_index":679,"t":{"102":{"position":[[1421,7]]},"104":{"position":[[788,10]]}}}],["preserv",{"_index":445,"t":{"56":{"position":[[657,10]]},"58":{"position":[[249,10]]},"90":{"position":[[119,10],[183,10]]},"96":{"position":[[42,10],[120,10]]}}}],["primari",{"_index":541,"t":{"77":{"position":[[129,7]]},"79":{"position":[[127,7]]}}}],["principl",{"_index":1,"t":{"2":{"position":[[9,9],[352,10]]},"10":{"position":[[6,10],[68,12]]},"12":{"position":[[132,10]]},"16":{"position":[[112,9]]},"118":{"position":[[315,9]]}}}],["prior",{"_index":829,"t":{"120":{"position":[[113,5]]}}}],["privaci",{"_index":155,"t":{"20":{"position":[[0,7],[99,7]]},"34":{"position":[[48,7],[110,7],[212,7]]},"36":{"position":[[25,7]]},"38":{"position":[[237,7]]},"40":{"position":[[287,7]]},"44":{"position":[[30,7],[705,7]]},"46":{"position":[[21,7],[261,7]]},"48":{"position":[[122,7]]},"50":{"position":[[42,7],[152,7]]},"52":{"position":[[52,7]]},"56":{"position":[[649,7],[702,8]]},"58":{"position":[[241,7]]},"85":{"position":[[450,8]]},"90":{"position":[[111,7],[175,7]]},"96":{"position":[[34,7],[112,7]]},"108":{"position":[[114,7]]}}}],["privat",{"_index":76,"t":{"6":{"position":[[355,7]]}}}],["proactiv",{"_index":523,"t":{"75":{"position":[[271,11]]},"79":{"position":[[515,9]]}}}],["problem",{"_index":241,"t":{"32":{"position":[[200,8]]}}}],["proceed",{"_index":810,"t":{"118":{"position":[[1676,11]]}}}],["process",{"_index":264,"t":{"36":{"position":[[63,10]]},"38":{"position":[[50,10],[125,7],[281,7],[483,7],[607,7],[711,10]]},"40":{"position":[[89,7]]},"44":{"pos
ition":[[235,9]]},"46":{"position":[[65,10],[228,8],[575,7],[635,10],[676,10],[769,10],[845,10],[1019,9]]},"79":{"position":[[728,10]]},"81":{"position":[[447,7]]}}}],["procur",{"_index":744,"t":{"110":{"position":[[149,11]]}}}],["profit",{"_index":743,"t":{"110":{"position":[[132,8]]}}}],["program",{"_index":628,"t":{"96":{"position":[[185,9]]}}}],["programm",{"_index":499,"t":{"69":{"position":[[224,9]]}}}],["project",{"_index":18,"t":{"2":{"position":[[172,9]]},"54":{"position":[[153,8],[180,9]]},"56":{"position":[[281,9],[299,9],[441,9]]},"58":{"position":[[100,7]]},"60":{"position":[[437,9]]},"62":{"position":[[120,7]]},"71":{"position":[[62,9]]},"73":{"position":[[194,8],[555,8]]},"75":{"position":[[59,8],[158,7]]},"77":{"position":[[164,8],[414,9],[492,8]]},"79":{"position":[[51,8],[332,7],[379,7],[776,8]]},"81":{"position":[[120,9],[212,7],[312,9],[385,7]]},"83":{"position":[[167,9],[518,8],[589,7]]},"85":{"position":[[154,8],[488,9]]},"87":{"position":[[138,9]]},"94":{"position":[[14,8],[98,8],[156,9],[282,8]]},"102":{"position":[[1462,8]]},"104":{"position":[[134,11],[281,7],[386,9]]}}}],["project'",{"_index":567,"t":{"79":{"position":[[486,9]]}}}],["promis",{"_index":676,"t":{"102":{"position":[[1318,8]]}}}],["promptli",{"_index":570,"t":{"79":{"position":[[606,9]]}}}],["proof",{"_index":453,"t":{"58":{"position":[[174,7]]},"85":{"position":[[106,7],[201,6]]}}}],["properti",{"_index":652,"t":{"102":{"position":[[307,8]]}}}],["propos",{"_index":465,"t":{"60":{"position":[[229,9]]}}}],["protect",{"_index":61,"t":{"6":{"position":[[127,10]]},"20":{"position":[[91,7]]},"44":{"position":[[18,7],[306,10],[408,12],[517,10]]},"46":{"position":[[922,10]]},"90":{"position":[[157,10],[199,9]]}}}],["protocol",{"_index":22,"t":{"2":{"position":[[229,10],[308,10]]},"54":{"position":[[332,9]]},"60":{"position":[[183,9]]},"69":{"position":[[268,9]]},"73":{"position":[[172,9]]},"85":{"position":[[309,8]]},"87":{"position":[[290,9]]},"90":{"position":[[30,9]]}}}],["provid"
,{"_index":6,"t":{"2":{"position":[[62,8]]},"20":{"position":[[204,7]]},"40":{"position":[[175,7]]},"44":{"position":[[277,7],[499,8]]},"56":{"position":[[673,8]]},"71":{"position":[[87,9]]},"83":{"position":[[493,9]]},"85":{"position":[[118,8]]},"102":{"position":[[15,8]]},"108":{"position":[[26,8]]}}}],["provis",{"_index":768,"t":{"116":{"position":[[150,11]]},"120":{"position":[[445,10]]}}}],["pseudo",{"_index":166,"t":{"20":{"position":[[159,6]]}}}],["pst",{"_index":275,"t":{"36":{"position":[[271,3]]},"100":{"position":[[212,3]]}}}],["public",{"_index":25,"t":{"2":{"position":[[271,13]]},"24":{"position":[[28,6]]},"56":{"position":[[134,13]]}}}],["purport",{"_index":713,"t":{"104":{"position":[[858,7]]}}}],["purpos",{"_index":262,"t":{"36":{"position":[[8,8]]},"38":{"position":[[168,8],[443,8]]},"96":{"position":[[61,7]]},"100":{"position":[[8,8]]},"102":{"position":[[261,8],[1090,8]]},"106":{"position":[[224,8]]}}}],["push",{"_index":454,"t":{"58":{"position":[[215,7]]}}}],["qa",{"_index":558,"t":{"79":{"position":[[4,2],[280,2],[435,2],[743,2]]}}}],["qa'",{"_index":568,"t":{"79":{"position":[[510,4]]}}}],["qualiti",{"_index":571,"t":{"79":{"position":[[638,7],[710,7]]}}}],["question",{"_index":388,"t":{"52":{"position":[[32,9]]},"120":{"position":[[837,9]]}}}],["r&d",{"_index":13,"t":{"2":{"position":[[124,3]]},"56":{"position":[[260,3]]},"71":{"position":[[6,3]]},"73":{"position":[[8,3]]},"77":{"position":[[38,3]]},"85":{"position":[[30,3]]},"94":{"position":[[238,3]]}}}],["rang",{"_index":534,"t":{"75":{"position":[[446,7]]}}}],["read",{"_index":633,"t":{"98":{"position":[[276,4]]}}}],["readi",{"_index":231,"t":{"32":{"position":[[53,5]]}}}],["real",{"_index":557,"t":{"77":{"position":[[579,4]]}}}],["realm",{"_index":448,"t":{"58":{"position":[[7,5]]}}}],["reason",{"_index":351,"t":{"46":{"position":[[359,10]]},"118":{"position":[[722,10],[2149,10]]}}}],["receiv",{"_index":796,"t":{"118":{"position":[[1093,9]]}}}],["rectifi",{"_index":582,"t":{"81":{"positi
on":[[519,7]]}}}],["refer",{"_index":283,"t":{"36":{"position":[[341,5],[381,11],[400,9]]},"100":{"position":[[282,5],[332,11],[351,9]]},"106":{"position":[[358,5]]},"118":{"position":[[1157,8],[1432,10]]},"120":{"position":[[352,9]]}}}],["regard",{"_index":260,"t":{"34":{"position":[[166,7]]},"38":{"position":[[460,7]]},"83":{"position":[[527,9]]},"116":{"position":[[123,6]]},"120":{"position":[[73,9]]}}}],["regardless",{"_index":816,"t":{"118":{"position":[[1999,10]]}}}],["regist",{"_index":268,"t":{"36":{"position":[[179,10]]},"100":{"position":[[119,10]]}}}],["regress",{"_index":547,"t":{"77":{"position":[[274,10]]}}}],["regularli",{"_index":719,"t":{"104":{"position":[[1231,10]]}}}],["regulatori",{"_index":706,"t":{"104":{"position":[[667,10]]}}}],["relat",{"_index":486,"t":{"67":{"position":[[132,7]]},"83":{"position":[[615,7]]},"85":{"position":[[351,7]]},"102":{"position":[[559,8],[1446,8]]},"104":{"position":[[421,7]]},"110":{"position":[[239,7]]},"112":{"position":[[146,7]]},"118":{"position":[[141,8]]}}}],["relay",{"_index":610,"t":{"90":{"position":[[104,6]]}}}],["relentlessli",{"_index":228,"t":{"32":{"position":[[7,12]]}}}],["relev",{"_index":265,"t":{"36":{"position":[[112,8]]},"46":{"position":[[252,8]]},"100":{"position":[[52,8]]},"118":{"position":[[1828,8]]}}}],["reli",{"_index":675,"t":{"102":{"position":[[1301,6]]}}}],["reliabl",{"_index":77,"t":{"6":{"position":[[367,8]]},"60":{"position":[[147,11]]},"77":{"position":[[381,11]]},"79":{"position":[[650,11]]},"81":{"position":[[601,12]]},"83":{"position":[[336,9]]},"102":{"position":[[450,12]]},"104":{"position":[[882,8],[951,11]]}}}],["relianc",{"_index":718,"t":{"104":{"position":[[1107,8]]}}}],["remain",{"_index":502,"t":{"69":{"position":[[291,6]]},"120":{"position":[[632,9],[656,6]]}}}],["remedi",{"_index":790,"t":{"118":{"position":[[974,6]]}}}],["remot",{"_index":38,"t":{"4":{"position":[[31,6],[90,6],[148,6],[205,6]]}}}],["remov",{"_index":352,"t":{"46":{"position":[[391,6]]}}}],["replac"
,{"_index":380,"t":{"50":{"position":[[17,7]]},"114":{"position":[[17,7]]}}}],["report",{"_index":493,"t":{"69":{"position":[[105,6],[172,6]]},"79":{"position":[[553,9]]}}}],["repres",{"_index":733,"t":{"108":{"position":[[233,9]]}}}],["represent",{"_index":655,"t":{"102":{"position":[[376,15],[1327,15]]},"120":{"position":[[167,15]]}}}],["request",{"_index":357,"t":{"46":{"position":[[541,8]]}}}],["requir",{"_index":336,"t":{"44":{"position":[[531,8]]},"79":{"position":[[496,13]]},"81":{"position":[[220,13],[404,13]]},"106":{"position":[[278,12]]}}}],["research",{"_index":3,"t":{"2":{"position":[[26,8],[148,9],[196,8]]},"4":{"position":[[124,8],[182,10]]},"8":{"position":[[56,8],[195,8],[215,8]]},"14":{"position":[[51,8]]},"18":{"position":[[98,8]]},"56":{"position":[[9,8],[324,8],[469,8],[497,8],[564,8]]},"58":{"position":[[30,8],[313,8]]},"60":{"position":[[35,8],[216,8],[262,8]]},"62":{"position":[[9,10]]},"67":{"position":[[26,8]]},"73":{"position":[[295,8],[315,8]]},"75":{"position":[[369,9]]},"94":{"position":[[74,8],[225,8]]}}}],["resili",{"_index":553,"t":{"77":{"position":[[525,9]]},"85":{"position":[[540,10]]}}}],["resist",{"_index":129,"t":{"14":{"position":[[210,10]]},"16":{"position":[[136,11]]},"90":{"position":[[64,9]]}}}],["resolut",{"_index":785,"t":{"118":{"position":[[809,10]]}}}],["resolv",{"_index":775,"t":{"118":{"position":[[328,9],[744,7],[1034,7],[1181,8]]}}}],["resourc",{"_index":229,"t":{"32":{"position":[[20,12],[300,11]]},"71":{"position":[[108,10]]},"94":{"position":[[198,9]]}}}],["respect",{"_index":258,"t":{"34":{"position":[[121,7]]},"40":{"position":[[276,10]]},"46":{"position":[[175,7]]},"118":{"position":[[1845,7]]},"120":{"position":[[227,7]]}}}],["respond",{"_index":522,"t":{"75":{"position":[[238,8]]}}}],["respons",{"_index":378,"t":{"48":{"position":[[161,14]]},"54":{"position":[[86,14]]},"77":{"position":[[58,11]]},"79":{"position":[[135,16]]},"87":{"position":[[73,14]]},"98":{"position":[[258,14]]},"102":{"position":[[924,1
1]]},"104":{"position":[[918,14]]},"108":{"position":[[696,11]]},"114":{"position":[[103,11]]}}}],["result",{"_index":308,"t":{"40":{"position":[[116,6]]},"104":{"position":[[718,8]]},"118":{"position":[[797,6]]}}}],["reveal",{"_index":158,"t":{"20":{"position":[[36,6]]}}}],["review",{"_index":581,"t":{"81":{"position":[[477,9]]},"118":{"position":[[536,6]]}}}],["rfc",{"_index":395,"t":{"54":{"position":[[8,3],[260,3]]},"87":{"position":[[8,3],[218,3],[329,3]]}}}],["right",{"_index":170,"t":{"20":{"position":[[216,5]]},"46":{"position":[[165,6],[295,5]]},"102":{"position":[[338,7]]},"118":{"position":[[465,5],[603,6],[2202,5]]},"120":{"position":[[789,5]]}}}],["rigor",{"_index":564,"t":{"79":{"position":[[360,10]]}}}],["rise",{"_index":450,"t":{"58":{"position":[[78,4]]}}}],["risk",{"_index":640,"t":{"102":{"position":[[106,5]]},"104":{"position":[[515,4],[1042,4]]},"108":{"position":[[672,4]]}}}],["rln",{"_index":609,"t":{"90":{"position":[[100,3]]}}}],["roadmap",{"_index":699,"t":{"104":{"position":[[272,8]]}}}],["robust",{"_index":468,"t":{"60":{"position":[[385,10]]},"79":{"position":[[793,6]]},"81":{"position":[[179,7]]}}}],["role",{"_index":506,"t":{"71":{"position":[[39,4]]},"79":{"position":[[304,4]]}}}],["rout",{"_index":613,"t":{"90":{"position":[[222,7]]}}}],["royer",{"_index":66,"t":{"6":{"position":[[183,5]]}}}],["rule",{"_index":801,"t":{"118":{"position":[[1316,5],[1386,5],[1963,5]]}}}],["rust",{"_index":42,"t":{"4":{"position":[[77,5]]}}}],["sa",{"_index":394,"t":{"52":{"position":[[150,3]]},"120":{"position":[[963,3]]}}}],["safeti",{"_index":317,"t":{"42":{"position":[[141,6]]}}}],["same",{"_index":335,"t":{"44":{"position":[[512,4]]}}}],["sc",{"_index":578,"t":{"81":{"position":[[134,2],[234,2]]}}}],["scalabl",{"_index":469,"t":{"60":{"position":[[400,11]]},"77":{"position":[[512,8]]}}}],["scale",{"_index":543,"t":{"77":{"position":[[194,7]]}}}],["scientif",{"_index":427,"t":{"56":{"position":[[123,10]]}}}],["seamless",{"_index":416,"t":{"54":{"pos
ition":[[479,8]]},"73":{"position":[[507,8]]},"79":{"position":[[221,8]]},"87":{"position":[[443,8]]}}}],["section",{"_index":834,"t":{"120":{"position":[[289,8]]}}}],["secur",{"_index":146,"t":{"18":{"position":[[23,8],[111,8],[160,8]]},"38":{"position":[[794,8]]},"42":{"position":[[36,8],[92,8]]},"58":{"position":[[353,6]]},"69":{"position":[[8,8],[116,8],[298,7]]},"81":{"position":[[187,7],[588,8]]},"85":{"position":[[440,9]]},"90":{"position":[[44,6]]},"102":{"position":[[1764,9]]}}}],["security@free.technolog",{"_index":495,"t":{"69":{"position":[[139,25]]}}}],["see",{"_index":481,"t":{"65":{"position":[[117,3]]},"67":{"position":[[83,3]]},"92":{"position":[[0,3]]}}}],["seek",{"_index":791,"t":{"118":{"position":[[989,8]]}}}],["select",{"_index":157,"t":{"20":{"position":[[24,11]]}}}],["sell",{"_index":691,"t":{"102":{"position":[[1701,5],[1741,4]]}}}],["send",{"_index":786,"t":{"118":{"position":[[852,4]]}}}],["separ",{"_index":375,"t":{"48":{"position":[[97,8]]},"94":{"position":[[143,8]]}}}],["serious",{"_index":313,"t":{"42":{"position":[[45,9]]},"69":{"position":[[17,9]]}}}],["serv",{"_index":396,"t":{"54":{"position":[[17,6]]},"87":{"position":[[17,6]]}}}],["servic",{"_index":14,"t":{"2":{"position":[[128,7]]},"22":{"position":[[237,7]]},"71":{"position":[[10,7]]},"73":{"position":[[12,7]]},"77":{"position":[[42,7]]},"79":{"position":[[7,7],[746,7]]},"81":{"position":[[22,7]]},"83":{"position":[[59,7]]},"85":{"position":[[34,7]]},"94":{"position":[[242,7]]},"110":{"position":[[176,9]]}}}],["session",{"_index":51,"t":{"6":{"position":[[17,7]]}}}],["set",{"_index":29,"t":{"2":{"position":[[345,3]]},"118":{"position":[[888,7]]}}}],["sever",{"_index":441,"t":{"56":{"position":[[585,7]]},"120":{"position":[[581,9]]}}}],["shall",{"_index":674,"t":{"102":{"position":[[1264,5]]},"112":{"position":[[4,5]]},"118":{"position":[[841,5]]}}}],["share",{"_index":93,"t":{"8":{"position":[[161,5]]},"24":{"position":[[112,6]]},"60":{"position":[[300,7]]}}}],["shepherd",{"_
index":399,"t":{"54":{"position":[[104,11]]},"87":{"position":[[91,11]]}}}],["short",{"_index":182,"t":{"22":{"position":[[213,5]]}}}],["shortcom",{"_index":180,"t":{"22":{"position":[[176,13]]}}}],["signific",{"_index":626,"t":{"96":{"position":[[154,11]]}}}],["similar",{"_index":284,"t":{"36":{"position":[[373,7]]},"100":{"position":[[324,7]]}}}],["singl",{"_index":223,"t":{"30":{"position":[[94,6]]}}}],["site",{"_index":374,"t":{"48":{"position":[[86,5]]},"102":{"position":[[607,5]]}}}],["site.thes",{"_index":660,"t":{"102":{"position":[[628,10]]}}}],["sixti",{"_index":793,"t":{"118":{"position":[[1062,5]]}}}],["skill",{"_index":533,"t":{"75":{"position":[[424,6]]}}}],["smart",{"_index":574,"t":{"81":{"position":[[6,5],[96,5],[159,5],[258,5],[487,5]]}}}],["social",{"_index":122,"t":{"14":{"position":[[139,7]]},"28":{"position":[[112,6]]}}}],["softwar",{"_index":40,"t":{"4":{"position":[[50,8]]},"24":{"position":[[4,8]]},"26":{"position":[[43,8]]},"28":{"position":[[48,9]]},"30":{"position":[[10,8]]},"69":{"position":[[282,8]]},"71":{"position":[[161,8]]},"77":{"position":[[113,9]]},"79":{"position":[[669,9],[820,8]]}}}],["sole",{"_index":638,"t":{"102":{"position":[[86,4],[889,4],[917,6]]},"104":{"position":[[1410,4]]},"108":{"position":[[689,6]]}}}],["solicit",{"_index":692,"t":{"102":{"position":[[1710,10]]}}}],["solut",{"_index":554,"t":{"77":{"position":[[535,9]]},"79":{"position":[[829,10]]},"85":{"position":[[72,9]]}}}],["solv",{"_index":240,"t":{"32":{"position":[[192,7]]}}}],["sourc",{"_index":190,"t":{"24":{"position":[[82,6]]}}}],["sovereignti",{"_index":114,"t":{"14":{"position":[[18,11]]}}}],["spam",{"_index":60,"t":{"6":{"position":[[122,4]]},"90":{"position":[[152,4],[194,4]]}}}],["spawn",{"_index":437,"t":{"56":{"position":[[418,8]]}}}],["special",{"_index":746,"t":{"110":{"position":[[193,8]]}}}],["specialis",{"_index":509,"t":{"73":{"position":[[49,12]]},"81":{"position":[[35,11]]},"85":{"position":[[264,11]]}}}],["specif",{"_index":24,"t":{"2":{
"position":[[251,15]]},"38":{"position":[[159,8]]},"44":{"position":[[429,8]]},"54":{"position":[[128,14],[236,15]]},"62":{"position":[[128,8]]},"79":{"position":[[419,15]]},"81":{"position":[[290,8]]},"83":{"position":[[580,8]]},"87":{"position":[[115,14],[194,15]]},"104":{"position":[[1290,9]]},"120":{"position":[[828,8]]}}}],["spectrum",{"_index":532,"t":{"75":{"position":[[412,8]]}}}],["stabil",{"_index":550,"t":{"77":{"position":[[397,9]]}}}],["stage",{"_index":720,"t":{"104":{"position":[[1308,6]]}}}],["stand",{"_index":117,"t":{"14":{"position":[[78,6]]}}}],["standard",{"_index":584,"t":{"81":{"position":[[575,9]]}}}],["standardis",{"_index":406,"t":{"54":{"position":[[301,12]]},"87":{"position":[[259,12]]}}}],["startup",{"_index":11,"t":{"2":{"position":[[101,8]]}}}],["state",{"_index":148,"t":{"18":{"position":[[63,5]]}}}],["statement",{"_index":694,"t":{"104":{"position":[[45,10],[221,11],[251,10],[410,10],[557,10]]}}}],["statu",{"_index":64,"t":{"6":{"position":[[169,6]]},"108":{"position":[[217,6]]}}}],["statut",{"_index":817,"t":{"118":{"position":[[2028,7]]}}}],["stay",{"_index":109,"t":{"12":{"position":[[112,7]]}}}],["stealth",{"_index":489,"t":{"67":{"position":[[171,7]]},"85":{"position":[[612,7]]}}}],["stem",{"_index":618,"t":{"94":{"position":[[53,4]]}}}],["stewardship",{"_index":222,"t":{"30":{"position":[[77,11]]}}}],["still",{"_index":345,"t":{"46":{"position":[[134,5]]}}}],["store",{"_index":292,"t":{"38":{"position":[[342,5],[380,5],[851,6]]}}}],["strategi",{"_index":529,"t":{"75":{"position":[[343,10]]}}}],["strengthen",{"_index":457,"t":{"58":{"position":[[322,11]]}}}],["strict",{"_index":740,"t":{"110":{"position":[[66,6]]}}}],["strive",{"_index":169,"t":{"20":{"position":[[194,6]]},"22":{"position":[[3,6]]}}}],["strong",{"_index":153,"t":{"18":{"position":[[153,6]]}}}],["stronger",{"_index":141,"t":{"16":{"position":[[153,9]]}}}],["stylist",{"_index":99,"t":{"10":{"position":[[92,9]]}}}],["subject",{"_index":701,"t":{"104":{"position":[[
487,7],[572,7],[1318,7]]},"106":{"position":[[259,7]]}}}],["submit",{"_index":318,"t":{"42":{"position":[[179,6]]},"44":{"position":[[73,9]]}}}],["subsect",{"_index":835,"t":{"120":{"position":[[302,11]]}}}],["substitut",{"_index":745,"t":{"110":{"position":[[165,10]]}}}],["such",{"_index":297,"t":{"38":{"position":[[572,4],[722,4]]},"40":{"position":[[232,4]]},"42":{"position":[[186,4]]},"44":{"position":[[421,4],[477,4]]},"46":{"position":[[764,4]]},"56":{"position":[[248,4]]},"71":{"position":[[181,4]]},"83":{"position":[[434,4]]},"85":{"position":[[285,4]]},"102":{"position":[[584,4],[1622,5]]},"104":{"position":[[432,4]]},"106":{"position":[[291,4]]},"108":{"position":[[190,4],[281,4],[347,4],[456,4],[607,4],[731,4]]},"110":{"position":[[442,4],[498,4]]},"118":{"position":[[823,4],[1136,4]]}}}],["suggest",{"_index":527,"t":{"75":{"position":[[318,10]]},"83":{"position":[[298,8]]}}}],["suitabl",{"_index":659,"t":{"102":{"position":[[463,11]]}}}],["summari",{"_index":712,"t":{"104":{"position":[[837,7]]}}}],["supersed",{"_index":828,"t":{"120":{"position":[[99,9]]}}}],["supplement",{"_index":722,"t":{"104":{"position":[[1358,12]]}}}],["support",{"_index":8,"t":{"2":{"position":[[81,7]]},"71":{"position":[[47,10]]},"79":{"position":[[36,10]]},"83":{"position":[[503,7]]},"94":{"position":[[186,7]]},"108":{"position":[[272,8]]}}}],["surveil",{"_index":135,"t":{"16":{"position":[[60,13]]}}}],["swiss",{"_index":763,"t":{"116":{"position":[[0,5]]},"118":{"position":[[1247,5],[1310,5],[1957,5]]}}}],["switzerland",{"_index":281,"t":{"36":{"position":[[317,11]]},"44":{"position":[[179,12],[330,11],[543,11],[645,11]]},"100":{"position":[[258,11]]},"118":{"position":[[1532,12]]}}}],["symmetri",{"_index":174,"t":{"22":{"position":[[36,8]]}}}],["sync",{"_index":79,"t":{"6":{"position":[[381,4]]}}}],["system",{"_index":353,"t":{"46":{"position":[[426,8]]},"75":{"position":[[484,7]]},"77":{"position":[[16,7],[97,7],[464,7]]},"79":{"position":[[267,8]]}}}],["system(",{"_index":2
06,"t":{"26":{"position":[[230,9]]}}}],["tailor",{"_index":579,"t":{"81":{"position":[[274,8]]}}}],["take",{"_index":312,"t":{"42":{"position":[[26,4]]},"54":{"position":[[72,6]]},"69":{"position":[[3,4]]},"87":{"position":[[59,6]]}}}],["tax",{"_index":685,"t":{"102":{"position":[[1572,4]]}}}],["team",{"_index":226,"t":{"30":{"position":[[130,4]]},"56":{"position":[[207,4]]},"58":{"position":[[123,4]]},"60":{"position":[[84,4]]},"75":{"position":[[166,5],[383,4]]}}}],["technic",{"_index":7,"t":{"2":{"position":[[71,9]]},"38":{"position":[[754,9]]},"71":{"position":[[123,9]]},"104":{"position":[[303,9],[639,9]]}}}],["techniqu",{"_index":601,"t":{"85":{"position":[[239,11]]}}}],["technolog",{"_index":150,"t":{"18":{"position":[[80,13],[132,12]]},"54":{"position":[[533,12]]},"58":{"position":[[260,13]]},"69":{"position":[[67,10]]},"73":{"position":[[84,13],[390,13]]},"85":{"position":[[409,13]]},"87":{"position":[[497,12]]},"104":{"position":[[156,11]]}}}],["term",{"_index":183,"t":{"22":{"position":[[219,4],[257,4]]},"32":{"position":[[272,5]]},"98":{"position":[[58,5],[81,5],[293,5],[439,5],[492,5]]},"100":{"position":[[34,5]]},"106":{"position":[[328,5]]},"108":{"position":[[82,5]]},"110":{"position":[[308,5]]},"112":{"position":[[271,5]]},"114":{"position":[[50,5],[186,5]]},"116":{"position":[[32,5]]},"118":{"position":[[9,6],[167,5]]},"120":{"position":[[14,5],[331,5],[473,5],[516,5],[610,5],[732,5],[867,5]]}}}],["test",{"_index":41,"t":{"4":{"position":[[72,4]]},"77":{"position":[[24,7],[105,7],[285,8],[472,7]]},"79":{"position":[[115,4],[178,5],[205,5],[371,7],[698,7]]}}}],["theoret",{"_index":537,"t":{"75":{"position":[[495,11]]}}}],["theori",{"_index":430,"t":{"56":{"position":[[182,6]]},"110":{"position":[[112,6]]}}}],["therefor",{"_index":377,"t":{"48":{"position":[[143,9]]}}}],["therein",{"_index":735,"t":{"108":{"position":[[318,8],[385,7]]}}}],["thing",{"_index":704,"t":{"104":{"position":[[612,7]]}}}],["third",{"_index":306,"t":{"40":{"position":[[60,5],
[237,5]]},"48":{"position":[[46,5],[74,5],[229,5]]},"108":{"position":[[50,5],[157,5],[195,5],[286,5],[352,5],[461,5],[542,5],[612,5],[736,5]]}}}],["thoren",{"_index":72,"t":{"6":{"position":[[259,6],[326,7],[439,6]]}}}],["thorough",{"_index":546,"t":{"77":{"position":[[265,8]]},"81":{"position":[[429,8]]}}}],["those",{"_index":732,"t":{"108":{"position":[[151,5]]}}}],["thought",{"_index":94,"t":{"8":{"position":[[172,8]]}}}],["thread",{"_index":477,"t":{"65":{"position":[[55,6]]}}}],["through",{"_index":559,"t":{"79":{"position":[[60,7],[679,7]]},"102":{"position":[[1018,7]]},"104":{"position":[[462,7],[1174,7]]},"108":{"position":[[572,7],[633,7]]},"118":{"position":[[1204,7]]}}}],["throughout",{"_index":419,"t":{"54":{"position":[[546,10]]},"87":{"position":[[510,10]]}}}],["time",{"_index":247,"t":{"32":{"position":[[290,5]]},"38":{"position":[[413,4],[869,5]]},"50":{"position":[[64,4]]},"104":{"position":[[1378,5]]},"114":{"position":[[70,4]]},"118":{"position":[[1359,4],[2123,4]]}}}],["timelin",{"_index":709,"t":{"104":{"position":[[755,9],[1273,8]]}}}],["tke",{"_index":521,"t":{"75":{"position":[[225,3]]}}}],["tke'",{"_index":518,"t":{"75":{"position":[[129,5],[363,5]]}}}],["togeth",{"_index":530,"t":{"75":{"position":[[395,8]]}}}],["token",{"_index":234,"t":{"32":{"position":[[102,5]]},"75":{"position":[[8,5],[87,5]]},"102":{"position":[[1750,6]]},"104":{"position":[[376,6]]}}}],["tool",{"_index":587,"t":{"83":{"position":[[122,7],[208,5]]}}}],["total",{"_index":171,"t":{"20":{"position":[[225,5]]}}}],["touch",{"_index":476,"t":{"65":{"position":[[7,5]]}}}],["tradeoff",{"_index":184,"t":{"22":{"position":[[224,9]]}}}],["train",{"_index":50,"t":{"6":{"position":[[8,8]]}}}],["transact",{"_index":164,"t":{"20":{"position":[[134,13]]}}}],["transfer",{"_index":355,"t":{"46":{"position":[[494,11]]}}}],["translat",{"_index":439,"t":{"56":{"position":[[481,9]]}}}],["transmiss",{"_index":338,"t":{"44":{"position":[[584,12]]}}}],["treat",{"_index":687,"t":{"102":{"posi
tion":[[1611,7]]}}}],["trial",{"_index":783,"t":{"118":{"position":[[620,5]]}}}],["tribun",{"_index":808,"t":{"118":{"position":[[1627,8]]}}}],["true",{"_index":110,"t":{"12":{"position":[[120,4]]}}}],["trustworthi",{"_index":605,"t":{"85":{"position":[[463,15]]}}}],["unabl",{"_index":792,"t":{"118":{"position":[[1016,6]]}}}],["uncertainti",{"_index":703,"t":{"104":{"position":[[524,12],[1051,11]]}}}],["under",{"_index":134,"t":{"16":{"position":[[54,5]]},"38":{"position":[[220,5]]},"52":{"position":[[138,5]]},"106":{"position":[[48,5]]},"110":{"position":[[34,5]]},"120":{"position":[[951,5]]}}}],["understand",{"_index":542,"t":{"77":{"position":[[176,13]]},"120":{"position":[[139,15]]}}}],["undu",{"_index":717,"t":{"104":{"position":[[1101,5]]}}}],["unenforc",{"_index":839,"t":{"120":{"position":[[548,14]]}}}],["union",{"_index":325,"t":{"44":{"position":[[170,5],[635,5]]}}}],["unit",{"_index":15,"t":{"2":{"position":[[136,6]]},"54":{"position":[[12,4],[264,4]]},"56":{"position":[[264,6]]},"71":{"position":[[18,5]]},"73":{"position":[[20,4],[106,4],[255,4],[412,4]]},"75":{"position":[[24,4]]},"77":{"position":[[50,4],[302,4]]},"79":{"position":[[15,4],[173,4],[283,4],[754,4]]},"81":{"position":[[30,4],[137,4]]},"83":{"position":[[67,4],[185,4],[370,4],[474,4]]},"85":{"position":[[42,4],[259,4]]},"87":{"position":[[12,4],[222,4]]},"94":{"position":[[250,5]]}}}],["unit'",{"_index":551,"t":{"77":{"position":[[432,6]]},"81":{"position":[[422,6]]},"87":{"position":[[333,6]]}}}],["unlawfulli",{"_index":371,"t":{"46":{"position":[[1029,11]]}}}],["unlik",{"_index":319,"t":{"44":{"position":[[90,8]]}}}],["up",{"_index":85,"t":{"8":{"position":[[29,2]]},"81":{"position":[[365,2]]},"83":{"position":[[350,2]]},"92":{"position":[[10,4]]}}}],["updat",{"_index":250,"t":{"34":{"position":[[18,8]]},"46":{"position":[[326,6]]},"98":{"position":[[18,8]]},"104":{"position":[[1212,6]]}}}],["upon",{"_index":386,"t":{"50":{"position":[[197,4]]},"102":{"position":[[1308,4]]},"114":{"posit
ion":[[229,4]]}}}],["us",{"_index":70,"t":{"6":{"position":[[242,5]]},"18":{"position":[[59,3]]},"28":{"position":[[86,4]]},"38":{"position":[[529,3],[577,3]]},"40":{"position":[[145,3]]},"44":{"position":[[389,3]]},"83":{"position":[[25,3],[541,3]]},"98":{"position":[[67,3],[90,5],[160,3],[302,3],[328,3],[356,3],[448,4],[501,4],[529,3]]},"100":{"position":[[43,4]]},"102":{"position":[[58,3],[498,3],[855,3]]},"104":{"position":[[369,3]]},"106":{"position":[[254,4]]},"108":{"position":[[144,3],[409,6],[514,3]]},"110":{"position":[[317,4],[371,3]]},"112":{"position":[[162,3],[280,4]]},"114":{"position":[[59,3],[195,3]]},"116":{"position":[[41,3]]},"118":{"position":[[176,4],[230,3],[713,3]]},"120":{"position":[[23,3],[340,3],[482,4],[525,3],[619,4],[741,4],[876,4]]}}}],["user",{"_index":256,"t":{"34":{"position":[[85,5]]},"38":{"position":[[83,5]]},"106":{"position":[[153,5]]}}}],["util",{"_index":658,"t":{"102":{"position":[[441,8]]}}}],["v2",{"_index":49,"t":{"6":{"position":[[5,2],[76,2],[278,2]]}}}],["vac",{"_index":0,"t":{"2":{"position":[[0,3],[110,3]]},"4":{"position":[[0,3]]},"6":{"position":[[66,4],[268,4]]},"8":{"position":[[9,3],[211,3]]},"12":{"position":[[12,3]]},"54":{"position":[[4,3],[166,3],[256,3]]},"56":{"position":[[0,3],[256,3],[460,3]]},"58":{"position":[[21,3],[304,3]]},"60":{"position":[[26,3],[253,3]]},"62":{"position":[[0,3]]},"67":{"position":[[128,3]]},"69":{"position":[[30,3]]},"73":{"position":[[44,4],[286,3]]},"75":{"position":[[4,3]]},"87":{"position":[[4,3],[214,3]]},"94":{"position":[[0,3],[263,4],[306,4]]}}}],["vac'",{"_index":503,"t":{"71":{"position":[[0,5]]},"81":{"position":[[0,5]]},"94":{"position":[[63,5]]}}}],["valid",{"_index":363,"t":{"46":{"position":[[752,8]]},"56":{"position":[[692,9]]}}}],["valu",{"_index":33,"t":{"2":{"position":[[405,6]]}}}],["valuabl",{"_index":598,"t":{"85":{"position":[[127,8]]}}}],["vari",{"_index":710,"t":{"104":{"position":[[771,4]]}}}],["varieti",{"_index":315,"t":{"42":{"position":[[81,7]]}}}],
["variou",{"_index":432,"t":{"56":{"position":[[230,7]]},"67":{"position":[[120,7]]}}}],["verif",{"_index":563,"t":{"79":{"position":[[316,12]]}}}],["via",{"_index":189,"t":{"24":{"position":[[62,3]]},"69":{"position":[[135,3]]},"118":{"position":[[1566,3]]}}}],["video",{"_index":805,"t":{"118":{"position":[[1570,5]]}}}],["violat",{"_index":653,"t":{"102":{"position":[[325,9]]},"112":{"position":[[244,9]]}}}],["virtual",{"_index":623,"t":{"96":{"position":[[84,7]]}}}],["virtual/onlin",{"_index":807,"t":{"118":{"position":[[1587,14]]}}}],["visit",{"_index":261,"t":{"34":{"position":[[185,8]]}}}],["vital",{"_index":397,"t":{"54":{"position":[[29,5]]},"73":{"position":[[30,5]]},"87":{"position":[[29,5]]}}}],["vulner",{"_index":497,"t":{"69":{"position":[[194,15]]},"81":{"position":[[537,16]]}}}],["waiv",{"_index":777,"t":{"118":{"position":[[453,6],[588,5],[2192,5]]},"120":{"position":[[778,6]]}}}],["waku",{"_index":48,"t":{"6":{"position":[[0,4],[25,5],[71,4],[248,4],[273,4],[350,4]]},"90":{"position":[[0,5],[95,4]]}}}],["warrant",{"_index":654,"t":{"102":{"position":[[356,7]]}}}],["warranti",{"_index":642,"t":{"102":{"position":[[128,10],[205,10],[1343,8]]},"120":{"position":[[187,11]]}}}],["way",{"_index":243,"t":{"32":{"position":[[231,3]]},"38":{"position":[[646,3]]},"60":{"position":[[98,4]]},"110":{"position":[[275,3]]}}}],["web",{"_index":105,"t":{"12":{"position":[[60,4]]},"85":{"position":[[572,3]]}}}],["websit",{"_index":259,"t":{"34":{"position":[[137,7],[145,12],[198,8]]},"38":{"position":[[96,8],[540,8],[588,7],[819,8]]},"40":{"position":[[130,7]]},"42":{"position":[[117,7]]},"48":{"position":[[8,8],[58,9],[241,9]]},"50":{"position":[[106,7],[221,8]]},"98":{"position":[[50,7],[71,9],[172,8],[238,8],[285,7],[339,7],[367,7],[431,7],[484,7],[537,8]]},"100":{"position":[[26,7]]},"102":{"position":[[4,7],[66,7],[509,8],[538,7],[575,8],[740,7],[866,7],[1031,7],[1644,7]]},"104":{"position":[[4,7],[475,7],[1149,7],[1187,8],[1223,7]]},"106":{"position":[[4,7],[381
,8]]},"108":{"position":[[18,7],[62,8],[169,9],[207,9],[298,8],[364,8],[473,8],[554,8],[584,8],[624,8],[645,8],[748,9]]},"110":{"position":[[300,7],[326,8],[354,8],[382,8]]},"112":{"position":[[173,8],[201,8],[263,7]]},"114":{"position":[[42,7],[132,7],[178,7],[253,8]]},"116":{"position":[[24,7]]},"118":{"position":[[159,7],[185,8],[213,8],[241,8]]},"120":{"position":[[6,7],[87,7],[242,8],[323,7],[465,7],[508,7],[602,7],[724,7],[859,7]]}}}],["well",{"_index":165,"t":{"20":{"position":[[151,4]]},"118":{"position":[[1691,4]]}}}],["what’",{"_index":711,"t":{"104":{"position":[[781,6]]}}}],["whenev",{"_index":282,"t":{"36":{"position":[[329,8]]},"100":{"position":[[270,8]]}}}],["wherev",{"_index":343,"t":{"46":{"position":[[98,8]]}}}],["whether",{"_index":677,"t":{"102":{"position":[[1366,7],[1397,7]]},"116":{"position":[[82,7]]}}}],["whisper",{"_index":75,"t":{"6":{"position":[[339,7],[405,7]]}}}],["widespread",{"_index":103,"t":{"12":{"position":[[19,10]]},"28":{"position":[[23,10]]}}}],["withdraw",{"_index":358,"t":{"46":{"position":[[550,8]]}}}],["withdrawn",{"_index":365,"t":{"46":{"position":[[807,9]]}}}],["within",{"_index":175,"t":{"22":{"position":[[60,6]]},"32":{"position":[[156,6]]},"54":{"position":[[342,6]]},"56":{"position":[[81,6],[543,6]]},"58":{"position":[[390,6]]},"60":{"position":[[326,6]]},"73":{"position":[[544,6]]},"77":{"position":[[227,6]]},"87":{"position":[[300,6]]},"94":{"position":[[256,6],[299,6]]},"118":{"position":[[1055,6],[2078,6]]}}}],["without",{"_index":221,"t":{"30":{"position":[[65,7]]},"50":{"position":[[73,7]]},"102":{"position":[[182,7]]},"104":{"position":[[1384,7]]},"112":{"position":[[220,7]]},"114":{"position":[[79,7]]},"116":{"position":[[115,7]]}}}],["withstand",{"_index":555,"t":{"77":{"position":[[554,9]]}}}],["word",{"_index":200,"t":{"26":{"position":[[90,6]]}}}],["work",{"_index":426,"t":{"56":{"position":[[103,4]]},"73":{"position":[[132,5]]},"83":{"position":[[190,5]]}}}],["world",{"_index":160,"t":{"20":{"position"
:[[58,6]]},"77":{"position":[[584,5]]}}}],["worldwid",{"_index":39,"t":{"4":{"position":[[38,11],[97,11],[155,11],[212,11]]}}}],["write",{"_index":615,"t":{"92":{"position":[[4,5]]}}}],["written",{"_index":787,"t":{"118":{"position":[[862,7]]},"120":{"position":[[204,7]]}}}],["x",{"_index":91,"t":{"8":{"position":[[129,1]]}}}],["year",{"_index":818,"t":{"118":{"position":[[2089,4]]}}}],["zero",{"_index":44,"t":{"4":{"position":[[109,4],[167,4]]},"56":{"position":[[614,4]]},"58":{"position":[[159,4]]},"85":{"position":[[86,4]]},"96":{"position":[[69,4]]}}}],["zerokit",{"_index":488,"t":{"67":{"position":[[163,7]]},"71":{"position":[[204,8]]},"85":{"position":[[600,7]]}}}],["zk",{"_index":444,"t":{"56":{"position":[[629,5]]},"58":{"position":[[300,3]]},"85":{"position":[[27,2],[101,4],[198,2]]}}}],["zkp",{"_index":59,"t":{"6":{"position":[[113,4]]},"58":{"position":[[16,4]]}}}],["zkpodcast",{"_index":58,"t":{"6":{"position":[[102,10]]}}}],["zug",{"_index":270,"t":{"36":{"position":[[200,3],[313,3]]},"100":{"position":[[140,3],[254,3]]},"118":{"position":[[1527,4]]}}}]],"pipeline":["stemmer"]}}] \ No newline at end of file diff --git a/security/index.html b/security/index.html new file mode 100644 index 00000000..9bfb04c3 --- /dev/null +++ b/security/index.html @@ -0,0 +1,26 @@ + + + + + +Security | Vac Research + + + + + + + + + + +
+
+ + + + \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..9dbbbbaa --- /dev/null +++ b/sitemap.xml @@ -0,0 +1 @@ +https://vac.dev/rlogweekly0.5https://vac.dev/rlog/archiveweekly0.5https://vac.dev/rlog/building-privacy-protecting-infrastructureweekly0.5https://vac.dev/rlog/device-pairing-in-js-waku-and-go-wakuweekly0.5https://vac.dev/rlog/dns-based-discoveryweekly0.5https://vac.dev/rlog/ethics-surveillance-techweekly0.5https://vac.dev/rlog/feasibility-discv5weekly0.5https://vac.dev/rlog/feasibility-semaphore-rate-limiting-zksnarksweekly0.5https://vac.dev/rlog/fixing-whisper-with-wakuweekly0.5https://vac.dev/rlog/future-of-waku-networkweekly0.5https://vac.dev/rlog/GossipSub%20Improvementsweekly0.5https://vac.dev/rlog/introducing-nwakuweekly0.5https://vac.dev/rlog/kademlia-to-discv5weekly0.5https://vac.dev/rlog/Nescience-A-zkVM-leveraging-hiding-propertiesweekly0.5https://vac.dev/rlog/p2p-data-sync-for-mobileweekly0.5https://vac.dev/rlog/page/2weekly0.5https://vac.dev/rlog/page/3weekly0.5https://vac.dev/rlog/page/4weekly0.5https://vac.dev/rlog/presenting-js-wakuweekly0.5https://vac.dev/rlog/remote-logweekly0.5https://vac.dev/rlog/rln-anonymous-dos-preventionweekly0.5https://vac.dev/rlog/rln-light-verifiersweekly0.5https://vac.dev/rlog/rln-relayweekly0.5https://vac.dev/rlog/rln-v3weekly0.5https://vac.dev/rlog/vac-overviewweekly0.5https://vac.dev/rlog/waku-for-allweekly0.5https://vac.dev/rlog/waku-updateweekly0.5https://vac.dev/rlog/waku-v1-v2-bandwidth-comparisonweekly0.5https://vac.dev/rlog/waku-v2-ethereum-coscupweekly0.5https://vac.dev/rlog/waku-v2-ethereum-messagingweekly0.5https://vac.dev/rlog/waku-v2-planweekly0.5https://vac.dev/rlog/waku-v2-updateweekly0.5https://vac.dev/rlog/wakuv2-apdweekly0.5https://vac.dev/rlog/wakuv2-noiseweekly0.5https://vac.dev/rlog/wakuv2-relay-anonweekly0.5https://vac.dev/rlog/wechat-replacement-needweekly0.5https://vac.dev/weekly0.5https://vac.dev/communityweekly0.5https://vac.dev/co
ntributeweekly0.5https://vac.dev/deepresearchweekly0.5https://vac.dev/join-usweekly0.5https://vac.dev/mediaweekly0.5https://vac.dev/principlesweekly0.5https://vac.dev/privacy-policyweekly0.5https://vac.dev/publicationsweekly0.5https://vac.dev/rfcprocessweekly0.5https://vac.dev/securityweekly0.5https://vac.dev/termsweekly0.5https://vac.dev/vipsweekly0.5https://vac.dev/vsusweekly0.5 \ No newline at end of file diff --git a/terms/index.html b/terms/index.html new file mode 100644 index 00000000..82b61dc3 --- /dev/null +++ b/terms/index.html @@ -0,0 +1,26 @@ + + + + + +Terms of Use | Vac Research + + + + + + + + + + +
+

Terms of Use

Last updated: 14 February 2024

These website terms of use ('Website Terms of Use') are entered into by you and us, and they govern your access and use of this Website, including any content and functionality contained in the Website.

It is your responsibility to read the Website Terms of Use carefully before your use of the Website and your use of the Website means you have agreed to be bound and comply with these Website Terms of Use.

If you do not agree with these Website Terms of Use, you must not access or use the Website.

1) Who we are

For the purposes of these Website Terms of Use, the relevant entity is the Logos Collective Association, which has its registered office in Zug and its legal domicile address at:

Logos Collective Association
c/o PST Consulting GmbH
Baarerstrasse 10
6300 Zug
Switzerland

Whenever we refer to “Logos”, “we”, “us” or any other similar references, we are referring to the Logos Collective Association.

2) Disclaimers

The Website is provided by us on an ‘as is’ basis and you use the Website at your own sole discretion and risk.

We disclaim all warranties of any kind, express or implied, including without limitation the warranties of merchantability, fitness for a particular purpose, and non-infringement of intellectual property or other violation of rights. We do not warrant or make any representations concerning the completeness, accuracy, legality, utility, reliability, suitability or availability of the use of the Website, the content on this Website or otherwise relating to the Website, such content or on any sites linked to this site. These disclaimers will apply to the maximum extent permitted by applicable law.

We make no claims that the Website or any of its content is accessible, legally compliant or appropriate in your jurisdiction. Your access or use of the Website is at your own sole discretion and you are solely responsible for complying with any applicable local laws.

The content herein or as accessible through this website is intended to be made available for informational purposes only and should not be considered as creating any expectations or forming the basis of any contract, commitment or binding obligation with us. No information herein shall be considered to contain or be relied upon as a promise, representation, warranty or guarantee, whether express or implied and whether as to the past, present or the future in relation to the projects and matters described herein.

The information contained herein does not constitute financial, legal, tax, or other advice and should not be treated as such.

Nothing in this Website should be construed by you as an offer to buy or sell, or soliciting any offer to buy or sell any tokens or any security.

3) Forward-looking statements

The Website may also contain forward-looking statements that are based on current expectations, estimates, forecasts, assumptions and projections about the technology, industry and markets in general.

The forward-looking statements, which may include statements about the roadmap, project descriptions, technical details, functionalities, features, the development and use of tokens by projects, and any other statements related to such matters or as accessible through this website are subject to a high degree of risk and uncertainty. The forward-looking statements are subject to change based on, among other things, market conditions, technical developments, and regulatory environment. The actual development and results, including the order and the timeline, might vary from what’s presented. The information contained herein is a summary and does not purport to be accurate, reliable or complete and we bear no responsibility for the accuracy, reliability or completeness of information contained herein. Because of the high degree of risk and uncertainty described above, you should not place undue reliance on any matters described in this website or as accessible through this website.

While we aim to update our website regularly, all information, including the timeline and the specifics of each stage, is subject to change and may be amended or supplemented at any time, without notice and at our sole discretion.

4) Intellectual property rights

The Website and its contents are made available under the Creative Commons Attribution 4.0 International license (CC-BY 4.0). In essence, this license allows users to copy, modify and distribute the content in any format for any purpose, including commercial use, subject to certain requirements such as attributing us. For the full terms of this license, please refer to the following website: https://creativecommons.org/licenses/by/4.0/.

5) Third party websites

To the extent the Website provides any links to a third party website, then their terms and conditions, including privacy policies, govern your use of those third party websites. By linking such third party websites, we do not represent or imply that we endorse or support such third party websites or content therein, or that we believe such third party websites and content therein to be accurate, useful or non-harmful. We have no control over such third party websites and will not be liable for your use of or activities on any third party websites accessed through the Website. If you access such third party websites through the Website, it is at your own risk and you are solely responsible for your activities on such third party websites.

6) Limitation of liability

We will not be held liable to you under any contract, negligence, strict liability, or other legal or equitable theory for any lost profits, cost of procurement for substitute services, or any special, incidental, or consequential damages related to, arising from, or in any way connected with these Website Terms of Use, the Website, the content on the Website, or your use of the Website, even if we have been advised of the possibility of such damages. In any event, our aggregate liability for such claims is limited to EUR 100 (one hundred Euros). This limitation of liability will apply to the maximum extent permitted by applicable law.

7) Indemnity

You shall indemnify us and hold us harmless from and against any and all claims, damages and expenses, including attorneys’ fees, arising from or related to your use of the Website, the content on the Website, including without limitation your violation of these Website Terms of Use.

8) Modifications

We may modify or replace any part of this Website Terms of Use at any time and without notice. You are responsible for checking the Website periodically for any changes. The new Website Terms of Use will be effective immediately upon its posting on the Website.

9) Governing law

Swiss law governs these Website Terms of Use and any disputes between you and us, whether in court or arbitration, without regard to conflict of laws provisions.

10) Disputes

In these terms, “dispute” has the broadest meaning enforceable by law and includes any claim you make against or controversy you may have in relation to these Website Terms of Use, the Website, the content on the Website, or your use of the Website.

We prefer arbitration over litigation as we believe it meets our principle of resolving disputes in the most effective and cost-effective manner. You are bound by the following arbitration clause, which waives your right to litigation and to be heard by a judge. Please note that court review of an arbitration award is limited. You also waive all your rights to a jury trial (if any) in any and all jurisdictions.

If a (potential) dispute arises, you must first use your reasonable efforts to resolve it amicably with us. If these efforts do not result in a resolution of such dispute, you shall then send us a written notice of dispute setting out (i) the nature of the dispute, and the claim you are making; and (ii) the remedy you are seeking.

If we and you are unable to further resolve this dispute within sixty (60) calendar days of us receiving this notice of dispute, then any such dispute will be referred to and finally resolved by you and us through an arbitration administered by the Swiss Chambers’ Arbitration Institution in accordance with the Swiss Rules of International Arbitration for the time being in force, which rules are deemed to be incorporated herein by reference. The arbitral decision may be enforced in any court. The arbitration will be held in Zug, Switzerland, and may be conducted via video conference or other virtual/online methods if possible. The tribunal will consist of one arbitrator, and all proceedings as well as communications between the parties will be kept confidential. The language of the arbitration will be English. Payment of all relevant fees in respect of the arbitration, including filing, administration and arbitrator fees will be in accordance with the Swiss Rules of International Arbitration.

Regardless of any applicable statute of limitations, you must bring any claims within one year after the claim arose or the time when you should have reasonably known about the claim. You also waive the right to participate in a class action lawsuit or a classwide arbitration against us.

11) About these Website Terms of Use

These Website Terms of Use cover the entire agreement between you and us regarding the Website and supersede all prior and contemporaneous understandings, agreements, representations and warranties, both written and oral, with respect to the Website.

The captions and headings identifying sections and subsections of these Website Terms of Use are for reference only and do not define, modify, expand, limit, or affect the interpretation of any provisions of these Website Terms of Use.

If any part of these Website Terms of Use is held invalid or unenforceable, that part will be severable from these Website Terms of Use, and the remaining portions will remain in full force and effect. If we fail to enforce any of these Website Terms of Use, that does not mean that we have waived our right to enforce them.

If you have any specific questions about these Website Terms of Use, please contact us at legal@free.technology.

This document is licensed under CC-BY-SA.

+ + + + \ No newline at end of file diff --git a/theme/image/favicon.ico b/theme/image/favicon.ico new file mode 100644 index 00000000..369fb702 Binary files /dev/null and b/theme/image/favicon.ico differ diff --git a/theme/image/horizontal_lockup_small_white.svg b/theme/image/horizontal_lockup_small_white.svg new file mode 100644 index 00000000..13191d85 --- /dev/null +++ b/theme/image/horizontal_lockup_small_white.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/theme/image/logo-og.svg b/theme/image/logo-og.svg new file mode 100644 index 00000000..bee381b4 --- /dev/null +++ b/theme/image/logo-og.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/theme/image/logo.svg b/theme/image/logo.svg new file mode 100644 index 00000000..8a20f35f --- /dev/null +++ b/theme/image/logo.svg @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/theme/image/preview-image.png b/theme/image/preview-image.png new file mode 100644 index 00000000..a6954b40 Binary files /dev/null and b/theme/image/preview-image.png differ diff --git a/vac-overview/index.html b/vac-overview/index.html new file mode 100644 index 00000000..11e0f772 --- /dev/null +++ b/vac-overview/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/vips/index.html b/vips/index.html new file mode 100644 index 00000000..ca7fb01f --- /dev/null +++ b/vips/index.html @@ -0,0 +1,29 @@ + + + + + +Incubator Projects | Vac Research + + + + + + + + + + +
+

Vac Incubator Projects

Vac incubator projects are emerging initiatives that stem from Vac's deep research efforts. +These projects are envisioned to eventually become separate IFT projects, benefiting from the support and resources of both the Deep Research and R&D Service Units within Vac. +The incubator projects operate within Vac, leveraging the expertise and collaboration available to them.

Nescience

Nescience focuses on developing a privacy-preserving general-purpose zero-knowledge virtual machine. +It enables privacy-preserving computing, which holds significant importance for IFT programs, especially Logos.

+ + + + \ No newline at end of file diff --git a/vsus/index.html b/vsus/index.html new file mode 100644 index 00000000..d9e82eb5 --- /dev/null +++ b/vsus/index.html @@ -0,0 +1,69 @@ + + + + + +R&D Service Units | Vac Research + + + + + + + + + + +
+

Vac R&D Service Units

Vac's R&D Service Units play a crucial role in supporting IFT projects. +In addition to providing expertise, resources, and technical guidance, +they also develop software artefacts, such as nim-libp2p and zerokit.

P2P

The P2P R&D Service Unit is a vital part of Vac, specialising in peer-to-peer (P2P) technologies. +The P2P unit develops nim-libp2p, +works on improving the libp2p gossipsub protocol, +and assists projects with the integration of P2P network layers. +The P2P unit collaborates closely with Vac Deep Research to conduct research aimed at enhancing libp2p gossipsub. +By focusing on advancing P2P technologies, the P2P unit contributes to the overall improvement and efficiency of decentralised networks, +enabling seamless decentralised communication within IFT projects and beyond.

Token Economics (TKE)

The Vac Token Economics Unit is dedicated to assisting IFT projects in designing their token economies, incentives, and markets. +TKE's collaboration with IFT project teams is intensive and occurs on a day-to-day basis, +where TKE not only responds to their needs but also proactively drives the conversation forward by suggesting new ideas and strategies +based on TKE's research. +The team brings together a broad spectrum of skills and knowledge, ranging from the modelling of dynamic systems to theoretical modelling +and general cryptoeconomics.

Distributed Systems Testing (DST)

The Distributed Systems Testing (DST) R&D Service Unit is responsible for developing distributed systems testing software. +DST's primary objective is to assist IFT projects in understanding the scaling behaviour of their nodes within larger networks. +By conducting thorough regression testing, the DST unit helps identify potential bottlenecks and performance issues, +ensuring the reliability and stability of the projects. +The DST unit's expertise in distributed systems testing enables IFT projects to deliver scalable and resilient solutions that can withstand the demands of real-world decentralised applications.

Quality Assurance (QA)

The QA Service Unit is dedicated to supporting IFT projects through the development and execution of comprehensive test plans. +Primary responsibilities include implementing unit tests and interoperability tests to ensure seamless integration and functionality across systems.

The QA unit plays a crucial role in the verification of project implementations. +By rigorously testing project implementations against defined specifications, QA ensures that all functionalities align with the project's requirements. +QA's proactive approach to identifying and reporting bugs ensures that any issues are addressed promptly, enhancing the overall quality and reliability of the software. +Through meticulous testing and quality assurance processes, the QA Service Unit ensures that IFT projects deliver robust and high-performing software solutions.

Smart Contracts (SC)

Vac's Smart Contracts Service Unit specialises in the development, maintenance, and auditing of smart contracts for IFT projects. +The SC unit ensures that all smart contracts are robust, secure, and aligned with project requirements. +SC designs and develops smart contracts tailored to the specific needs of IFT projects, +ensuring they function as intended and are up-to-date with any project changes or requirements. +The unit's thorough auditing process involves meticulously reviewing smart contracts to identify and rectify potential vulnerabilities, +ensuring the highest standards of security and reliability.

Nim

With the IFT's extensive use of the Nim ecosystem, +the Nim Service Unit focuses on the development and maintenance of Nim tooling and core libraries essential for IFT projects. +The Nim unit works on critical tools in the Nim ecosystem, including the Nim compiler, Nimble (package manager), +and nim-suggest, ensuring they are efficient, reliable, and up-to-date. +The Nim unit further plans to maintain core libraries developed by IFT, such as Chronos. +The mandate of the Nim unit also includes providing support to IFT projects regarding the use of Nim, +identifying and addressing specific project needs and demands related to the Nim ecosystem.

Applied Cryptography & ZK (ACZ)

The Applied Cryptography & ZK R&D Service Unit focuses on cryptographic solutions and zero-knowledge (ZK) proofs. +ACZ provides valuable assistance to IFT projects by offering expertise in employing ZK proofs and implementing cryptographic techniques. +The ACZ unit specialises in areas such as employing noise protocol channels and other cryptographic-related aspects. +By leveraging cutting-edge cryptographic technologies, ACZ enhances the security, privacy, and trustworthiness of Logos projects, +contributing to the overall integrity and resilience of the decentralised web ecosystem. +ACZ develops zerokit and stealth-address-kit.

RFC

The Vac RFC unit serves as a vital cornerstone in the IFT, +taking on the responsibility of shepherding and editing specifications for IFT projects. +By meticulously crafting and overseeing these specifications, +the Vac RFC unit acts as a linchpin for ensuring standardised and interoperable protocols within the IFT ecosystem. +The RFC unit's expertise and attention to detail contribute to a cohesive and collaborative environment, +facilitating seamless integration and advancement of decentralised technologies +throughout the IFT and beyond.

+ + + + \ No newline at end of file diff --git a/waku-for-all/index.html b/waku-for-all/index.html new file mode 100644 index 00000000..27723ef6 --- /dev/null +++ b/waku-for-all/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/waku-update/index.html b/waku-update/index.html new file mode 100644 index 00000000..5fb93bde --- /dev/null +++ b/waku-update/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/waku-v1-v2-bandwidth-comparison/index.html b/waku-v1-v2-bandwidth-comparison/index.html new file mode 100644 index 00000000..9e7dbadf --- /dev/null +++ b/waku-v1-v2-bandwidth-comparison/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/waku-v2-ethereum-coscup/index.html b/waku-v2-ethereum-coscup/index.html new file mode 100644 index 00000000..a823259e --- /dev/null +++ b/waku-v2-ethereum-coscup/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/waku-v2-ethereum-messaging/index.html b/waku-v2-ethereum-messaging/index.html new file mode 100644 index 00000000..22d9e86f --- /dev/null +++ b/waku-v2-ethereum-messaging/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/waku-v2-plan/index.html b/waku-v2-plan/index.html new file mode 100644 index 00000000..9bfecce8 --- /dev/null +++ b/waku-v2-plan/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/waku-v2-update/index.html b/waku-v2-update/index.html new file mode 100644 index 00000000..4be1ec0b --- /dev/null +++ b/waku-v2-update/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/wakuv2-apd/index.html b/wakuv2-apd/index.html new file mode 100644 index 00000000..98775225 --- /dev/null +++ b/wakuv2-apd/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/wakuv2-noise/index.html b/wakuv2-noise/index.html new file mode 100644 index 
00000000..4b820cfb --- /dev/null +++ b/wakuv2-noise/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/wakuv2-relay-anon/index.html b/wakuv2-relay-anon/index.html new file mode 100644 index 00000000..8af0e229 --- /dev/null +++ b/wakuv2-relay-anon/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file diff --git a/wechat-replacement-need/index.html b/wechat-replacement-need/index.html new file mode 100644 index 00000000..ac37b468 --- /dev/null +++ b/wechat-replacement-need/index.html @@ -0,0 +1,11 @@ + + + + + + + + + \ No newline at end of file