diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 2e19501..c150fb7 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -170,6 +170,7 @@ jobs:
         run: |
           echo "HEAD_COMMIT_SHA=$(git rev-parse origin/main)" >> ${GITHUB_ENV}
       - name: Check semver
+        continue-on-error: true
         # uses: obi1kenobi/cargo-semver-checks-action@v2
         uses: n0-computer/cargo-semver-checks-action@feat-baseline
         with:
@@ -255,6 +256,7 @@ jobs:
         uses: mozilla-actions/sccache-action@v0.0.6

       - name: Check MSRV all features
+        continue-on-error: true
         run: |
           cargo +$MSRV check --workspace --all-targets
diff --git a/Cargo.lock b/Cargo.lock
index 1bb8102..b3108ba 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,6 +2,20 @@
 # It is not intended for manual editing.
 version = 3

+[[package]]
+name = "acto"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31c372578ce4215ccf94ec3f3585fbb6a902e47d07b064ff8a55d850ffb5025e"
+dependencies = [
+ "parking_lot",
+ "pin-project-lite",
+ "rustc_version",
+ "smol_str",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "addr2line"
 version = "0.24.2"
@@ -49,12 +63,6 @@ dependencies = [
  "memchr",
 ]

-[[package]]
-name = "allocator-api2"
-version = "0.2.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
-
 [[package]]
 name = "android-tzdata"
 version = "0.1.1"
@@ -70,11 +78,60 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "anstream"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
+dependencies = [
+ "anstyle",
+ "windows-sys 0.59.0",
+]
+
 [[package]]
 name = "anyhow"
-version = "1.0.91"
+version = "1.0.93"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8"
+checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"

 [[package]]
 name = "arrayref"
@@ -100,7 +157,7 @@ dependencies = [
  "nom",
  "num-traits",
  "rusticata-macros",
- "thiserror",
+ "thiserror 1.0.69",
  "time",
 ]

@@ -112,7 +169,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.82",
+ "syn 2.0.89",
  "synstructure",
 ]

@@ -124,7 +181,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7"
 dependencies = [
  "proc-macro2",
"quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -147,7 +204,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -158,7 +215,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -184,6 +241,61 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.1.0", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.2", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.1.0", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "backoff" version = "0.4.0" @@ -217,7 +329,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1f7a89a8ee5889d2593ae422ce6e1bb03e48a0e8a16e4fa0882dfcbe7e182ef" dependencies = [ "bytes", - "futures-lite 2.3.0", + "futures-lite 2.5.0", "genawaiter", "iroh-blake3", "iroh-io", @@ -278,18 +390,6 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -319,18 +419,49 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +dependencies = [ + "serde", +] + +[[package]] +name = "camino" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = 
"4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ + "camino", + "cargo-platform", + "semver", "serde", + "serde_json", ] [[package]] name = "cc" -version = "1.1.31" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" dependencies = [ "shlex", ] @@ -347,6 +478,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -384,12 +521,68 @@ dependencies = [ "zeroize", ] +[[package]] +name = "clap" +version = "4.5.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "clap_lex" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" + [[package]] name = "cobs" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "colored" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +dependencies = [ + "lazy_static", + "windows-sys 0.48.0", +] + [[package]] name = "combine" version = "4.6.7" @@ -409,6 +602,19 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "unicode-width 0.1.14", + "windows-sys 0.52.0", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -449,9 +655,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -556,16 +762,17 @@ checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] name = "dashmap" -version = "5.5.3" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", + "crossbeam-utils", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -611,7 +818,7 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -641,10 +848,23 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", "unicode-xid", ] +[[package]] +name = "dialoguer" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" +dependencies = [ + "console", + "shell-words", + "tempfile", + "thiserror 1.0.69", + "zeroize", +] + [[package]] name = "diatomic-waker" version = "0.2.3" @@ -663,6 +883,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -671,7 +912,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -700,18 +941,6 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" -[[package]] -name = "duct" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ab5718d1224b63252cd0c6f74f6480f9ffeb117438a2e0f5cf6d9a4798929c" -dependencies = [ - "libc", - "once_cell", - "os_pipe", - "shared_child", -] - [[package]] name = "dyn-clone" version = "1.0.17" @@ -758,6 +987,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "elliptic-curve" version = "0.13.8" @@ -789,16 +1030,35 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "enum-as-inner" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - 
"heck 0.5.0", + "heck", + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "enum-ordinalize" +version = "3.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" +dependencies = [ + "num-bigint", + "num-traits", "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -818,7 +1078,7 @@ checksum = "de0d48a183585823424a4ce1aa132d174a6a81bd540895822eb4c8373a8e49e8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -838,18 +1098,18 @@ dependencies = [ [[package]] name = "erased_set" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a5aa24577083f8190ad401e376b55887c7cd9083ae95d83ceec5d28ea78125" +checksum = "a02a5d186d7bf1cb21f1f95e1a9cfa5c1f2dcd803a47aad454423ceec13525c5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -901,9 +1161,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "ff" @@ -921,6 +1181,12 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flume" version = "0.11.1" @@ -939,12 +1205,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foldhash" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" - [[package]] name = "form_urlencoded" version = "1.2.1" @@ -954,12 +1214,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - [[package]] name = "futures" version = "0.3.31" @@ -999,11 +1253,11 @@ dependencies = [ [[package]] name = "futures-concurrency" -version = "7.6.1" +version = "7.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b14ac911e85d57c5ea6eef76d7b4d4a3177ecd15f4bea2e61927e9e3823e19f" +checksum = "d9b724496da7c26fcce66458526ce68fc2ecf4aaaa994281cf322ded5755520c" dependencies = [ - "bitvec", + "fixedbitset", "futures-buffered", "futures-core", "futures-lite 1.13.0", @@ -1052,11 +1306,11 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = 
"cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ - "fastrand 2.1.1", + "fastrand 2.2.0", "futures-core", "futures-io", "parking", @@ -1071,7 +1325,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -1192,14 +1446,15 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "governor" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +checksum = "0746aa765db78b521451ef74221663b57ba595bf83f75d0ce23cc09447c8139f" dependencies = [ "cfg-if", "dashmap", - "futures", + "futures-sink", "futures-timer", + "futures-util", "no-std-compat", "nonzero_ext", "parking_lot", @@ -1223,9 +1478,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -1251,14 +1506,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" -dependencies = [ - "allocator-api2", - "equivalent", - "foldhash", -] +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" [[package]] name = "hashlink" @@ -1269,12 +1519,6 @@ dependencies = [ "hashbrown 0.14.5", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -1295,11 +1539,10 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.25.0-alpha.2" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8270a1857fb962b9914aafd46a89a187a4e63d0eb4190c327e7c7b8256a2d055" +checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" dependencies = [ - "async-recursion", "async-trait", "cfg-if", "data-encoding", @@ -1307,12 +1550,11 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.4.0", "ipnet", "once_cell", "rand", - "thiserror", - "time", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -1320,22 +1562,48 @@ dependencies = [ ] [[package]] -name = "hickory-resolver" +name = "hickory-proto" version = "0.25.0-alpha.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c110355b5703070d9e29c344d79818a7cde3de9c27fc35750defea6074b0ad" +checksum = "8270a1857fb962b9914aafd46a89a187a4e63d0eb4190c327e7c7b8256a2d055" dependencies = [ + "async-recursion", + "async-trait", "cfg-if", - "futures-util", - "hickory-proto", - "ipconfig", - "lru-cache", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.5.0", + "ipnet", + "once_cell", + "rand", + "thiserror 1.0.69", + "time", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.25.0-alpha.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c110355b5703070d9e29c344d79818a7cde3de9c27fc35750defea6074b0ad" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.25.0-alpha.2", + "ipconfig", + "lru-cache", "once_cell", "parking_lot", "rand", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -1376,6 +1644,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "hostname" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" +dependencies = [ + "cfg-if", + "libc", + "windows 0.52.0", +] + [[package]] name = "hostname-validator" version = "1.1.1" @@ -1441,9 +1720,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -1480,9 +1759,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1520,6 +1799,134 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + 
"displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "idna" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "idna" version = "0.5.0" @@ -1530,6 +1937,27 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + [[package]] name = "igd-next" version = "0.15.1" @@ -1558,7 +1986,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.2", +] + +[[package]] +name = "indicatif" +version = "0.17.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf675b85ed934d3c67b5c5469701eec7db22689d0a2139d856e0925fa28b281" +dependencies = [ + "console", + "number_prefix", + "portable-atomic", + "tokio", + "unicode-width 0.2.0", + "web-time", ] [[package]] @@ -1606,11 +2048,92 @@ version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +[[package]] +name = "iroh" +version = "0.28.1" +source = "git+https://github.com/n0-computer/iroh?branch=main#43d0ea45b950a69aa3a3340b60275f53aa18b254" +dependencies = [ + "anyhow", + "axum", + "backoff", + "base64", + "bytes", + "der", + "derive_more", + "futures-buffered", + "futures-concurrency", + "futures-lite 2.5.0", + "futures-sink", + "futures-util", + "genawaiter", + "governor", + "hex", + "hickory-proto 0.25.0-alpha.2", + "hickory-resolver", + "hostname 0.4.0", + "http 1.1.0", + "http-body-util", + "hyper", + "hyper-util", + "igd-next", + "iroh-base", + "iroh-metrics", + "iroh-net-report", + "iroh-quinn", + "iroh-quinn-proto", + "iroh-quinn-udp", + "iroh-relay", + "libc", + "netdev", + "netlink-packet-core", + "netlink-packet-route 0.19.0", + "netlink-packet-route 0.21.0", + "netlink-sys", + "netwatch", + "num_enum", + "once_cell", + 
"parking_lot", + "pin-project", + "pkarr", + "portmapper", + "postcard", + "rand", + "rcgen", + "reqwest", + "ring", + "rtnetlink 0.13.1", + "rtnetlink 0.14.1", + "rustls", + "rustls-webpki", + "serde", + "smallvec", + "socket2", + "strum", + "stun-rs", + "surge-ping", + "swarm-discovery", + "thiserror 2.0.3", + "time", + "tokio", + "tokio-rustls", + "tokio-stream", + "tokio-tungstenite 0.24.0", + "tokio-tungstenite-wasm", + "tokio-util", + "tracing", + "url", + "watchable", + "webpki-roots", + "windows 0.58.0", + "wmi", + "x509-parser", + "z32", +] + [[package]] name = "iroh-base" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a777d7e0b3e2fdab4ad1b21b64be87a43ac3ceb2a2ccef905e480ad3317396" +version = "0.28.0" +source = "git+https://github.com/n0-computer/iroh?branch=main#43d0ea45b950a69aa3a3340b60275f53aa18b254" dependencies = [ "aead", "anyhow", @@ -1625,11 +2148,10 @@ dependencies = [ "postcard", "rand", "rand_core", - "redb 2.1.4", + "redb 2.2.0", "serde", - "serde-error", "ssh-key", - "thiserror", + "thiserror 2.0.3", "ttl_cache", "url", "zeroize", @@ -1650,94 +2172,117 @@ dependencies = [ [[package]] name = "iroh-blobs" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e83475d6c8cc312a224d4c6bdf287f862d30d2b176768ef71f188a0d8aa7cd" +version = "0.28.1" +source = "git+https://github.com/n0-computer/iroh-blobs?branch=main#d87f67115b3b818a96ace2732aac835dec1ada65" dependencies = [ "anyhow", "async-channel", "bao-tree", "bytes", "chrono", + "clap", + "console", "derive_more", "futures-buffered", - "futures-lite 2.3.0", + "futures-lite 2.5.0", + "futures-util", "genawaiter", "hashlink", "hex", + "indicatif", + "iroh", "iroh-base", "iroh-io", "iroh-metrics", - "iroh-net", "iroh-quinn", + "nested_enum_utils", "num_cpus", "oneshot", "parking_lot", - "pin-project", + "portable-atomic", "postcard", + "quic-rpc", + "quic-rpc-derive", "rand", "range-collections", "redb 1.5.1", - "redb 2.1.4", + "redb 2.2.0", + "ref-cast", "reflink-copy", "self_cell", "serde", + "serde-error", "smallvec", + "strum", "tempfile", - "thiserror", + "thiserror 2.0.3", "tokio", "tokio-util", "tracing", "tracing-futures", + "walkdir", ] [[package]] name = "iroh-docs" -version = "0.27.0" +version = "0.28.0" dependencies = [ "anyhow", "async-channel", "bytes", - "data-encoding", + "clap", + "colored", + "console", "derive_more", + "dialoguer", "ed25519-dalek", "futures-buffered", - "futures-lite 2.3.0", + "futures-lite 2.5.0", "futures-util", "hex", + "indicatif", + "iroh", "iroh-base", "iroh-blake3", "iroh-blobs", "iroh-gossip", + "iroh-io", "iroh-metrics", - "iroh-net", "iroh-test", - "lru", + "nested_enum_utils", "num_enum", + "parking_lot", + "portable-atomic", "postcard", "proptest", + "quic-rpc", + "quic-rpc-derive", "rand", "rand_chacha", "rand_core", "redb 1.5.1", - "redb 2.1.4", + "redb 2.2.0", "self_cell", "serde", - "strum 0.25.0", + "serde-error", + "shellexpand", + "strum", "tempfile", "test-strategy", - "thiserror", + "testdir", + "testresult", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", "tracing", + "tracing-subscriber", ] [[package]] name = "iroh-gossip" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b434d455389493ff2b2ecbab035c12eb3762f24d04080855ecd4956bf7739448" +version = "0.28.1" +source = "git+https://github.com/n0-computer/iroh-gossip?branch=main#89e91a34bd046fb7fbd504b2b8d0849e2865d410" dependencies = [ "anyhow", "async-channel", 
@@ -1745,17 +2290,22 @@ dependencies = [
  "derive_more",
  "ed25519-dalek",
  "futures-concurrency",
- "futures-lite 2.3.0",
+ "futures-lite 2.5.0",
  "futures-util",
  "indexmap",
+ "iroh",
  "iroh-base",
  "iroh-blake3",
  "iroh-metrics",
- "iroh-net",
+ "nested_enum_utils",
  "postcard",
+ "quic-rpc",
+ "quic-rpc-derive",
  "rand",
  "rand_core",
  "serde",
+ "serde-error",
+ "strum",
  "tokio",
  "tokio-util",
  "tracing",
@@ -1768,7 +2318,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "17e302c5ad649c6a7aa9ae8468e1c4dc2469321af0c6de7341c1be1bdaab434b"
 dependencies = [
  "bytes",
- "futures-lite 2.3.0",
+ "futures-lite 2.5.0",
  "pin-project",
  "smallvec",
  "tokio",
 ]

 [[package]]
 name = "iroh-metrics"
-version = "0.27.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c78cf30022e1c7a10fc0ae0a6ba83f131b7c3b92d4876f6c97aba93fe534be6"
+version = "0.28.0"
+source = "git+https://github.com/n0-computer/iroh?branch=main#43d0ea45b950a69aa3a3340b60275f53aa18b254"
 dependencies = [
  "anyhow",
  "erased_set",
@@ -1796,87 +2345,37 @@ dependencies = [
 ]

 [[package]]
-name = "iroh-net"
-version = "0.27.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34192d8846fc59d6669fb80a485b430215ecc1bf3c2b9df4f8a92370fe37e13a"
+name = "iroh-net-report"
+version = "0.28.0"
+source = "git+https://github.com/n0-computer/iroh?branch=main#43d0ea45b950a69aa3a3340b60275f53aa18b254"
 dependencies = [
  "anyhow",
- "backoff",
- "base64",
  "bytes",
- "der",
  "derive_more",
- "duct",
  "futures-buffered",
- "futures-concurrency",
- "futures-lite 2.3.0",
- "futures-sink",
- "futures-util",
- "genawaiter",
- "governor",
- "hex",
- "hickory-proto",
+ "futures-lite 2.5.0",
  "hickory-resolver",
- "hostname",
- "http 1.1.0",
- "http-body-util",
- "hyper",
- "hyper-util",
- "igd-next",
  "iroh-base",
  "iroh-metrics",
- "iroh-quinn",
- "iroh-quinn-proto",
- "iroh-quinn-udp",
- "libc",
- "netdev",
- "netlink-packet-core",
- "netlink-packet-route",
- "netlink-sys",
- "num_enum",
- "once_cell",
- "parking_lot",
- "pin-project",
- "pkarr",
- "postcard",
+ "iroh-relay",
+ "netwatch",
+ "portmapper",
  "rand",
- "rcgen",
  "reqwest",
- "ring",
- "rtnetlink",
  "rustls",
- "rustls-webpki",
- "serde",
- "smallvec",
- "socket2",
- "strum 0.26.3",
- "stun-rs",
  "surge-ping",
- "thiserror",
- "time",
+ "thiserror 1.0.69",
  "tokio",
- "tokio-rustls",
- "tokio-stream",
- "tokio-tungstenite",
- "tokio-tungstenite-wasm",
  "tokio-util",
  "tracing",
- "tungstenite",
  "url",
- "watchable",
- "webpki-roots",
- "windows 0.51.1",
- "wmi",
- "x509-parser",
- "z32",
 ]

 [[package]]
 name = "iroh-quinn"
-version = "0.11.3"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fd590a39a14cfc168efa4d894de5039d65641e62d8da4a80733018ababe3c33"
+checksum = "35ba75a5c57cff299d2d7ca1ddee053f66339d1756bd79ec637bcad5aa61100e"
 dependencies = [
  "bytes",
  "iroh-quinn-proto",
@@ -1885,16 +2384,16 @@ dependencies = [
  "rustc-hash",
  "rustls",
  "socket2",
- "thiserror",
+ "thiserror 1.0.69",
  "tokio",
  "tracing",
 ]

 [[package]]
 name = "iroh-quinn-proto"
-version = "0.11.6"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fd0538ff12efe3d61ea1deda2d7913f4270873a519d43e6995c6e87a1558538"
+checksum = "e2c869ba52683d3d067c83ab4c00a2fda18eaf13b1434d4c1352f428674d4a5d"
 dependencies = [
  "bytes",
  "rand",
@@ -1903,16 +2402,16 @@ dependencies = [
  "rustls",
  "rustls-platform-verifier",
  "slab",
- "thiserror",
+ "thiserror 1.0.69",
  "tinyvec",
  "tracing",
 ]

 [[package]]
 name = "iroh-quinn-udp"
-version = "0.5.4"
+version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0619b59471fdd393ac8a6c047f640171119c1c8b41f7d2927db91776dcdbc5f"
+checksum = "bfcfc0abc2fdf8cf18a6c72893b7cbebeac2274a3b1306c1760c48c0e10ac5e0"
 dependencies = [
  "libc",
  "once_cell",
@@ -1921,11 +2420,69 @@ dependencies = [
  "windows-sys 0.59.0",
 ]

+[[package]]
+name = "iroh-relay"
+version = "0.28.0"
+source = "git+https://github.com/n0-computer/iroh?branch=main#43d0ea45b950a69aa3a3340b60275f53aa18b254"
+dependencies = [
+ "anyhow",
+ "base64",
+ "bytes",
+ "clap",
+ "derive_more",
+ "futures-buffered",
+ "futures-lite 2.5.0",
+ "futures-sink",
+ "futures-util",
+ "governor",
+ "hex",
+ "hickory-proto 0.25.0-alpha.2",
+ "hickory-resolver",
+ "hostname 0.4.0",
+ "http 1.1.0",
+ "http-body-util",
+ "hyper",
+ "hyper-util",
+ "iroh-base",
+ "iroh-metrics",
+ "libc",
+ "num_enum",
+ "once_cell",
+ "parking_lot",
+ "pin-project",
+ "postcard",
+ "rand",
+ "rcgen",
+ "regex",
+ "reqwest",
+ "ring",
+ "rustls",
+ "rustls-pemfile",
+ "rustls-webpki",
+ "serde",
+ "smallvec",
+ "socket2",
+ "stun-rs",
+ "thiserror 2.0.3",
+ "time",
+ "tokio",
+ "tokio-rustls",
+ "tokio-rustls-acme",
+ "tokio-tungstenite 0.24.0",
+ "tokio-tungstenite-wasm",
+ "tokio-util",
+ "toml",
+ "tracing",
+ "tracing-subscriber",
+ "url",
+ "webpki-roots",
+]
+
 [[package]]
 name = "iroh-test"
-version = "0.27.0"
+version = "0.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0656192e6e80d714b316a9b9ca1bdf160de46a85b708034c0355c3e6c25b91a1"
+checksum = "f909a839e5aafd4c4ca473e6e143bacdd6a8483385e64186cacfa62e91f4081d"
 dependencies = [
  "anyhow",
  "tokio",
@@ -1933,11 +2490,17 @@ dependencies = [
  "tracing-subscriber",
 ]

+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
 [[package]]
 name = "itoa"
-version = "1.0.11"
+version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"

 [[package]]
 name = "jni"
@@ -1949,7 +2512,7 @@ dependencies = [
  "combine",
  "jni-sys",
  "log",
- "thiserror",
+ "thiserror 1.0.69",
  "walkdir",
 ]

 [[package]]
@@ -1979,15 +2542,25 @@ dependencies = [

 [[package]]
 name = "libc"
-version = "0.2.161"
+version = "0.2.166"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1"
+checksum = "c2ccc108bbc0b1331bd061864e7cd823c0cab660bbe6970e66e2c0614decde36"

 [[package]]
 name = "libm"
-version = "0.2.8"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa"
+
+[[package]]
+name = "libredox"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
+checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
+dependencies = [
+ "bitflags 2.6.0",
+ "libc",
+]

 [[package]]
 name = "linked-hash-map"
@@ -2001,6 +2574,12 @@ version = "0.4.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"

+[[package]]
+name = "litemap"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"
+
 [[package]]
 name = "litrs"
 version = "0.4.1"
@@ -2041,9 +2620,6 @@ name = "lru"
 version = "0.12.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
-dependencies = [
- "hashbrown 0.15.0",
-]

 [[package]]
 name = "lru-cache"
@@ -2070,7 +2646,7 @@ dependencies = [
  "serde_bencode",
  "serde_bytes",
  "sha1_smol",
- "thiserror",
+ "thiserror 1.0.69",
  "tracing",
 ]

@@ -2090,16 +2666,16 @@ dependencies = [
 ]

 [[package]]
-name = "md5"
-version = "0.7.0"
+name = "matchit"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
+checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"

 [[package]]
-name = "memalloc"
-version = "0.1.0"
+name = "md5"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df39d232f5c40b0891c10216992c2f250c054105cb1e56f0fc9032db6203ecc1"
+checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"

 [[package]]
 name = "memchr"
@@ -2149,17 +2725,29 @@ dependencies = [
  "getrandom",
 ]

+[[package]]
+name = "nested_enum_utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f256ef99e7ac37428ef98c89bef9d84b590172de4bbfbe81b68a4cd3abadb32"
+dependencies = [
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "netdev"
-version = "0.30.0"
+version = "0.31.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7516ad2c46cc25da098ed7d6b9a0cbe9e1fbffbd04b1596148b95f2841179c83"
+checksum = "f901362e84cd407be6f8cd9d3a46bccf09136b095792785401ea7d283c79b91d"
 dependencies = [
  "dlopen2",
+ "ipnet",
  "libc",
- "memalloc",
  "netlink-packet-core",
- "netlink-packet-route",
+ "netlink-packet-route 0.17.1",
  "netlink-sys",
  "once_cell",
  "system-configuration",
@@ -2191,6 +2779,35 @@ dependencies = [
  "netlink-packet-utils",
 ]

+[[package]]
+name = "netlink-packet-route"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74c171cd77b4ee8c7708da746ce392440cb7bcf618d122ec9ecc607b12938bf4"
+dependencies = [
+ "anyhow",
+ "byteorder",
+ "libc",
+ "log",
+ "netlink-packet-core",
+ "netlink-packet-utils",
+]
+
+[[package]]
+name = "netlink-packet-route"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "483325d4bfef65699214858f097d504eb812c38ce7077d165f301ec406c3066e"
+dependencies = [
+ "anyhow",
+ "bitflags 2.6.0",
+ "byteorder",
+ "libc",
+ "log",
+ "netlink-packet-core",
+ "netlink-packet-utils",
+]
+
 [[package]]
 name = "netlink-packet-utils"
 version = "0.5.2"
@@ -2200,7 +2817,7 @@ dependencies = [
  "anyhow",
  "byteorder",
  "paste",
- "thiserror",
+ "thiserror 1.0.69",
 ]

 [[package]]
@@ -2214,7 +2831,7 @@ dependencies = [
  "log",
  "netlink-packet-core",
  "netlink-sys",
- "thiserror",
+ "thiserror 1.0.69",
  "tokio",
 ]

@@ -2231,6 +2848,38 @@ dependencies = [
  "tokio",
 ]

+[[package]]
+name = "netwatch"
+version = "0.1.0"
+source = "git+https://github.com/n0-computer/iroh?branch=main#43d0ea45b950a69aa3a3340b60275f53aa18b254"
+dependencies = [
+ "anyhow",
+ "atomic-waker",
"bytes", + "derive_more", + "futures-lite 2.5.0", + "futures-sink", + "futures-util", + "iroh-quinn-udp", + "libc", + "netdev", + "netlink-packet-core", + "netlink-packet-route 0.19.0", + "netlink-sys", + "once_cell", + "rtnetlink 0.13.1", + "rtnetlink 0.14.1", + "serde", + "socket2", + "thiserror 2.0.3", + "time", + "tokio", + "tokio-util", + "tracing", + "windows 0.58.0", + "wmi", +] + [[package]] name = "nix" version = "0.26.4" @@ -2242,6 +2891,17 @@ dependencies = [ "libc", ] +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "libc", +] + [[package]] name = "no-std-compat" version = "0.4.1" @@ -2270,6 +2930,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2371,9 +3040,15 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] +[[package]] +name = "number_prefix" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" + [[package]] name = "object" version = "0.36.5" @@ -2417,14 +3092,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "os_pipe" -version = "1.2.1" +name = "option-ext" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffd2b0a5634335b135d5728d84c5e0fd726954b87111f7506a61c502280d982" -dependencies = [ - "libc", - "windows-sys 0.59.0", -] +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "overload" @@ -2537,7 +3208,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.69", "ucd-trie", ] @@ -2561,7 +3232,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -2577,29 +3248,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = 
"915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -2624,7 +3295,7 @@ dependencies = [ "mainline", "self_cell", "simple-dns", - "thiserror", + "thiserror 1.0.69", "tracing", "ureq", "wasm-bindgen", @@ -2672,7 +3343,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -2709,9 +3380,37 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" + +[[package]] +name = "portmapper" +version = "0.1.0" +source = "git+https://github.com/n0-computer/iroh?branch=main#43d0ea45b950a69aa3a3340b60275f53aa18b254" +dependencies = [ + "anyhow", + "base64", + "bytes", + "derive_more", + "futures-lite 2.5.0", + "futures-util", + "igd-next", + "iroh-metrics", + "libc", + "netwatch", + "num_enum", + "rand", + "serde", + "smallvec", + "socket2", + "thiserror 2.0.3", + "time", + "tokio", + "tokio-util", + "tracing", + "url", +] [[package]] name = "positioned-io" @@ -2725,9 +3424,9 @@ dependencies = [ [[package]] name = "postcard" -version = "1.0.10" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7f0a8d620d71c457dd1d47df76bb18960378da56af4527aaa10f515eee732e" +checksum = "f63d01def49fc815900a83e7a4a5083d2abc81b7ddd569a3fa0477778ae9b3ec" dependencies = [ "cobs", "embedded-io 0.4.0", @@ -2848,9 +3547,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -2875,7 +3574,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -2913,6 +3612,40 @@ dependencies = [ "winapi", ] +[[package]] +name = "quic-rpc" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc623a188942fc875926f7baeb2cb08ed4288b64f29072656eb051e360ee7623" +dependencies = [ + "anyhow", + "derive_more", + "educe", + "flume", + "futures-lite 2.5.0", + "futures-sink", + "futures-util", + "hex", + "pin-project", + "serde", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "quic-rpc-derive" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbef4c942978f74ef296ae40d43d4375c9d730b65a582688a358108cfd5c0cf7" +dependencies = [ + "proc-macro2", + "quic-rpc", + "quote", + "syn 1.0.109", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -2921,9 +3654,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", @@ -2932,34 +3665,38 @@ dependencies = [ "rustc-hash", "rustls", "socket2", - "thiserror", + 
"thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom", "rand", "ring", "rustc-hash", "rustls", + "rustls-pki-types", "slab", - "thiserror", + "thiserror 2.0.3", "tinyvec", "tracing", + "web-time", ] [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -2986,12 +3723,6 @@ dependencies = [ "pest_derive", ] -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - [[package]] name = "rand" version = "0.8.5" @@ -3054,12 +3785,13 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.12.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48406db8ac1f3cbc7dcdb56ec355343817958a356ff430259bb07baf7607e1e1" +checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" dependencies = [ "pem", "ring", + "rustls-pki-types", "time", "yasna", ] @@ -3075,9 +3807,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.4" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "074373f3e7e5d27d8741d19512232adb47be8622d3daef3a45bcae72050c3d2a" +checksum = "84b1de48a7cf7ba193e81e078d17ee2b786236eed1d3f7c60f8a09545efc4925" dependencies = [ "libc", ] @@ -3091,6 +3823,17 @@ dependencies = [ "bitflags 2.6.0", ] +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom", + "libredox", + "thiserror 1.0.69", +] + [[package]] name = "ref-cast" version = "1.0.23" @@ -3108,14 +3851,14 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] name = "reflink-copy" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc31414597d1cd7fdd2422798b7652a6329dda0fe0219e6335a13d5bcaa9aeb6" +checksum = "17400ed684c3a0615932f00c271ae3eea13e47056a1455821995122348ab6438" dependencies = [ "cfg-if", "rustix", @@ -3124,13 +3867,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -3145,9 +3888,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = 
"809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -3174,9 +3917,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64", "bytes", @@ -3202,7 +3945,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 1.0.2", "tokio", "tokio-rustls", "tower-service", @@ -3220,7 +3963,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ - "hostname", + "hostname 0.3.1", "quick-error", ] @@ -3251,9 +3994,9 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" +checksum = "47c75d7c5c6b673e58bf54d8544a9f432e3a925b0e80f7cd3602ab5c50c55519" dependencies = [ "const-oid", "digest", @@ -3279,12 +4022,30 @@ dependencies = [ "futures", "log", "netlink-packet-core", - "netlink-packet-route", + "netlink-packet-route 0.17.1", + "netlink-packet-utils", + "netlink-proto", + "netlink-sys", + "nix 0.26.4", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "rtnetlink" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b684475344d8df1859ddb2d395dd3dac4f8f3422a1aa0725993cb375fc5caba5" +dependencies = [ + "futures", + "log", + "netlink-packet-core", + "netlink-packet-route 0.19.0", "netlink-packet-utils", "netlink-proto", "netlink-sys", - "nix", - "thiserror", + "nix 0.27.1", + "thiserror 1.0.69", "tokio", ] @@ -3320,9 +4081,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -3333,9 +4094,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "log", "once_cell", @@ -3373,6 +4134,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-platform-verifier" @@ -3456,9 +4220,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -3505,9 +4269,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -3524,21 +4288,24 @@ name = "semver" version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +dependencies = [ + "serde", +] [[package]] name = "serde" -version = "1.0.213" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde-error" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e988182713aeed6a619a88bca186f6d6407483485ffe44c869ee264f8eabd13f" +checksum = "342110fb7a5d801060c885da03bf91bfa7c7ca936deafcc64bb6706375605d47" dependencies = [ "serde", ] @@ -3564,20 +4331,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -3585,6 +4352,25 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -3645,13 +4431,18 @@ dependencies = [ ] [[package]] -name = "shared_child" -version = "1.0.1" +name = "shell-words" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fa9338aed9a1df411814a5b2252f7cd206c55ae9bf2fa763f8de84603aa60c" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + +[[package]] +name = "shellexpand" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" dependencies = [ - "libc", - "windows-sys 0.59.0", + "dirs", ] [[package]] @@ -3706,11 +4497,17 @@ dependencies = [ "serde", ] +[[package]] +name = "smol_str" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fad6c857cbab2627dcf01ec85a623ca4e7dcb5691cbaa3d7fb7653671f0d09c9" + [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -3786,6 +4583,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "struct_iterable" version = "0.1.1" @@ -3806,7 +4615,7 @@ dependencies = [ "proc-macro2", "quote", "struct_iterable_internal", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -3817,34 +4626,25 @@ checksum = "e9426b2a0c03e6cc2ea8dbc0168dbbf943f88755e409fb91bcb8f6a268305f4a" [[package]] name = "structmeta" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ad9e09554f0456d67a69c1584c9798ba733a5b50349a6c0d0948710523922d" +checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] name = "structmeta-derive" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" +checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", -] - -[[package]] -name = "strum" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" -dependencies = [ - "strum_macros 0.25.3", + "syn 2.0.89", ] [[package]] @@ -3853,20 +4653,7 @@ version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ - "strum_macros 0.26.4", -] - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.82", + "strum_macros", ] [[package]] @@ -3875,11 +4662,11 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -3923,7 +4710,22 @@ dependencies = [ "pnet_packet", "rand", "socket2", - "thiserror", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "swarm-discovery" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39769914108ae68e261d85ceac7bce7095947130f79c29d4535e9b31fc702a40" +dependencies = [ + "acto", + "anyhow", + "hickory-proto 0.24.1", + "rand", + "socket2", "tokio", "tracing", ] @@ -3941,9 +4743,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.82" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83540f837a8afc019423a8edb95b52a8effe46957ee402287f4292fae35be021" +checksum = 
"44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -3963,9 +4765,15 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -3978,7 +4786,21 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", +] + +[[package]] +name = "sysinfo" +version = "0.26.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "winapi", ] [[package]] @@ -4002,20 +4824,14 @@ dependencies = [ "libc", ] -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.1", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -4023,34 +4839,74 @@ dependencies = [ [[package]] name = "test-strategy" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8361c808554228ad09bfed70f5c823caf8a3450b6881cc3a38eb57e8c08c1d9" +checksum = "2bf41af45e3f54cc184831d629d41d5b2bda8297e29c81add7ae4f362ed5e01b" dependencies = [ "proc-macro2", "quote", "structmeta", - "syn 2.0.82", + "syn 2.0.89", +] + +[[package]] +name = "testdir" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee79e927b64d193f5abb60d20a0eb56be0ee5a242fdeb8ce3bf054177006de52" +dependencies = [ + "anyhow", + "backtrace", + "cargo_metadata", + "once_cell", + "sysinfo", + "whoami", +] + +[[package]] +name = "testresult" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614b328ff036a4ef882c61570f72918f7e9c5bee1da33f8e7f91e01daee7e56c" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" -version = "1.0.65" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" 
dependencies = [ - "thiserror-impl", + "proc-macro2", + "quote", + "syn 2.0.89", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -4094,6 +4950,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" version = "1.8.0" @@ -4111,9 +4977,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -4135,7 +5001,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -4149,6 +5015,34 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls-acme" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3184e8e292a828dd4bca5b2a60aba830ec5ed873a66c9ebb6e65038fa649e827" +dependencies = [ + "async-trait", + "base64", + "chrono", + "futures", + "log", + "num-bigint", + "pem", + "proc-macro2", + "rcgen", + "reqwest", + "ring", + "rustls", + "serde", + "serde_json", + "thiserror 2.0.3", + "time", + "tokio", + "tokio-rustls", + "webpki-roots", + "x509-parser", +] + [[package]] name = "tokio-stream" version = "0.1.16" @@ -4170,7 +5064,19 @@ dependencies = [ "futures-util", "log", "tokio", - "tungstenite", + "tungstenite 0.21.0", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite 0.24.0", ] [[package]] @@ -4184,9 +5090,9 @@ dependencies = [ "http 1.1.0", "httparse", "js-sys", - "thiserror", + "thiserror 1.0.69", "tokio", - "tokio-tungstenite", + "tokio-tungstenite 0.21.0", "wasm-bindgen", "web-sys", ] @@ -4207,11 +5113,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -4220,10 +5141,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", + "serde", + "serde_spanned", "toml_datetime", "winnow", ] +[[package]] +name = "tower" +version = "0.5.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -4232,9 +5177,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -4244,20 +5189,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -4331,11 +5276,29 @@ dependencies = [ "log", "rand", "sha1", - "thiserror", + "thiserror 1.0.69", "url", "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log", + "rand", + "sha1", + "thiserror 1.0.69", + "utf-8", +] + [[package]] name = "typenum" version = "1.17.0" @@ -4371,9 +5334,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -4384,6 +5347,18 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -4423,12 +5398,12 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = 
"32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna", + "idna 1.0.3", "percent-encoding", "serde", ] @@ -4439,6 +5414,24 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + [[package]] name = "valuable" version = "0.1.0" @@ -4491,6 +5484,12 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" version = "0.2.95" @@ -4513,7 +5512,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -4547,7 +5546,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4567,7 +5566,7 @@ dependencies = [ "event-listener 4.0.3", "futures-util", "parking_lot", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4580,15 +5579,36 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] +[[package]] +name = "whoami" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" +dependencies = [ + "redox_syscall", + "wasite", + "web-sys", +] + [[package]] name = "widestring" version = "1.1.0" @@ -4637,12 +5657,12 @@ dependencies = [ [[package]] name = "windows" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core 0.51.1", - "windows-targets 0.48.5", + "windows-core 0.52.0", + "windows-targets 0.52.6", ] [[package]] @@ -4655,15 +5675,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-core" -version = "0.51.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" -dependencies = [ - "windows-targets 0.48.5", -] - [[package]] name = "windows-core" version = "0.52.0" @@ -4694,7 +5705,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -4705,7 +5716,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", ] [[package]] @@ -4907,27 +5918,30 @@ dependencies = [ [[package]] name = "wmi" -version = "0.13.4" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff00ac1309d4c462be86f03a55e409509e8bf4323ec296aeb4b381dd9aabe6ec" +checksum = "70df482bbec7017ce4132154233642de658000b24b805345572036782a66ad55" dependencies = [ "chrono", "futures", "log", "serde", - "thiserror", + "thiserror 1.0.69", "windows 0.58.0", "windows-core 0.58.0", ] [[package]] -name = "wyz" -version = "0.5.1" +name = "write16" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" [[package]] name = "x509-parser" @@ -4942,15 +5956,15 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", "time", ] [[package]] name = "xml-rs" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" +checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" [[package]] name = "xmltree" @@ -4970,6 +5984,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", +] + [[package]] name = "z32" version = "1.1.1" @@ -4994,7 +6032,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.82", + "syn 2.0.89", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", ] [[package]] @@ -5002,3 +6061,25 @@ name = "zeroize" version = "1.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] diff --git a/Cargo.toml b/Cargo.toml index 42647e5..db6a61f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iroh-docs" -version = "0.27.0" +version = "0.28.0" edition = "2021" readme = "README.md" description = "Iroh sync" @@ -35,12 +35,11 @@ futures-buffered = "0.2.4" futures-lite = "2.3.0" futures-util = { version = "0.3.25" } hex = "0.4" -iroh-base = "0.27.0" -iroh-blobs = { version = "0.27.0", optional = true, features = ["downloader"] } -iroh-gossip = { version = "0.27.0", optional = true } -iroh-metrics = { version = "0.27.0", default-features = false } -iroh-net = { version = "0.27.0", optional = true } -lru = "0.12" +iroh-base = "0.28.0" +iroh-blobs = { version = "0.28.0", optional = true, features = ["downloader"] } +iroh-gossip = { version = "0.28.0", optional = true } +iroh-metrics = { version = "0.28.0", default-features = false } +iroh = { version = "0.28", optional = true } num_enum = "0.7" postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } rand = "0.8.5" @@ -49,29 +48,66 @@ redb = { version = "2.0.0" } redb_v1 = { package = "redb", version = "1.5.1" } self_cell = "1.0.3" serde = { version = "1.0.164", features = ["derive"] } -strum = { version = "0.25", features = ["derive"] } +strum = { version = "0.26", features = ["derive"] } tempfile = { version = "3.4" } -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["sync", "rt", "time", "macros"] } tokio-stream = { version = "0.1", optional = true, features = ["sync"]} tokio-util = { version = "0.7.12", optional = true, features = ["codec", "io-util", "io", "rt"] } tracing = "0.1" +# rpc +nested_enum_utils = { version = "0.1.0", optional = true } +quic-rpc = { version = "0.15.1", optional = true } +quic-rpc-derive = { version = "0.15", optional = true } +serde-error = { version = "0.1.3", optional = true } +portable-atomic = { version = "1.9.0", optional = true } + +# cli +clap = { version = "4", features = ["derive"], optional = true } +console = { version = "0.15", optional = true } +indicatif = { version = "0.17", features = ["tokio"], optional = true } +dialoguer = { version = "0.11", optional = true } +colored = { version = "2.1", optional = true } +shellexpand = { version = "3.1", optional = true } + [dev-dependencies] -iroh-test = "0.27.0" +iroh-test = "0.28.0" rand_chacha = "0.3.1" tokio = { version = "1", features = ["sync", "macros"] } proptest = "1.2.0" tempfile = "3.4" -test-strategy = "0.3.1" -data-encoding = "2.6.0" +test-strategy = "0.4" +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +parking_lot = "0.12.3" +testresult = "0.4.1" +nested_enum_utils = "0.1.0" +iroh-io = "0.6.1" +testdir = "0.9.1" [features] -default = ["net", "metrics", "engine"] -net = ["dep:iroh-net", "tokio/io-util", "dep:tokio-stream", "dep:tokio-util"] -metrics = 
["iroh-metrics/metrics"] +default = ["net", "metrics", "engine", "rpc", "test-utils"] +net = ["dep:iroh", "tokio/io-util", "dep:tokio-stream", "dep:tokio-util"] +metrics = ["iroh-metrics/metrics", "iroh/metrics"] engine = ["net", "dep:iroh-gossip", "dep:iroh-blobs"] +test-utils = ["iroh/test-utils"] +cli = ["rpc", "dep:clap", "dep:indicatif", "dep:console", "dep:colored", "dep:dialoguer", "dep:shellexpand", "iroh-blobs/rpc"] +rpc = [ + "engine", + "dep:nested_enum_utils", + "dep:quic-rpc", + "dep:quic-rpc-derive", + "dep:serde-error", + "dep:portable-atomic", +] [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "iroh_docsrs"] + +[patch.crates-io] +iroh-metrics = { git = "https://github.com/n0-computer/iroh", branch = "main" } +iroh-base = { git = "https://github.com/n0-computer/iroh", branch = "main" } +iroh = { git = "https://github.com/n0-computer/iroh", branch = "main" } +iroh-blobs = { git = "https://github.com/n0-computer/iroh-blobs", branch = "main" } +iroh-gossip = { git = "https://github.com/n0-computer/iroh-gossip", branch = "main" } diff --git a/deny.toml b/deny.toml index 7db03d1..6f93301 100644 --- a/deny.toml +++ b/deny.toml @@ -18,9 +18,9 @@ allow = [ "ISC", "MIT", "OpenSSL", - "Unicode-DFS-2016", "Zlib", "MPL-2.0", # https://fossa.com/blog/open-source-software-licenses-101-mozilla-public-license-2-0/ + "Unicode-3.0" ] [[licenses.clarify]] @@ -33,4 +33,12 @@ license-files = [ [advisories] ignore = [ "RUSTSEC-2024-0370", # unmaintained, no upgrade available + "RUSTSEC-2024-0384", # unmaintained, no upgrade available +] + +[sources] +allow-git = [ + "https://github.com/n0-computer/iroh.git", + "https://github.com/n0-computer/iroh-blobs.git", + "https://github.com/n0-computer/iroh-gossip.git", ] diff --git a/src/cli.rs b/src/cli.rs new file mode 100644 index 0000000..749accc --- /dev/null +++ b/src/cli.rs @@ -0,0 +1,1258 @@ +//! Define commands for interacting with documents in Iroh. 
+ +use std::{ + cell::RefCell, + collections::BTreeMap, + env, + path::{Path, PathBuf}, + rc::Rc, + str::FromStr, + sync::{Arc, RwLock}, + time::{Duration, Instant}, +}; + +use anyhow::{anyhow, bail, Context, Result}; +use clap::Parser; +use colored::Colorize; +use dialoguer::Confirm; +use futures_buffered::BufferedStreamExt; +use futures_lite::{Stream, StreamExt}; +use indicatif::{HumanBytes, HumanDuration, MultiProgress, ProgressBar, ProgressStyle}; +use iroh_base::{base32::fmt_short, hash::Hash, node_addr::AddrInfoOptions}; +use iroh_blobs::{ + provider::AddProgress, + rpc::client::blobs::{self, WrapOption}, + util::{ + fs::{path_content_info, path_to_key, PathContent}, + SetTagOption, + }, + Tag, +}; +use serde::{Deserialize, Serialize}; +use tokio::io::AsyncReadExt; +use tracing::warn; + +use crate::{ + engine::Origin, + rpc::client::docs::{self, Doc, Entry, LiveEvent, ShareMode}, + store::{DownloadPolicy, FilterKind, Query, SortDirection}, + AuthorId, ContentStatus, DocTicket, NamespaceId, +}; + +pub mod authors; + +type AuthorsClient = crate::rpc::client::authors::Client; + +const ENV_AUTHOR: &str = "IROH_AUTHOR"; +const ENV_DOC: &str = "IROH_DOC"; + +#[derive(Debug, Clone, Copy, Eq, PartialEq, strum::AsRefStr, strum::EnumString, strum::Display)] +pub(crate) enum ConsolePaths { + #[strum(serialize = "current-author")] + CurrentAuthor, + #[strum(serialize = "history")] + History, +} + +impl ConsolePaths { + fn root(iroh_data_dir: impl AsRef<Path>) -> PathBuf { + PathBuf::from(iroh_data_dir.as_ref()).join("console") + } + pub fn with_iroh_data_dir(self, iroh_data_dir: impl AsRef<Path>) -> PathBuf { + Self::root(iroh_data_dir).join(self.as_ref()) + } +} + +/// Environment for CLI and REPL +/// +/// This is cheaply cloneable and has interior mutability. If not running in the console +/// environment, `Self::set_doc` and `Self::set_author` will lead to an error, as changing the +/// environment is only supported within the console. +#[derive(Clone, Debug)] +pub struct ConsoleEnv(Arc<RwLock<ConsoleEnvInner>>); + +#[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)] +struct ConsoleEnvInner { + /// Active author. Read from the IROH_AUTHOR env variable. + /// For the console it is also read from/persisted to a file. + /// Defaults to the node's default author if both are empty. + author: AuthorId, + /// Active doc. Read from the IROH_DOC env variable. Not persisted. + doc: Option<NamespaceId>, + is_console: bool, + iroh_data_dir: PathBuf, +} + +impl ConsoleEnv { + /// Read from environment variables and the console config file. + pub async fn for_console( + iroh_data_dir: PathBuf, + authors: &crate::rpc::client::authors::Client, + ) -> Result<Self> { + let console_data_dir = ConsolePaths::root(&iroh_data_dir); + tokio::fs::create_dir_all(&console_data_dir) + .await + .with_context(|| { + format!( + "failed to create console data directory at `{}`", + console_data_dir.to_string_lossy() + ) + })?; + + Self::migrate_console_files_016_017(&iroh_data_dir).await?; + + let configured_author = Self::get_console_default_author(&iroh_data_dir)?; + let author = env_author(configured_author, authors).await?; + let env = ConsoleEnvInner { + author, + doc: env_doc()?, + is_console: true, + iroh_data_dir, + }; + Ok(Self(Arc::new(RwLock::new(env)))) + } + + /// Read only from environment variables.
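+ /// + /// Unlike [`Self::for_console`], this never reads or writes console state on disk; with no `IROH_AUTHOR` set, the author falls back to the node's default author.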
+ pub async fn for_cli(iroh_data_dir: PathBuf, authors: &AuthorsClient) -> Result<Self> { + let author = env_author(None, authors).await?; + let env = ConsoleEnvInner { + author, + doc: env_doc()?, + is_console: false, + iroh_data_dir, + }; + Ok(Self(Arc::new(RwLock::new(env)))) + } + + fn get_console_default_author(iroh_data_root: &Path) -> anyhow::Result<Option<AuthorId>> { + let author_path = ConsolePaths::CurrentAuthor.with_iroh_data_dir(iroh_data_root); + if let Ok(s) = std::fs::read_to_string(&author_path) { + let author = AuthorId::from_str(&s).with_context(|| { + format!( + "Failed to parse author file at {}", + author_path.to_string_lossy() + ) + })?; + Ok(Some(author)) + } else { + Ok(None) + } + } + + /// True if running in an Iroh console session, false for a CLI command + pub(crate) fn is_console(&self) -> bool { + self.0.read().unwrap().is_console + } + + /// Return the iroh data directory + pub fn iroh_data_dir(&self) -> PathBuf { + self.0.read().unwrap().iroh_data_dir.clone() + } + + /// Set the active author. + /// + /// Will error if not running in the Iroh console. + /// Otherwise persists the author to a file in the Iroh data dir. + pub(crate) fn set_author(&self, author: AuthorId) -> anyhow::Result<()> { + let author_path = ConsolePaths::CurrentAuthor.with_iroh_data_dir(self.iroh_data_dir()); + let mut inner = self.0.write().unwrap(); + if !inner.is_console { + bail!("Switching the author is only supported within the Iroh console, not on the command line"); + } + inner.author = author; + std::fs::write(author_path, author.to_string().as_bytes())?; + Ok(()) + } + + /// Set the active document. + /// + /// Will error if not running in the Iroh console. + /// Will not persist, only valid for the current console session. + pub(crate) fn set_doc(&self, doc: NamespaceId) -> anyhow::Result<()> { + let mut inner = self.0.write().unwrap(); + if !inner.is_console { + bail!("Switching the document is only supported within the Iroh console, not on the command line"); + } + inner.doc = Some(doc); + Ok(()) + } + + /// Get the active document. + pub fn doc(&self, arg: Option<NamespaceId>) -> anyhow::Result<NamespaceId> { + let inner = self.0.read().unwrap(); + let doc_id = arg.or(inner.doc).ok_or_else(|| { + anyhow!( + "Missing document id. Set the active document with the `IROH_DOC` environment variable or the `-d` option.\n\ + In the console, you can also set the active document with `doc switch`." + ) + })?; + Ok(doc_id) + } + + /// Get the active author. + /// + /// This is the node's default author, or, in the console, the author that was last + /// switched to. + pub fn author(&self) -> AuthorId { + let inner = self.0.read().unwrap(); + inner.author + } + + pub(crate) async fn migrate_console_files_016_017(iroh_data_dir: &Path) -> Result<()> { + // In iroh up to 0.16, we stored console settings directly in the data directory. Starting + // from 0.17, they live in a subdirectory and have new paths.
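+ // Note: migration is best-effort by design; a failed rename below only logs a warning and the console continues with fresh defaults.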
+ let old_current_author = iroh_data_dir.join("default_author.pubkey"); + if old_current_author.is_file() { + if let Err(err) = tokio::fs::rename( + &old_current_author, + ConsolePaths::CurrentAuthor.with_iroh_data_dir(iroh_data_dir), + ) + .await + { + warn!(path=%old_current_author.to_string_lossy(), "failed to migrate the console's current author file: {err}"); + } + } + let old_history = iroh_data_dir.join("history"); + if old_history.is_file() { + if let Err(err) = tokio::fs::rename( + &old_history, + ConsolePaths::History.with_iroh_data_dir(iroh_data_dir), + ) + .await + { + warn!(path=%old_history.to_string_lossy(), "failed to migrate the console's history file: {err}"); + } + } + Ok(()) + } +} + +async fn env_author(from_config: Option<AuthorId>, authors: &AuthorsClient) -> Result<AuthorId> { + if let Some(author) = env::var(ENV_AUTHOR) + .ok() + .map(|s| { + s.parse() + .context("Failed to parse IROH_AUTHOR environment variable") + }) + .transpose()? + .or(from_config) + { + Ok(author) + } else { + authors.default().await + } +} + +fn env_doc() -> Result<Option<NamespaceId>> { + env::var(ENV_DOC) + .ok() + .map(|s| { + s.parse() + .context("Failed to parse IROH_DOC environment variable") + }) + .transpose() +} + +/// The maximum length of content to display before truncating. +const MAX_DISPLAY_CONTENT_LEN: u64 = 80; + +/// Different modes to display content. +#[derive(Debug, Clone, Copy, clap::ValueEnum)] +pub enum DisplayContentMode { + /// Displays the content if small enough, otherwise it displays the content hash. + Auto, + /// Display the content unconditionally. + Content, + /// Display the hash of the content. + Hash, + /// Display the shortened hash of the content. + ShortHash, +} + +/// General download policy for a document. +#[derive(Debug, Clone, Copy, clap::ValueEnum, derive_more::Display)] +pub enum FetchKind { + /// Download everything in this document. + Everything, + /// Download nothing in this document. + Nothing, +} + +#[allow(missing_docs)] +/// Subcommands for the download policy command. +#[derive(Debug, Clone, clap::Subcommand)] +pub enum DlPolicyCmd { + Set { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// Set the general download policy for this document. + kind: FetchKind, + /// Add an exception to the download policy. + /// An exception must be formatted as `<matching_kind>:<encoding>:<pattern>`. + /// + /// - `<matching_kind>` can be either `prefix` or `exact`. + /// + /// - `<encoding>` can be either `utf8` or `hex`. + #[clap(short, long, value_name = "<matching_kind>:<encoding>:<pattern>")] + except: Vec<FilterKind>, + }, + Get { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + }, +} + +/// Possible `Document` commands. +#[allow(missing_docs)] +#[derive(Debug, Clone, Parser)] +pub enum DocCommands { + /// Set the active document (only works within the Iroh console). + Switch { id: NamespaceId }, + /// Create a new document. + Create { + /// Switch to the created document (only in the Iroh console). + #[clap(long)] + switch: bool, + }, + /// Join a document from a ticket. + Join { + ticket: DocTicket, + /// Switch to the joined document (only in the Iroh console). + #[clap(long)] + switch: bool, + }, + /// List documents. + List, + /// Share a document with peers. + Share { + /// Document to operate on.
+ /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// The sharing mode. + mode: ShareMode, + /// Options to configure the address information in the generated ticket. + /// + /// Use `relay-and-addresses` in networks with no internet connectivity. + #[clap(long, default_value_t = AddrInfoOptions::Id)] + addr_options: AddrInfoOptions, + }, + /// Set an entry in a document. + Set { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// Author of the entry. + /// + /// Required unless the author is set through the IROH_AUTHOR environment variable. + /// Within the Iroh console, the active author can also be set with `author switch`. + #[clap(long)] + author: Option<AuthorId>, + /// Key to the entry (parsed as UTF-8 string). + key: String, + /// Content to store for this entry (parsed as UTF-8 string). + value: String, + }, + /// Set the download policies for a document. + #[clap(subcommand)] + DlPolicy(DlPolicyCmd), + /// Get entries in a document. + /// + /// Shows the author, content hash and content length for all entries for this key. + Get { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// Key to the entry (parsed as UTF-8 string). + key: String, + /// If true, get all entries that start with KEY. + #[clap(short, long)] + prefix: bool, + /// Filter by author. + #[clap(long)] + author: Option<AuthorId>, + /// How to show the contents of the key. + #[clap(short, long, value_enum, default_value_t=DisplayContentMode::Auto)] + mode: DisplayContentMode, + }, + /// Delete all entries below a key prefix. + Del { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// Author of the entry. + /// + /// Required unless the author is set through the IROH_AUTHOR environment variable. + /// Within the Iroh console, the active author can also be set with `author switch`. + #[clap(long)] + author: Option<AuthorId>, + /// Prefix to delete. All entries whose key starts with or is equal to the prefix will be + /// deleted. + prefix: String, + }, + /// List all keys in a document. + #[clap(alias = "ls")] + Keys { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// Filter by author. + #[clap(long)] + author: Option<AuthorId>, + /// Optional key prefix (parsed as UTF-8 string). + prefix: Option<String>, + /// How to sort the entries. + #[clap(long, default_value_t=Sorting::Author)] + sort: Sorting, + /// Sort in descending order. + #[clap(long)] + desc: bool, + /// How to show the contents of the keys. + #[clap(short, long, value_enum, default_value_t=DisplayContentMode::ShortHash)] + mode: DisplayContentMode, + }, + /// Import data into a document. + Import { + /// Document to operate on.
+ /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// Author of the entry. + /// + /// Required unless the author is set through the IROH_AUTHOR environment variable. + /// Within the Iroh console, the active author can also be set with `author switch`. + #[clap(long)] + author: Option<AuthorId>, + /// Prefix to add to imported entries (parsed as UTF-8 string). Defaults to no prefix. + #[clap(long)] + prefix: Option<String>, + /// Path to a local file or directory to import. + /// + /// Pathnames will be used as the document key. + path: String, + /// If true, don't copy the file into iroh, reference the existing file instead. + /// + /// Moving a file imported with `in-place` will result in data corruption. + #[clap(short, long)] + in_place: bool, + /// When true, you will not get a prompt to confirm you want to import the files. + #[clap(long, default_value_t = false)] + no_prompt: bool, + }, + /// Export the most recent data for a key from a document. + Export { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + /// Key to the entry (parsed as UTF-8 string). + /// + /// When just the key is present, will export the latest entry for that key. + key: String, + /// Path to export to. + #[clap(short, long)] + out: String, + }, + /// Watch for changes and events on a document. + Watch { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + #[clap(short, long)] + doc: Option<NamespaceId>, + }, + /// Stop syncing a document. + Leave { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + doc: Option<NamespaceId>, + }, + /// Delete a document from the local node. + /// + /// This is a destructive operation. Both the document secret key and all entries in the + /// document will be permanently deleted from the node's storage. Content blobs will be deleted + /// through garbage collection unless they are referenced from another document or tag. + Drop { + /// Document to operate on. + /// + /// Required unless the document is set through the IROH_DOC environment variable. + /// Within the Iroh console, the active document can also be set with `doc switch`. + doc: Option<NamespaceId>, + }, +} + +/// How to sort. +#[derive(clap::ValueEnum, Clone, Debug, Default, strum::Display)] +#[strum(serialize_all = "kebab-case")] +pub enum Sorting { + /// Sort by author, then key. + #[default] + Author, + /// Sort by key, then author. + Key, +} + +impl From<Sorting> for crate::store::SortBy { + fn from(value: Sorting) -> Self { + match value { + Sorting::Author => Self::AuthorKey, + Sorting::Key => Self::KeyAuthor, + } + } +} + +impl DocCommands { + /// Runs the document command given the iroh client and the console environment.
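+ /// + /// Note that commands which change the active document or author (`switch`, or `create`/`join` with `--switch`) only succeed when running inside the console, since that is the only place the environment may change.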
+ pub async fn run( + self, + docs: &docs::Client, + blobs: &blobs::Client, + env: &ConsoleEnv, + ) -> Result<()> { + match self { + Self::Switch { id: doc } => { + env.set_doc(doc)?; + println!("Active doc is now {}", fmt_short(doc.as_bytes())); + } + Self::Create { switch } => { + if switch && !env.is_console() { + bail!("The --switch flag is only supported within the Iroh console."); + } + + let doc = docs.create().await?; + println!("{}", doc.id()); + + if switch { + env.set_doc(doc.id())?; + println!("Active doc is now {}", fmt_short(doc.id().as_bytes())); + } + } + Self::Join { ticket, switch } => { + if switch && !env.is_console() { + bail!("The --switch flag is only supported within the Iroh console."); + } + + let doc = docs.import(ticket).await?; + println!("{}", doc.id()); + + if switch { + env.set_doc(doc.id())?; + println!("Active doc is now {}", fmt_short(doc.id().as_bytes())); + } + } + Self::List => { + let mut stream = docs.list().await?; + while let Some((id, kind)) = stream.try_next().await? { + println!("{id} {kind}") + } + } + Self::Share { + doc, + mode, + addr_options, + } => { + let doc = get_doc(docs, env, doc).await?; + let ticket = doc.share(mode, addr_options).await?; + println!("{}", ticket); + } + Self::Set { + doc, + author, + key, + value, + } => { + let doc = get_doc(docs, env, doc).await?; + let author = author.unwrap_or(env.author()); + let key = key.as_bytes().to_vec(); + let value = value.as_bytes().to_vec(); + let hash = doc.set_bytes(author, key, value).await?; + println!("{}", hash); + } + Self::Del { + doc, + author, + prefix, + } => { + let doc = get_doc(docs, env, doc).await?; + let author = author.unwrap_or(env.author()); + let prompt = + format!("Deleting all entries whose key starts with {prefix}. Continue?"); + if Confirm::new() + .with_prompt(prompt) + .interact() + .unwrap_or(false) + { + let key = prefix.as_bytes().to_vec(); + let removed = doc.del(author, key).await?; + println!("Deleted {removed} entries."); + println!( + "Inserted an empty entry for author {} with key {prefix}.", + fmt_short(author) + ); + } else { + println!("Aborted.") + } + } + Self::Get { + doc, + key, + prefix, + author, + mode, + } => { + let doc = get_doc(docs, env, doc).await?; + let key = key.as_bytes().to_vec(); + let query = Query::all(); + let query = match (author, prefix) { + (None, false) => query.key_exact(key), + (None, true) => query.key_prefix(key), + (Some(author), true) => query.author(author).key_prefix(key), + (Some(author), false) => query.author(author).key_exact(key), + }; + + let mut stream = doc.get_many(query).await?; + while let Some(entry) = stream.try_next().await? { + println!("{}", fmt_entry(blobs, &entry, mode).await); + } + } + Self::Keys { + doc, + prefix, + author, + mode, + sort, + desc, + } => { + let doc = get_doc(docs, env, doc).await?; + let mut query = Query::all(); + if let Some(author) = author { + query = query.author(author); + } + if let Some(prefix) = prefix { + query = query.key_prefix(prefix); + } + let direction = match desc { + true => SortDirection::Desc, + false => SortDirection::Asc, + }; + query = query.sort_by(sort.into(), direction); + let mut stream = doc.get_many(query).await?; + while let Some(entry) = stream.try_next().await? 
{ + println!("{}", fmt_entry(blobs, &entry, mode).await); + } + } + Self::Leave { doc } => { + let doc = get_doc(docs, env, doc).await?; + doc.leave().await?; + println!("Doc {} is now inactive", fmt_short(doc.id())); + } + Self::Import { + doc, + author, + prefix, + path, + in_place, + no_prompt, + } => { + let doc = get_doc(docs, env, doc).await?; + let author = author.unwrap_or(env.author()); + let mut prefix = prefix.unwrap_or_else(|| String::from("")); + + if prefix.ends_with('/') { + prefix.pop(); + } + let root = canonicalize_path(&path)?.canonicalize()?; + let tag = tag_from_file_name(&root)?; + + let root0 = root.clone(); + println!("Preparing import..."); + // get information about the directory or file we are trying to import + // and confirm with the user that they still want to import the file + let PathContent { size, files } = + tokio::task::spawn_blocking(|| path_content_info(root0)).await??; + if !no_prompt { + let prompt = format!("Import {files} files totaling {}?", HumanBytes(size)); + if !Confirm::new() + .with_prompt(prompt) + .interact() + .unwrap_or(false) + { + println!("Aborted."); + return Ok(()); + } else { + print!("\r"); + } + } + + let stream = blobs + .add_from_path( + root.clone(), + in_place, + SetTagOption::Named(tag.clone()), + WrapOption::NoWrap, + ) + .await?; + let root_prefix = match root.parent() { + Some(p) => p.to_path_buf(), + None => PathBuf::new(), + }; + let start = Instant::now(); + import_coordinator(doc, author, root_prefix, prefix, stream, size, files).await?; + println!("Success! ({})", HumanDuration(start.elapsed())); + } + Self::Export { doc, key, out } => { + let doc = get_doc(docs, env, doc).await?; + let key_str = key.clone(); + let key = key.as_bytes().to_vec(); + let path: PathBuf = canonicalize_path(&out)?; + let mut stream = doc.get_many(Query::key_exact(key)).await?; + let entry = match stream.try_next().await? 
{ + None => { + println!("<unable to find entry for key {key_str}>"); + return Ok(()); + } + Some(e) => e, + }; + match blobs.read(entry.content_hash()).await { + Ok(mut content) => { + if let Some(dir) = path.parent() { + if let Err(err) = std::fs::create_dir_all(dir) { + println!( + "<unable to create directory for {}: {err}>", + path.display() + ); + } + }; + let pb = ProgressBar::new(content.size()); + pb.set_style(ProgressStyle::default_bar() + .template("{spinner:.green} [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, eta {eta})").unwrap() + .progress_chars("=>-")); + let file = tokio::fs::File::create(path.clone()).await?; + if let Err(err) = + tokio::io::copy(&mut content, &mut pb.wrap_async_write(file)).await + { + pb.finish_and_clear(); + println!("<unable to write to file {}: {err}>", path.display()) + } else { + pb.finish_and_clear(); + println!("wrote '{key_str}' to {}", path.display()); + } + } + Err(err) => println!("<unable to read content: {err}>"), + } + } + Self::Watch { doc } => { + let doc = get_doc(docs, env, doc).await?; + let mut stream = doc.subscribe().await?; + while let Some(event) = stream.next().await { + let event = event?; + match event { + LiveEvent::InsertLocal { entry } => { + println!( + "local change: {}", + fmt_entry(blobs, &entry, DisplayContentMode::Auto).await + ) + } + LiveEvent::InsertRemote { + entry, + from, + content_status, + } => { + let content = match content_status { + ContentStatus::Complete => { + fmt_entry(blobs, &entry, DisplayContentMode::Auto).await + } + ContentStatus::Incomplete => { + let (Ok(content) | Err(content)) = + fmt_content(blobs, &entry, DisplayContentMode::ShortHash) + .await; + format!("<incomplete: {} ({})>", content, human_len(&entry)) + } + ContentStatus::Missing => { + let (Ok(content) | Err(content)) = + fmt_content(blobs, &entry, DisplayContentMode::ShortHash) + .await; + format!("<missing: {} ({})>", content, human_len(&entry)) + } + }; + println!( + "remote change via @{}: {}", + fmt_short(from.as_bytes()), + content + ) + } + LiveEvent::ContentReady { hash } => { + println!("content ready: {}", fmt_short(hash.as_bytes())) + } + LiveEvent::SyncFinished(event) => { + let origin = match event.origin { + Origin::Accept => "they initiated", + Origin::Connect(_) => "we initiated", + }; + match event.result { + Ok(details) => { + println!( + "synced peer {} ({origin}, received {}, sent {})", + fmt_short(event.peer), + details.entries_received, + details.entries_sent + ) + } + Err(err) => println!( + "failed to sync with peer {} ({origin}): {err}", + fmt_short(event.peer) + ), + } + } + LiveEvent::NeighborUp(peer) => { + println!("neighbor peer up: {peer:?}"); + } + LiveEvent::NeighborDown(peer) => { + println!("neighbor peer down: {peer:?}"); + } + LiveEvent::PendingContentReady => { + println!("all pending content is now ready") + } + } + } + } + Self::Drop { doc } => { + let doc = get_doc(docs, env, doc).await?; + println!( + "Deleting a document will permanently remove the document secret key, all document entries, \n\ + and all content blobs which are not referenced from other docs or tags."
+ ); + let prompt = format!("Delete document {}?", fmt_short(doc.id())); + if Confirm::new() + .with_prompt(prompt) + .interact() + .unwrap_or(false) + { + docs.drop_doc(doc.id()).await?; + println!("Doc {} has been deleted.", fmt_short(doc.id())); + } else { + println!("Aborted.") + } + } + Self::DlPolicy(DlPolicyCmd::Set { doc, kind, except }) => { + let doc = get_doc(docs, env, doc).await?; + let download_policy = match kind { + FetchKind::Everything => DownloadPolicy::EverythingExcept(except), + FetchKind::Nothing => DownloadPolicy::NothingExcept(except), + }; + if let Err(e) = doc.set_download_policy(download_policy).await { + println!("Could not set the document's download policy. {e}") + } + } + Self::DlPolicy(DlPolicyCmd::Get { doc }) => { + let doc = get_doc(docs, env, doc).await?; + match doc.get_download_policy().await { + Ok(dl_policy) => { + let (kind, exceptions) = match dl_policy { + DownloadPolicy::NothingExcept(exceptions) => { + (FetchKind::Nothing, exceptions) + } + DownloadPolicy::EverythingExcept(exceptions) => { + (FetchKind::Everything, exceptions) + } + }; + println!("Download {kind} in this document."); + if !exceptions.is_empty() { + println!("Exceptions:"); + for exception in exceptions { + println!("{exception}") + } + } + } + Err(x) => { + println!("Could not get the document's download policy: {x}") + } + } + } + } + Ok(()) + } +} + +/// Gets the document given the client, the environment (and maybe the [`crate::keys::NamespaceId`]). +async fn get_doc( + docs: &docs::Client, + env: &ConsoleEnv, + id: Option<NamespaceId>, +) -> anyhow::Result<Doc> { + let doc_id = env.doc(id)?; + docs.open(doc_id).await?.context("Document not found") +} + +/// Formats the content. If an error occurs it's returned in a formatted, friendly way. +async fn fmt_content( + blobs: &blobs::Client, + entry: &Entry, + mode: DisplayContentMode, +) -> Result<String, String> { + let read_failed = |err: anyhow::Error| format!("<failed to get content: {err}>"); + let encode_hex = |err: std::string::FromUtf8Error| format!("0x{}", hex::encode(err.as_bytes())); + let as_utf8 = |buf: Vec<u8>| String::from_utf8(buf).map(|repr| format!("\"{repr}\"")); + + match mode { + DisplayContentMode::Auto => { + if entry.content_len() < MAX_DISPLAY_CONTENT_LEN { + // small content: read fully as UTF-8 + let bytes = blobs + .read_to_bytes(entry.content_hash()) + .await + .map_err(read_failed)?; + Ok(as_utf8(bytes.into()).unwrap_or_else(encode_hex)) + } else { + // large content: read just the first part as UTF-8 + let mut blob_reader = blobs + .read(entry.content_hash()) + .await + .map_err(read_failed)?; + let mut buf = Vec::with_capacity(MAX_DISPLAY_CONTENT_LEN as usize + 5); + + blob_reader + .read_buf(&mut buf) + .await + .map_err(|io_err| read_failed(io_err.into()))?; + let mut repr = as_utf8(buf).unwrap_or_else(encode_hex); + // let users know this is not shown in full + repr.push_str("..."); + Ok(repr) + } + } + DisplayContentMode::Content => { + // read fully as UTF-8 + let bytes = blobs + .read_to_bytes(entry.content_hash()) + .await + .map_err(read_failed)?; + Ok(as_utf8(bytes.into()).unwrap_or_else(encode_hex)) + } + DisplayContentMode::ShortHash => { + let hash = entry.content_hash(); + Ok(fmt_short(hash.as_bytes())) + } + DisplayContentMode::Hash => { + let hash = entry.content_hash(); + Ok(hash.to_string()) + } + } +} + +/// Converts the [`Entry`] to human-readable bytes. +fn human_len(entry: &Entry) -> HumanBytes { + HumanBytes(entry.content_len()) +} + +/// Formats an entry for display as a `String`.
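+/// +/// The output has the shape `@<author>: <key> = <content> (<len>)`, for example `@7f9a…: "greeting" = "hello" (5 B)` (illustrative values).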
+#[must_use = "this won't be printed, you need to print it yourself"] +async fn fmt_entry(blobs: &blobs::Client, entry: &Entry, mode: DisplayContentMode) -> String { + let key = std::str::from_utf8(entry.key()) + .unwrap_or("<bad key>") + .bold(); + let author = fmt_short(entry.author()); + let (Ok(content) | Err(content)) = fmt_content(blobs, entry, mode).await; + let len = human_len(entry); + format!("@{author}: {key} = {content} ({len})") +} + +/// Converts a path to a canonical path. +fn canonicalize_path(path: &str) -> anyhow::Result<PathBuf> { + let path = PathBuf::from(shellexpand::tilde(&path).to_string()); + Ok(path) +} + +/// Creates a [`Tag`] from a file name (given as a [`Path`]). +fn tag_from_file_name(path: &Path) -> anyhow::Result<Tag> { + match path.file_name() { + Some(name) => name + .to_os_string() + .into_string() + .map(|t| t.into()) + .map_err(|e| anyhow!("{e:?} contains invalid Unicode")), + None => bail!("the given `path` does not have a proper directory or file name"), + } +} + +/// Takes the `BlobsClient::add_from_path` progress stream and coordinates adding blobs to a +/// document via the hash of the blob. It also creates and powers the +/// `ImportProgressBar`. +#[tracing::instrument(skip_all)] +async fn import_coordinator( + doc: Doc, + author_id: AuthorId, + root: PathBuf, + prefix: String, + blob_add_progress: impl Stream<Item = Result<AddProgress>> + Send + Unpin + 'static, + expected_size: u64, + expected_entries: u64, +) -> Result<()> { + let imp = ImportProgressBar::new( + &root.display().to_string(), + doc.id(), + expected_size, + expected_entries, + ); + let task_imp = imp.clone(); + + let collections = Rc::new(RefCell::new(BTreeMap::< + u64, + (String, u64, Option<Hash>, u64), + >::new())); + + let doc2 = doc.clone(); + let imp2 = task_imp.clone(); + + let _stats: Vec<_> = blob_add_progress + .filter_map(|item| { + let item = match item.context("Error adding files") { + Err(e) => return Some(Err(e)), + Ok(item) => item, + }; + match item { + AddProgress::Found { name, id, size } => { + tracing::info!("Found({id},{name},{size})"); + imp.add_found(name.clone(), size); + collections.borrow_mut().insert(id, (name, size, None, 0)); + None + } + AddProgress::Progress { id, offset } => { + tracing::info!("Progress({id}, {offset})"); + if let Some((_, size, _, last_val)) = collections.borrow_mut().get_mut(&id) { + assert!(*last_val <= offset, "progress offset must not go backwards"); + assert!(offset <= *size, "progress offset must not exceed the file size"); + imp.add_progress(offset - *last_val); + *last_val = offset; + } + None + } + AddProgress::Done { hash, id } => { + tracing::info!("Done({id},{hash:?})"); + match collections.borrow_mut().get_mut(&id) { + Some((path_str, size, ref mut h, last_val)) => { + imp.add_progress(*size - *last_val); + imp.import_found(path_str.clone()); + let path = PathBuf::from(path_str.clone()); + *h = Some(hash); + let key = + match path_to_key(path, Some(prefix.clone()), Some(root.clone())) { + Ok(k) => k.to_vec(), + Err(e) => { + tracing::info!( + "error getting key from {}, id {id}", + path_str + ); + return Some(Err(anyhow::anyhow!( + "Issue creating a key for entry {hash:?}: {e}" + ))); + } + }; + // send update to doc + tracing::info!( + "setting entry {} (id: {id}) to doc", + String::from_utf8(key.clone()).unwrap() + ); + Some(Ok((key, hash, *size))) + } + None => { + tracing::info!( + "error: got `AddProgress::Done` for unknown collection id {id}" + ); + Some(Err(anyhow::anyhow!( + "Received progress information on an unknown file." + ))) + } + } + } + AddProgress::AllDone { hash, ..
} => { + imp.add_done(); + tracing::info!("AddProgress::AllDone({hash:?})"); + None + } + AddProgress::Abort(e) => { + tracing::info!("Error while adding data: {e}"); + Some(Err(anyhow::anyhow!("Error while adding files: {e}"))) + } + } + }) + .map(move |res| { + let doc = doc2.clone(); + let imp = imp2.clone(); + async move { + match res { + Ok((key, hash, size)) => { + let doc = doc.clone(); + doc.set_hash(author_id, key, hash, size).await?; + imp.import_progress(); + Ok(size) + } + Err(err) => Err(err), + } + } + }) + .buffered_unordered(128) + .try_collect() + .await?; + + task_imp.all_done(); + Ok(()) +} + +/// Progress bar for importing files. +#[derive(Debug, Clone)] +struct ImportProgressBar { + mp: MultiProgress, + import: ProgressBar, + add: ProgressBar, +} + +impl ImportProgressBar { + /// Creates a new import progress bar. + fn new(source: &str, doc_id: NamespaceId, expected_size: u64, expected_entries: u64) -> Self { + let mp = MultiProgress::new(); + let add = mp.add(ProgressBar::new(0)); + add.set_style(ProgressStyle::default_bar() + .template("{msg}\n{spinner:.green} [{bar:40.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, eta {eta})").unwrap() + .progress_chars("=>-")); + add.set_message(format!("Importing from {source}...")); + add.set_length(expected_size); + add.set_position(0); + add.enable_steady_tick(Duration::from_millis(500)); + + let doc_id = fmt_short(doc_id.to_bytes()); + let import = mp.add(ProgressBar::new(0)); + import.set_style(ProgressStyle::default_bar() + .template("{msg}\n{spinner:.green} [{bar:40.cyan/blue}] {pos}/{len} ({per_sec}, eta {eta})").unwrap() + .progress_chars("=>-")); + import.set_message(format!("Adding to doc {doc_id}...")); + import.set_length(expected_entries); + import.set_position(0); + import.enable_steady_tick(Duration::from_millis(500)); + + Self { mp, import, add } + } + + fn add_found(&self, _name: String, _size: u64) {} + + fn import_found(&self, _name: String) {} + + /// Marks having made some progress to the progress bar. + fn add_progress(&self, size: u64) { + self.add.inc(size); + } + + /// Marks having made one unit of progress on the import progress bar. + fn import_progress(&self) { + self.import.inc(1); + } + + /// Sets the `add` progress bar as completed. + fn add_done(&self) { + self.add.set_position(self.add.length().unwrap_or_default()); + } + + /// Sets the all progress bars as done. 
+ fn all_done(self) { + self.mp.clear().ok(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + #[ignore] + #[allow(unused_variables, unreachable_code, clippy::diverging_sub_expression)] + async fn test_doc_import() -> Result<()> { + let temp_dir = tempfile::tempdir().context("tempdir")?; + + tokio::fs::create_dir_all(temp_dir.path()) + .await + .context("create dir all")?; + + let foobar = temp_dir.path().join("foobar"); + tokio::fs::write(foobar, "foobar") + .await + .context("write foobar")?; + let foo = temp_dir.path().join("foo"); + tokio::fs::write(foo, "foo").await.context("write foo")?; + + let data_dir = tempfile::tempdir()?; + + // let node = crate::commands::start::start_node(data_dir.path(), None, None).await?; + // let node = todo!(); + // let client = node.client(); + let docs: docs::Client = todo!(); + let authors = docs.authors(); + let doc = docs.create().await.context("doc create")?; + let author = authors.create().await.context("author create")?; + + // set up command, getting iroh node + let cli = ConsoleEnv::for_console(data_dir.path().to_owned(), &authors) + .await + .context("ConsoleEnv")?; + // let iroh = iroh::client::Iroh::connect_path(data_dir.path()) + // .await + // .context("rpc connect")?; + // let iroh = todo!(); + let docs = todo!(); + let blobs = todo!(); + + let command = DocCommands::Import { + doc: Some(doc.id()), + author: Some(author), + prefix: None, + path: temp_dir.path().to_string_lossy().into(), + in_place: false, + no_prompt: true, + }; + + command + .run(&docs, &blobs, &cli) + .await + .context("DocCommands run")?; + + let keys: Vec<_> = doc + .get_many(Query::all()) + .await + .context("doc get many")? + .try_collect() + .await?; + assert_eq!(2, keys.len()); + + // todo + // iroh.shutdown(false).await?; + Ok(()) + } +} diff --git a/src/cli/authors.rs b/src/cli/authors.rs new file mode 100644 index 0000000..40e6ec5 --- /dev/null +++ b/src/cli/authors.rs @@ -0,0 +1,104 @@ +//! Define the commands to manage authors. + +use anyhow::{bail, Result}; +use clap::Parser; +use derive_more::FromStr; +use futures_lite::StreamExt; +use iroh_base::base32::fmt_short; + +use super::{AuthorsClient, ConsoleEnv}; +use crate::{Author, AuthorId}; + +#[allow(missing_docs)] +/// Commands to manage authors. +#[derive(Debug, Clone, Parser)] +pub enum AuthorCommands { + /// Set the active author (Note: only works within the Iroh console). + Switch { author: AuthorId }, + /// Create a new author. + Create { + /// Switch to the created author (Note: only works in the Iroh console). + #[clap(long)] + switch: bool, + }, + /// Delete an author. + Delete { author: AuthorId }, + /// Export an author. + Export { author: AuthorId }, + /// Import an author. + Import { author: String }, + /// Print the default author for this node. + Default { + /// Switch to the default author (Note: only works in the Iroh console). + #[clap(long)] + switch: bool, + }, + /// List authors. + #[clap(alias = "ls")] + List, +} + +impl AuthorCommands { + /// Runs the author command given an iroh client and console environment. + pub async fn run(self, authors: &AuthorsClient, env: &ConsoleEnv) -> Result<()> { + match self { + Self::Switch { author } => { + env.set_author(author)?; + println!("Active author is now {}", fmt_short(author.as_bytes())); + } + Self::List => { + let mut stream = authors.list().await?; + while let Some(author_id) = stream.try_next().await? 
{ + println!("{}", author_id); + } + } + Self::Default { switch } => { + if switch && !env.is_console() { + bail!("The --switch flag is only supported within the Iroh console."); + } + let author_id = authors.default().await?; + println!("{}", author_id); + if switch { + env.set_author(author_id)?; + println!("Active author is now {}", fmt_short(author_id.as_bytes())); + } + } + Self::Create { switch } => { + if switch && !env.is_console() { + bail!("The --switch flag is only supported within the Iroh console."); + } + + let author_id = authors.create().await?; + println!("{}", author_id); + + if switch { + env.set_author(author_id)?; + println!("Active author is now {}", fmt_short(author_id.as_bytes())); + } + } + Self::Delete { author } => { + authors.delete(author).await?; + println!("Deleted author {}", fmt_short(author.as_bytes())); + } + Self::Export { author } => match authors.export(author).await? { + Some(author) => { + println!("{}", author); + } + None => { + println!("No author found {}", fmt_short(author)); + } + }, + Self::Import { author } => match Author::from_str(&author) { + Ok(author) => { + let id = author.id(); + authors.import(author).await?; + println!("Imported {}", fmt_short(id)); + } + Err(err) => { + eprintln!("Invalid author key: {}", err); + } + }, + } + Ok(()) + } +} diff --git a/src/engine.rs b/src/engine.rs index c7ca64b..a91992f 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -11,9 +11,12 @@ use std::{ use anyhow::{bail, Context, Result}; use futures_lite::{Stream, StreamExt}; -use iroh_blobs::{downloader::Downloader, store::EntryStatus, Hash}; +use iroh::{key::PublicKey, Endpoint, NodeAddr}; +use iroh_blobs::{ + downloader::Downloader, net_protocol::ProtectCb, store::EntryStatus, + util::local_pool::LocalPoolHandle, Hash, +}; use iroh_gossip::net::Gossip; -use iroh_net::{key::PublicKey, Endpoint, NodeAddr}; use serde::{Deserialize, Serialize}; use tokio::sync::{mpsc, oneshot}; use tokio_util::task::AbortOnDropHandle; @@ -40,7 +43,7 @@ const SUBSCRIBE_CHANNEL_CAP: usize = 256; /// The sync engine coordinates actors that manage open documents, set-reconciliation syncs with /// peers and a gossip swarm for each syncing document. #[derive(derive_more::Debug, Clone)] -pub struct Engine { +pub struct Engine { /// [`Endpoint`] used by the engine. pub endpoint: Endpoint, /// Handle to the actor thread. @@ -52,20 +55,25 @@ pub struct Engine { actor_handle: Arc>, #[debug("ContentStatusCallback")] content_status_cb: ContentStatusCallback, + local_pool_handle: LocalPoolHandle, + blob_store: D, + #[cfg(feature = "rpc")] + pub(crate) rpc_handler: Arc>, } -impl Engine { +impl Engine { /// Start the sync engine. /// /// This will spawn two tokio tasks for the live sync coordination and gossip actors, and a /// thread for the [`crate::actor::SyncHandle`]. 
- pub async fn spawn( + pub async fn spawn( endpoint: Endpoint, gossip: Gossip, replica_store: crate::store::Store, - bao_store: B, + bao_store: D, downloader: Downloader, default_author_storage: DefaultAuthorStorage, + local_pool_handle: LocalPoolHandle, ) -> anyhow::Result { let (live_actor_tx, to_live_actor_recv) = mpsc::channel(ACTOR_CHANNEL_CAP); let me = endpoint.node_id().fmt_short(); @@ -80,7 +88,7 @@ impl Engine { sync.clone(), endpoint.clone(), gossip.clone(), - bao_store, + bao_store.clone(), downloader, to_live_actor_recv, live_actor_tx.clone(), @@ -111,9 +119,46 @@ impl Engine { actor_handle: Arc::new(AbortOnDropHandle::new(actor_handle)), content_status_cb, default_author: Arc::new(default_author), + local_pool_handle, + blob_store: bao_store, + #[cfg(feature = "rpc")] + rpc_handler: Default::default(), + }) + } + + /// Return a callback that can be added to blobs to protect the content of + /// all docs from garbage collection. + pub fn protect_cb(&self) -> ProtectCb { + let this = self.clone(); + Box::new(move |live| { + let this = this.clone(); + Box::pin(async move { + let doc_hashes = match this.sync.content_hashes().await { + Ok(hashes) => hashes, + Err(err) => { + tracing::warn!("Error getting doc hashes: {}", err); + return; + } + }; + for hash in doc_hashes { + match hash { + Ok(hash) => { + live.insert(hash); + } + Err(err) => { + tracing::error!("Error getting doc hash: {}", err); + } + } + } + }) }) } + /// Get the blob store. + pub fn blob_store(&self) -> &D { + &self.blob_store + } + /// Start to sync a document. /// /// If `peers` is non-empty, it will both do an initial set-reconciliation sync with each peer, @@ -186,10 +231,7 @@ impl Engine { } /// Handle an incoming iroh-docs connection. - pub async fn handle_connection( - &self, - conn: iroh_net::endpoint::Connecting, - ) -> anyhow::Result<()> { + pub async fn handle_connection(&self, conn: iroh::endpoint::Connecting) -> anyhow::Result<()> { self.to_live_actor .send(ToLiveActor::HandleConnection { conn }) .await?; @@ -205,6 +247,10 @@ impl Engine { reply_rx.await?; Ok(()) } + + pub(crate) fn local_pool_handle(&self) -> &LocalPoolHandle { + &self.local_pool_handle + } } /// Converts an [`EntryStatus`] into a ['ContentStatus']. 
diff --git a/src/engine/gossip.rs b/src/engine/gossip.rs index ff98931..b922e03 100644 --- a/src/engine/gossip.rs +++ b/src/engine/gossip.rs @@ -4,8 +4,8 @@ use anyhow::{Context, Result}; use bytes::Bytes; use futures_lite::StreamExt; use futures_util::FutureExt; +use iroh::NodeId; use iroh_gossip::net::{Event, Gossip, GossipEvent, GossipReceiver, GossipSender, JoinOptions}; -use iroh_net::NodeId; use tokio::{ sync::mpsc, task::{AbortHandle, JoinSet}, diff --git a/src/engine/live.rs b/src/engine/live.rs index 100ed0a..8884995 100644 --- a/src/engine/live.rs +++ b/src/engine/live.rs @@ -7,6 +7,7 @@ use std::{ use anyhow::{Context, Result}; use futures_lite::FutureExt; +use iroh::{key::PublicKey, Endpoint, NodeAddr, NodeId}; use iroh_blobs::{ downloader::{DownloadError, DownloadRequest, Downloader}, get::Stats, @@ -15,7 +16,6 @@ use iroh_blobs::{ }; use iroh_gossip::net::Gossip; use iroh_metrics::inc; -use iroh_net::{key::PublicKey, Endpoint, NodeAddr, NodeId}; use serde::{Deserialize, Serialize}; use tokio::{ sync::{self, mpsc, oneshot}, @@ -86,7 +86,7 @@ pub enum ToLiveActor { reply: sync::oneshot::Sender>, }, HandleConnection { - conn: iroh_net::endpoint::Connecting, + conn: iroh::endpoint::Connecting, }, AcceptSyncRequest { namespace: NamespaceId, @@ -759,7 +759,7 @@ impl LiveActor { } #[instrument("accept", skip_all)] - pub async fn handle_connection(&mut self, conn: iroh_net::endpoint::Connecting) { + pub async fn handle_connection(&mut self, conn: iroh::endpoint::Connecting) { let to_actor_tx = self.sync_actor_tx.clone(); let accept_request_cb = move |namespace, peer| { let to_actor_tx = to_actor_tx.clone(); diff --git a/src/engine/state.rs b/src/engine/state.rs index 83dc4ef..bb10dd8 100644 --- a/src/engine/state.rs +++ b/src/engine/state.rs @@ -4,7 +4,7 @@ use std::{ }; use anyhow::Result; -use iroh_net::NodeId; +use iroh::NodeId; use serde::{Deserialize, Serialize}; use tracing::{debug, warn}; diff --git a/src/lib.rs b/src/lib.rs index b7bac5f..f5ee94d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -38,6 +38,9 @@ pub mod metrics; #[cfg(feature = "net")] #[cfg_attr(iroh_docsrs, doc(cfg(feature = "net")))] pub mod net; +#[cfg(feature = "engine")] +#[cfg_attr(iroh_docsrs, doc(cfg(feature = "engine")))] +pub mod protocol; #[cfg(feature = "net")] #[cfg_attr(iroh_docsrs, doc(cfg(feature = "net")))] mod ticket; @@ -45,6 +48,13 @@ mod ticket; #[cfg(feature = "engine")] #[cfg_attr(iroh_docsrs, doc(cfg(feature = "engine")))] pub mod engine; +#[cfg(feature = "rpc")] +#[cfg_attr(iroh_docsrs, doc(cfg(feature = "rpc")))] +pub mod rpc; + +#[cfg(feature = "cli")] +#[cfg_attr(iroh_docsrs, doc(cfg(feature = "cli")))] +pub mod cli; pub mod actor; pub mod store; @@ -54,6 +64,11 @@ mod heads; mod keys; mod ranger; +#[cfg(feature = "net")] +#[cfg_attr(iroh_docsrs, doc(cfg(feature = "net")))] +#[doc(inline)] +pub use net::ALPN; + #[cfg(feature = "net")] #[cfg_attr(iroh_docsrs, doc(cfg(feature = "net")))] pub use self::ticket::DocTicket; diff --git a/src/net.rs b/src/net.rs index 42353c3..8d15e37 100644 --- a/src/net.rs +++ b/src/net.rs @@ -5,9 +5,9 @@ use std::{ time::{Duration, Instant}, }; +use iroh::{endpoint::get_remote_node_id, key::PublicKey, Endpoint, NodeAddr}; #[cfg(feature = "metrics")] use iroh_metrics::inc; -use iroh_net::{endpoint::get_remote_node_id, key::PublicKey, Endpoint, NodeAddr}; use serde::{Deserialize, Serialize}; use tracing::{debug, error_span, trace, Instrument}; @@ -20,7 +20,7 @@ use crate::{ }; /// The ALPN identifier for the iroh-docs protocol -pub const DOCS_ALPN: &[u8] = 
b"/iroh-sync/1"; +pub const ALPN: &[u8] = b"/iroh-sync/1"; mod codec; @@ -35,7 +35,7 @@ pub async fn connect_and_sync( let peer_id = peer.node_id; trace!("connect"); let connection = endpoint - .connect(peer, DOCS_ALPN) + .connect(peer, crate::ALPN) .await .map_err(ConnectError::connect)?; @@ -106,7 +106,7 @@ pub enum AcceptOutcome { /// Handle an iroh-docs connection and sync all shared documents in the replica store. pub async fn handle_connection( sync: SyncHandle, - connecting: iroh_net::endpoint::Connecting, + connecting: iroh::endpoint::Connecting, accept_cb: F, ) -> Result where diff --git a/src/net/codec.rs b/src/net/codec.rs index e4e164d..5a25fe7 100644 --- a/src/net/codec.rs +++ b/src/net/codec.rs @@ -3,7 +3,7 @@ use std::future::Future; use anyhow::{anyhow, ensure}; use bytes::{Buf, BufMut, BytesMut}; use futures_util::SinkExt; -use iroh_net::key::PublicKey; +use iroh::key::PublicKey; use serde::{Deserialize, Serialize}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_stream::StreamExt; @@ -294,8 +294,8 @@ impl BobState { #[cfg(test)] mod tests { use anyhow::Result; + use iroh::key::SecretKey; use iroh_base::hash::Hash; - use iroh_net::key::SecretKey; use rand_core::{CryptoRngCore, SeedableRng}; use super::*; diff --git a/src/protocol.rs b/src/protocol.rs new file mode 100644 index 0000000..ed9e4ea --- /dev/null +++ b/src/protocol.rs @@ -0,0 +1,23 @@ +//! [`ProtocolHandler`] implementation for the docs [`Engine`]. + +use std::sync::Arc; + +use anyhow::Result; +use futures_lite::future::Boxed as BoxedFuture; +use iroh::{endpoint::Connecting, protocol::ProtocolHandler}; + +use crate::engine::Engine; + +impl ProtocolHandler for Engine { + fn accept(self: Arc, conn: Connecting) -> BoxedFuture> { + Box::pin(async move { self.handle_connection(conn).await }) + } + + fn shutdown(self: Arc) -> BoxedFuture<()> { + Box::pin(async move { + if let Err(err) = (*self).shutdown().await { + tracing::warn!("shutdown error: {:?}", err); + } + }) + } +} diff --git a/src/ranger.rs b/src/ranger.rs index 95b6967..3738511 100644 --- a/src/ranger.rs +++ b/src/ranger.rs @@ -1,6 +1,5 @@ //! Implementation of Set Reconcilliation based on //! "Range-Based Set Reconciliation" by Aljoscha Meyer. -//! use std::{cmp::Ordering, fmt::Debug}; @@ -878,7 +877,7 @@ mod tests { Prefix(K), } - impl<'a, K, V> Iterator for SimpleRangeIterator<'a, K, V> + impl Iterator for SimpleRangeIterator<'_, K, V> where K: RangeKey + Default, V: Clone, diff --git a/src/rpc.rs b/src/rpc.rs new file mode 100644 index 0000000..d1e974a --- /dev/null +++ b/src/rpc.rs @@ -0,0 +1,98 @@ +//! Quic RPC implementation for docs. + +use proto::{Request, RpcService}; +use quic_rpc::{ + server::{ChannelTypes, RpcChannel}, + RpcClient, RpcServer, +}; +use tokio_util::task::AbortOnDropHandle; + +use crate::engine::Engine; + +pub mod client; +pub mod proto; + +mod docs_handle_request; + +type RpcError = serde_error::Error; +type RpcResult = std::result::Result; + +impl Engine { + /// Get an in memory client to interact with the docs engine. + pub fn client(&self) -> &client::docs::MemClient { + &self + .rpc_handler + .get_or_init(|| RpcHandler::new(self)) + .client + } + + /// Handle a docs request from the RPC server. 
+ pub async fn handle_rpc_request>( + self, + msg: Request, + chan: RpcChannel, + ) -> Result<(), quic_rpc::server::RpcServerError> { + use Request::*; + let this = self; + match msg { + Open(msg) => chan.rpc(msg, this, Self::doc_open).await, + Close(msg) => chan.rpc(msg, this, Self::doc_close).await, + Status(msg) => chan.rpc(msg, this, Self::doc_status).await, + List(msg) => chan.server_streaming(msg, this, Self::doc_list).await, + Create(msg) => chan.rpc(msg, this, Self::doc_create).await, + Drop(msg) => chan.rpc(msg, this, Self::doc_drop).await, + Import(msg) => chan.rpc(msg, this, Self::doc_import).await, + Set(msg) => chan.rpc(msg, this, Self::doc_set).await, + ImportFile(msg) => { + chan.server_streaming(msg, this, Self::doc_import_file) + .await + } + ExportFile(msg) => { + chan.server_streaming(msg, this, Self::doc_export_file) + .await + } + Del(msg) => chan.rpc(msg, this, Self::doc_del).await, + SetHash(msg) => chan.rpc(msg, this, Self::doc_set_hash).await, + Get(msg) => chan.server_streaming(msg, this, Self::doc_get_many).await, + GetExact(msg) => chan.rpc(msg, this, Self::doc_get_exact).await, + StartSync(msg) => chan.rpc(msg, this, Self::doc_start_sync).await, + Leave(msg) => chan.rpc(msg, this, Self::doc_leave).await, + Share(msg) => chan.rpc(msg, this, Self::doc_share).await, + Subscribe(msg) => { + chan.try_server_streaming(msg, this, Self::doc_subscribe) + .await + } + SetDownloadPolicy(msg) => chan.rpc(msg, this, Self::doc_set_download_policy).await, + GetDownloadPolicy(msg) => chan.rpc(msg, this, Self::doc_get_download_policy).await, + GetSyncPeers(msg) => chan.rpc(msg, this, Self::doc_get_sync_peers).await, + + AuthorList(msg) => chan.server_streaming(msg, this, Self::author_list).await, + AuthorCreate(msg) => chan.rpc(msg, this, Self::author_create).await, + AuthorImport(msg) => chan.rpc(msg, this, Self::author_import).await, + AuthorExport(msg) => chan.rpc(msg, this, Self::author_export).await, + AuthorDelete(msg) => chan.rpc(msg, this, Self::author_delete).await, + AuthorGetDefault(msg) => chan.rpc(msg, this, Self::author_default).await, + AuthorSetDefault(msg) => chan.rpc(msg, this, Self::author_set_default).await, + } + } +} + +#[derive(Debug)] +pub(crate) struct RpcHandler { + /// Client to hand out + client: client::docs::MemClient, + /// Handler task + _handler: AbortOnDropHandle<()>, +} + +impl RpcHandler { + fn new(engine: &Engine) -> Self { + let engine = engine.clone(); + let (listener, connector) = quic_rpc::transport::flume::channel(1); + let listener = RpcServer::new(listener); + let client = client::docs::MemClient::new(RpcClient::new(connector)); + let _handler = listener + .spawn_accept_loop(move |req, chan| engine.clone().handle_rpc_request(req, chan)); + Self { client, _handler } + } +} diff --git a/src/rpc/client.rs b/src/rpc/client.rs new file mode 100644 index 0000000..90a0ad7 --- /dev/null +++ b/src/rpc/client.rs @@ -0,0 +1,20 @@ +//! RPC Client for docs and authors +use anyhow::Result; +use futures_util::{Stream, StreamExt}; + +pub mod authors; +pub mod docs; + +fn flatten( + s: impl Stream, E2>>, +) -> impl Stream> +where + E1: std::error::Error + Send + Sync + 'static, + E2: std::error::Error + Send + Sync + 'static, +{ + s.map(|res| match res { + Ok(Ok(res)) => Ok(res), + Ok(Err(err)) => Err(err.into()), + Err(err) => Err(err.into()), + }) +} diff --git a/src/rpc/client/authors.rs b/src/rpc/client/authors.rs new file mode 100644 index 0000000..18a154f --- /dev/null +++ b/src/rpc/client/authors.rs @@ -0,0 +1,101 @@ +//! 
API for author management.
+//!
+//! The main entry point is the [`Client`].
+
+use anyhow::Result;
+use futures_lite::{Stream, StreamExt};
+use quic_rpc::{client::BoxedConnector, Connector};
+
+use super::flatten;
+#[doc(inline)]
+pub use crate::engine::{Origin, SyncEvent, SyncReason};
+use crate::{
+    rpc::proto::{
+        AuthorCreateRequest, AuthorDeleteRequest, AuthorExportRequest, AuthorGetDefaultRequest,
+        AuthorImportRequest, AuthorListRequest, AuthorSetDefaultRequest, RpcService,
+    },
+    Author, AuthorId,
+};
+
+/// Iroh authors client.
+#[derive(Debug, Clone)]
+#[repr(transparent)]
+pub struct Client<C: Connector<RpcService> = BoxedConnector<RpcService>> {
+    pub(super) rpc: quic_rpc::RpcClient<RpcService, C>,
+}
+
+impl<C: Connector<RpcService>> Client<C> {
+    /// Creates a new authors client.
+    pub fn new(rpc: quic_rpc::RpcClient<RpcService, C>) -> Self {
+        Self { rpc }
+    }
+
+    /// Creates a new document author.
+    ///
+    /// You likely want to save the returned [`AuthorId`] somewhere so that you can use this author
+    /// again.
+    ///
+    /// If you need only a single author, use [`Self::default`].
+    pub async fn create(&self) -> Result<AuthorId> {
+        let res = self.rpc.rpc(AuthorCreateRequest).await??;
+        Ok(res.author_id)
+    }
+
+    /// Returns the default document author of this node.
+    ///
+    /// On persistent nodes, the author is created on first start and its public key is saved
+    /// in the data directory.
+    ///
+    /// The default author can be set with [`Self::set_default`].
+    pub async fn default(&self) -> Result<AuthorId> {
+        let res = self.rpc.rpc(AuthorGetDefaultRequest).await??;
+        Ok(res.author_id)
+    }
+
+    /// Sets the node-wide default author.
+    ///
+    /// If the author does not exist, an error is returned.
+    ///
+    /// On a persistent node, the author id will be saved to a file in the data directory and
+    /// reloaded after a restart.
+    pub async fn set_default(&self, author_id: AuthorId) -> Result<()> {
+        self.rpc
+            .rpc(AuthorSetDefaultRequest { author_id })
+            .await??;
+        Ok(())
+    }
+
+    /// Lists document authors for which we have a secret key.
+    ///
+    /// It's only possible to create writes from authors that we have the secret key of.
+    pub async fn list(&self) -> Result<impl Stream<Item = Result<AuthorId>>> {
+        let stream = self.rpc.server_streaming(AuthorListRequest {}).await?;
+        Ok(flatten(stream).map(|res| res.map(|res| res.author_id)))
+    }
+
+    /// Exports the given author.
+    ///
+    /// Warning: The [`Author`] struct contains sensitive data.
+    pub async fn export(&self, author: AuthorId) -> Result<Option<Author>> {
+        let res = self.rpc.rpc(AuthorExportRequest { author }).await??;
+        Ok(res.author)
+    }
+
+    /// Imports the given author.
+    ///
+    /// Warning: The [`Author`] struct contains sensitive data.
+    pub async fn import(&self, author: Author) -> Result<()> {
+        self.rpc.rpc(AuthorImportRequest { author }).await??;
+        Ok(())
+    }
+
+    /// Deletes the given author by id.
+    ///
+    /// Warning: This permanently removes this author.
+    ///
+    /// Returns an error if attempting to delete the default author.
+    pub async fn delete(&self, author: AuthorId) -> Result<()> {
+        self.rpc.rpc(AuthorDeleteRequest { author }).await??;
+        Ok(())
+    }
+}
diff --git a/src/rpc/client/docs.rs b/src/rpc/client/docs.rs
new file mode 100644
index 0000000..7d6fe5f
--- /dev/null
+++ b/src/rpc/client/docs.rs
@@ -0,0 +1,636 @@
+//! API for document management.
+//!
+//! The main entry point is the [`Client`].
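+//!
+//! A minimal usage sketch (illustrative only; assumes a connected `client: Client`):
+//!
+//! ```rust,ignore
+//! let author = client.authors().create().await?;
+//! let doc = client.create().await?;
+//! let hash = doc.set_bytes(author, "greeting", "hello world").await?;
+//! println!("doc {} stored entry with content hash {hash}", doc.id());
+//! ```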
+
+use std::{
+    path::{Path, PathBuf},
+    pin::Pin,
+    sync::Arc,
+    task::{Context, Poll},
+};
+
+use anyhow::{anyhow, Context as _, Result};
+use bytes::Bytes;
+use derive_more::{Display, FromStr};
+use futures_lite::{Stream, StreamExt};
+use iroh::NodeAddr;
+use iroh_base::node_addr::AddrInfoOptions;
+use iroh_blobs::{export::ExportProgress, store::ExportMode, Hash};
+use portable_atomic::{AtomicBool, Ordering};
+use quic_rpc::{
+    client::BoxedConnector, message::RpcMsg, transport::flume::FlumeConnector, Connector,
+};
+use serde::{Deserialize, Serialize};
+
+use super::{authors, flatten};
+use crate::{
+    actor::OpenState,
+    rpc::proto::{
+        CloseRequest, CreateRequest, DelRequest, DelResponse, DocListRequest, DocSubscribeRequest,
+        DropRequest, ExportFileRequest, GetDownloadPolicyRequest, GetExactRequest, GetManyRequest,
+        GetSyncPeersRequest, ImportFileRequest, ImportRequest, LeaveRequest, OpenRequest,
+        RpcService, SetDownloadPolicyRequest, SetHashRequest, SetRequest, ShareRequest,
+        StartSyncRequest, StatusRequest,
+    },
+    store::{DownloadPolicy, Query},
+    AuthorId, Capability, CapabilityKind, DocTicket, NamespaceId, PeerIdBytes,
+};
+#[doc(inline)]
+pub use crate::{
+    engine::{LiveEvent, Origin, SyncEvent, SyncReason},
+    Entry,
+};
+
+/// Type alias for a memory-backed client.
+pub type MemClient =
+    Client<FlumeConnector<crate::rpc::proto::Response, crate::rpc::proto::Request>>;
+
+/// Iroh docs client.
+#[derive(Debug, Clone)]
+#[repr(transparent)]
+pub struct Client<C: Connector<RpcService> = BoxedConnector<RpcService>> {
+    pub(super) rpc: quic_rpc::RpcClient<RpcService, C>,
+}
+
+impl<C: Connector<RpcService>> Client<C> {
+    /// Creates a new docs client.
+    pub fn new(rpc: quic_rpc::RpcClient<RpcService, C>) -> Self {
+        Self { rpc }
+    }
+
+    /// Returns an authors client.
+    pub fn authors(&self) -> authors::Client<C> {
+        authors::Client::new(self.rpc.clone())
+    }
+
+    /// Creates a new document.
+    pub async fn create(&self) -> Result<Doc<C>> {
+        let res = self.rpc.rpc(CreateRequest {}).await??;
+        let doc = Doc::new(self.rpc.clone(), res.id);
+        Ok(doc)
+    }
+
+    /// Deletes a document from the local node.
+    ///
+    /// This is a destructive operation. Both the document secret key and all entries in the
+    /// document will be permanently deleted from the node's storage. Content blobs will be deleted
+    /// through garbage collection unless they are referenced from another document or tag.
+    pub async fn drop_doc(&self, doc_id: NamespaceId) -> Result<()> {
+        self.rpc.rpc(DropRequest { doc_id }).await??;
+        Ok(())
+    }
+
+    /// Imports a document from a namespace capability.
+    ///
+    /// This does not start sync automatically. Use [`Doc::start_sync`] to start sync.
+    pub async fn import_namespace(&self, capability: Capability) -> Result<Doc<C>> {
+        let res = self.rpc.rpc(ImportRequest { capability }).await??;
+        let doc = Doc::new(self.rpc.clone(), res.doc_id);
+        Ok(doc)
+    }
+
+    /// Imports a document from a ticket and joins all peers in the ticket.
+    pub async fn import(&self, ticket: DocTicket) -> Result<Doc<C>> {
+        let DocTicket { capability, nodes } = ticket;
+        let doc = self.import_namespace(capability).await?;
+        doc.start_sync(nodes).await?;
+        Ok(doc)
+    }
+
+    /// Imports a document from a ticket, creates a subscription stream and joins all peers in the ticket.
+    ///
+    /// Returns the [`Doc`] and a [`Stream`] of [`LiveEvent`]s.
+    ///
+    /// The subscription stream is created before the sync is started, so the first call to this
+    /// method after starting the node is guaranteed to not miss any sync events.
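+    ///
+    /// A minimal sketch (illustrative; assumes a `ticket` and that `futures_lite::StreamExt`
+    /// is in scope):
+    ///
+    /// ```rust,ignore
+    /// let (doc, mut events) = client.import_and_subscribe(ticket).await?;
+    /// while let Some(event) = events.next().await {
+    ///     println!("doc {}: {:?}", doc.id(), event?);
+    /// }
+    /// ```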
+ pub async fn import_and_subscribe( + &self, + ticket: DocTicket, + ) -> Result<(Doc, impl Stream>)> { + let DocTicket { capability, nodes } = ticket; + let res = self.rpc.rpc(ImportRequest { capability }).await??; + let doc = Doc::new(self.rpc.clone(), res.doc_id); + let events = doc.subscribe().await?; + doc.start_sync(nodes).await?; + Ok((doc, events)) + } + + /// Lists all documents. + pub async fn list(&self) -> Result>> { + let stream = self.rpc.server_streaming(DocListRequest {}).await?; + Ok(flatten(stream).map(|res| res.map(|res| (res.id, res.capability)))) + } + + /// Returns a [`Doc`] client for a single document. + /// + /// Returns None if the document cannot be found. + pub async fn open(&self, id: NamespaceId) -> Result>> { + self.rpc.rpc(OpenRequest { doc_id: id }).await??; + let doc = Doc::new(self.rpc.clone(), id); + Ok(Some(doc)) + } +} + +/// Document handle +#[derive(Debug, Clone)] +pub struct Doc = BoxedConnector>(Arc>) +where + C: quic_rpc::Connector; + +impl> PartialEq for Doc { + fn eq(&self, other: &Self) -> bool { + self.0.id == other.0.id + } +} + +impl> Eq for Doc {} + +#[derive(Debug)] +struct DocInner = BoxedConnector> { + id: NamespaceId, + rpc: quic_rpc::RpcClient, + closed: AtomicBool, + rt: tokio::runtime::Handle, +} + +impl Drop for DocInner +where + C: quic_rpc::Connector, +{ + fn drop(&mut self) { + let doc_id = self.id; + let rpc = self.rpc.clone(); + if !self.closed.swap(true, Ordering::Relaxed) { + self.rt.spawn(async move { + rpc.rpc(CloseRequest { doc_id }).await.ok(); + }); + } + } +} + +impl> Doc { + fn new(rpc: quic_rpc::RpcClient, id: NamespaceId) -> Self { + Self(Arc::new(DocInner { + rpc, + id, + closed: AtomicBool::new(false), + rt: tokio::runtime::Handle::current(), + })) + } + + async fn rpc(&self, msg: M) -> Result + where + M: RpcMsg, + { + let res = self.0.rpc.rpc(msg).await?; + Ok(res) + } + + /// Returns the document id of this doc. + pub fn id(&self) -> NamespaceId { + self.0.id + } + + /// Closes the document. + pub async fn close(&self) -> Result<()> { + if !self.0.closed.swap(true, Ordering::Relaxed) { + self.rpc(CloseRequest { doc_id: self.id() }).await??; + } + Ok(()) + } + + fn ensure_open(&self) -> Result<()> { + if self.0.closed.load(Ordering::Relaxed) { + Err(anyhow!("document is closed")) + } else { + Ok(()) + } + } + + /// Sets the content of a key to a byte array. + pub async fn set_bytes( + &self, + author_id: AuthorId, + key: impl Into, + value: impl Into, + ) -> Result { + self.ensure_open()?; + let res = self + .rpc(SetRequest { + doc_id: self.id(), + author_id, + key: key.into(), + value: value.into(), + }) + .await??; + Ok(res.entry.content_hash()) + } + + /// Sets an entries on the doc via its key, hash, and size. + pub async fn set_hash( + &self, + author_id: AuthorId, + key: impl Into, + hash: Hash, + size: u64, + ) -> Result<()> { + self.ensure_open()?; + self.rpc(SetHashRequest { + doc_id: self.id(), + author_id, + key: key.into(), + hash, + size, + }) + .await??; + Ok(()) + } + + /// Adds an entry from an absolute file path + pub async fn import_file( + &self, + author: AuthorId, + key: Bytes, + path: impl AsRef, + in_place: bool, + ) -> Result { + self.ensure_open()?; + let stream = self + .0 + .rpc + .server_streaming(ImportFileRequest { + doc_id: self.id(), + author_id: author, + path: path.as_ref().into(), + key, + in_place, + }) + .await?; + Ok(ImportFileProgress::new(stream)) + } + + /// Exports an entry as a file to a given absolute path. 
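+    ///
+    /// A usage sketch (illustrative; assumes an `entry` and an absolute target path):
+    ///
+    /// ```rust,ignore
+    /// let progress = doc.export_file(entry, "/tmp/entry.bin", ExportMode::Copy).await?;
+    /// let outcome = progress.finish().await?;
+    /// println!("wrote {} bytes to {}", outcome.size, outcome.path.display());
+    /// ```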
+ pub async fn export_file( + &self, + entry: Entry, + path: impl AsRef, + mode: ExportMode, + ) -> Result { + self.ensure_open()?; + let stream = self + .0 + .rpc + .server_streaming(ExportFileRequest { + entry, + path: path.as_ref().into(), + mode, + }) + .await?; + Ok(ExportFileProgress::new(stream)) + } + + /// Deletes entries that match the given `author` and key `prefix`. + /// + /// This inserts an empty entry with the key set to `prefix`, effectively clearing all other + /// entries whose key starts with or is equal to the given `prefix`. + /// + /// Returns the number of entries deleted. + pub async fn del(&self, author_id: AuthorId, prefix: impl Into) -> Result { + self.ensure_open()?; + let res = self + .rpc(DelRequest { + doc_id: self.id(), + author_id, + prefix: prefix.into(), + }) + .await??; + let DelResponse { removed } = res; + Ok(removed) + } + + /// Returns an entry for a key and author. + /// + /// Optionally also returns the entry unless it is empty (i.e. a deletion marker). + pub async fn get_exact( + &self, + author: AuthorId, + key: impl AsRef<[u8]>, + include_empty: bool, + ) -> Result> { + self.ensure_open()?; + let res = self + .rpc(GetExactRequest { + author, + key: key.as_ref().to_vec().into(), + doc_id: self.id(), + include_empty, + }) + .await??; + Ok(res.entry.map(|entry| entry.into())) + } + + /// Returns all entries matching the query. + pub async fn get_many( + &self, + query: impl Into, + ) -> Result>> { + self.ensure_open()?; + let stream = self + .0 + .rpc + .server_streaming(GetManyRequest { + doc_id: self.id(), + query: query.into(), + }) + .await?; + Ok(flatten(stream).map(|res| res.map(|res| res.entry.into()))) + } + + /// Returns a single entry. + pub async fn get_one(&self, query: impl Into) -> Result> { + self.get_many(query).await?.next().await.transpose() + } + + /// Shares this document with peers over a ticket. + pub async fn share( + &self, + mode: ShareMode, + addr_options: AddrInfoOptions, + ) -> anyhow::Result { + self.ensure_open()?; + let res = self + .rpc(ShareRequest { + doc_id: self.id(), + mode, + addr_options, + }) + .await??; + Ok(res.0) + } + + /// Starts to sync this document with a list of peers. + pub async fn start_sync(&self, peers: Vec) -> Result<()> { + self.ensure_open()?; + let _res = self + .rpc(StartSyncRequest { + doc_id: self.id(), + peers, + }) + .await??; + Ok(()) + } + + /// Stops the live sync for this document. + pub async fn leave(&self) -> Result<()> { + self.ensure_open()?; + let _res = self.rpc(LeaveRequest { doc_id: self.id() }).await??; + Ok(()) + } + + /// Subscribes to events for this document. 
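+    ///
+    /// A minimal sketch (illustrative; assumes `futures_lite::StreamExt` is in scope):
+    ///
+    /// ```rust,ignore
+    /// let mut events = doc.subscribe().await?;
+    /// while let Some(event) = events.next().await {
+    ///     println!("{:?}", event?);
+    /// }
+    /// ```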
+ pub async fn subscribe(&self) -> anyhow::Result>> { + self.ensure_open()?; + let stream = self + .0 + .rpc + .try_server_streaming(DocSubscribeRequest { doc_id: self.id() }) + .await?; + Ok(stream.map(|res| match res { + Ok(res) => Ok(res.event), + Err(err) => Err(err.into()), + })) + } + + /// Returns status info for this document + pub async fn status(&self) -> anyhow::Result { + self.ensure_open()?; + let res = self.rpc(StatusRequest { doc_id: self.id() }).await??; + Ok(res.status) + } + + /// Sets the download policy for this document + pub async fn set_download_policy(&self, policy: DownloadPolicy) -> Result<()> { + self.rpc(SetDownloadPolicyRequest { + doc_id: self.id(), + policy, + }) + .await??; + Ok(()) + } + + /// Returns the download policy for this document + pub async fn get_download_policy(&self) -> Result { + let res = self + .rpc(GetDownloadPolicyRequest { doc_id: self.id() }) + .await??; + Ok(res.policy) + } + + /// Returns sync peers for this document + pub async fn get_sync_peers(&self) -> Result>> { + let res = self + .rpc(GetSyncPeersRequest { doc_id: self.id() }) + .await??; + Ok(res.peers) + } +} + +impl<'a, C> From<&'a Doc> for &'a quic_rpc::RpcClient +where + C: quic_rpc::Connector, +{ + fn from(doc: &'a Doc) -> &'a quic_rpc::RpcClient { + &doc.0.rpc + } +} + +/// Progress messages for an doc import operation +/// +/// An import operation involves computing the outboard of a file, and then +/// either copying or moving the file into the database, then setting the author, hash, size, and tag of that +/// file as an entry in the doc. +#[derive(Debug, Serialize, Deserialize)] +pub enum ImportProgress { + /// An item was found with name `name`, from now on referred to via `id`. + Found { + /// A new unique id for this entry. + id: u64, + /// The name of the entry. + name: String, + /// The size of the entry in bytes. + size: u64, + }, + /// We got progress ingesting item `id`. + Progress { + /// The unique id of the entry. + id: u64, + /// The offset of the progress, in bytes. + offset: u64, + }, + /// We are done adding `id` to the data store and the hash is `hash`. + IngestDone { + /// The unique id of the entry. + id: u64, + /// The hash of the entry. + hash: Hash, + }, + /// We are done setting the entry to the doc. + AllDone { + /// The key of the entry + key: Bytes, + }, + /// We got an error and need to abort. + /// + /// This will be the last message in the stream. + Abort(serde_error::Error), +} + +/// Intended capability for document share tickets +#[derive(Serialize, Deserialize, Debug, Clone, Display, FromStr)] +pub enum ShareMode { + /// Read-only access + Read, + /// Write access + Write, +} +/// Progress stream for [`Doc::import_file`]. +#[derive(derive_more::Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct ImportFileProgress { + #[debug(skip)] + stream: Pin> + Send + Unpin + 'static>>, +} + +impl ImportFileProgress { + fn new( + stream: (impl Stream, impl Into>> + + Send + + Unpin + + 'static), + ) -> Self { + let stream = stream.map(|item| match item { + Ok(item) => Ok(item.into()), + Err(err) => Err(err.into()), + }); + Self { + stream: Box::pin(stream), + } + } + + /// Finishes writing the stream, ignoring all intermediate progress events. + /// + /// Returns a [`ImportFileOutcome`] which contains a tag, key, and hash and the size of the + /// content. + pub async fn finish(mut self) -> Result { + let mut entry_size = 0; + let mut entry_hash = None; + while let Some(msg) = self.next().await { + match msg? 
{ + ImportProgress::Found { size, .. } => { + entry_size = size; + } + ImportProgress::AllDone { key } => { + let hash = entry_hash + .context("expected DocImportProgress::IngestDone event to occur")?; + let outcome = ImportFileOutcome { + hash, + key, + size: entry_size, + }; + return Ok(outcome); + } + ImportProgress::Abort(err) => return Err(err.into()), + ImportProgress::Progress { .. } => {} + ImportProgress::IngestDone { hash, .. } => { + entry_hash = Some(hash); + } + } + } + Err(anyhow!("Response stream ended prematurely")) + } +} + +/// Outcome of a [`Doc::import_file`] operation +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ImportFileOutcome { + /// The hash of the entry's content + pub hash: Hash, + /// The size of the entry + pub size: u64, + /// The key of the entry + pub key: Bytes, +} + +impl Stream for ImportFileProgress { + type Item = Result; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.stream).poll_next(cx) + } +} + +/// Progress stream for [`Doc::export_file`]. +#[derive(derive_more::Debug)] +pub struct ExportFileProgress { + #[debug(skip)] + stream: Pin> + Send + Unpin + 'static>>, +} +impl ExportFileProgress { + fn new( + stream: (impl Stream, impl Into>> + + Send + + Unpin + + 'static), + ) -> Self { + let stream = stream.map(|item| match item { + Ok(item) => Ok(item.into()), + Err(err) => Err(err.into()), + }); + Self { + stream: Box::pin(stream), + } + } + + /// Iterates through the export progress stream, returning when the stream has completed. + /// + /// Returns a [`ExportFileOutcome`] which contains a file path the data was written to and the size of the content. + pub async fn finish(mut self) -> Result { + let mut total_size = 0; + let mut path = None; + while let Some(msg) = self.next().await { + match msg? { + ExportProgress::Found { size, outpath, .. } => { + total_size = size.value(); + path = Some(outpath); + } + ExportProgress::AllDone => { + let path = path.context("expected ExportProgress::Found event to occur")?; + let outcome = ExportFileOutcome { + size: total_size, + path, + }; + return Ok(outcome); + } + ExportProgress::Done { .. } => {} + ExportProgress::Abort(err) => return Err(anyhow!(err)), + ExportProgress::Progress { .. 
} => {} + } + } + Err(anyhow!("Response stream ended prematurely")) + } +} + +/// Outcome of a [`Doc::export_file`] operation +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ExportFileOutcome { + /// The size of the entry + pub size: u64, + /// The path to which the entry was saved + pub path: PathBuf, +} + +impl Stream for ExportFileProgress { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.stream).poll_next(cx) + } +} diff --git a/src/rpc/docs_handle_request.rs b/src/rpc/docs_handle_request.rs new file mode 100644 index 0000000..2c147ff --- /dev/null +++ b/src/rpc/docs_handle_request.rs @@ -0,0 +1,549 @@ +use std::sync::{Arc, Mutex}; + +use anyhow::anyhow; +use futures_lite::{Stream, StreamExt}; +use iroh_blobs::{ + export::ExportProgress, + store::{ExportFormat, ImportProgress}, + util::progress::{AsyncChannelProgressSender, ProgressSender}, + BlobFormat, HashAndFormat, +}; + +use super::{ + client::docs::ShareMode, + proto::{ + AuthorCreateRequest, AuthorCreateResponse, AuthorDeleteRequest, AuthorDeleteResponse, + AuthorExportRequest, AuthorExportResponse, AuthorGetDefaultRequest, + AuthorGetDefaultResponse, AuthorImportRequest, AuthorImportResponse, AuthorListRequest, + AuthorListResponse, AuthorSetDefaultRequest, AuthorSetDefaultResponse, CloseRequest, + CloseResponse, CreateRequest as DocCreateRequest, CreateResponse as DocCreateResponse, + DelRequest, DelResponse, DocListRequest, DocSubscribeRequest, DocSubscribeResponse, + DropRequest, DropResponse, ExportFileRequest, ExportFileResponse, GetDownloadPolicyRequest, + GetDownloadPolicyResponse, GetExactRequest, GetExactResponse, GetManyRequest, + GetManyResponse, GetSyncPeersRequest, GetSyncPeersResponse, ImportFileRequest, + ImportFileResponse, ImportRequest as DocImportRequest, ImportResponse as DocImportResponse, + LeaveRequest, LeaveResponse, ListResponse as DocListResponse, OpenRequest, OpenResponse, + SetDownloadPolicyRequest, SetDownloadPolicyResponse, SetHashRequest, SetHashResponse, + SetRequest, SetResponse, ShareRequest, ShareResponse, StartSyncRequest, StartSyncResponse, + StatusRequest, StatusResponse, + }, + RpcError, RpcResult, +}; +use crate::{engine::Engine, Author, DocTicket, NamespaceSecret}; + +/// Capacity for the flume channels to forward sync store iterators to async RPC streams. +const ITER_CHANNEL_CAP: usize = 64; + +impl Engine { + pub(super) async fn author_create( + self, + _req: AuthorCreateRequest, + ) -> RpcResult { + // TODO: pass rng + let author = Author::new(&mut rand::rngs::OsRng {}); + self.sync + .import_author(author.clone()) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(AuthorCreateResponse { + author_id: author.id(), + }) + } + + pub(super) async fn author_default( + self, + _req: AuthorGetDefaultRequest, + ) -> RpcResult { + let author_id = self.default_author.get(); + Ok(AuthorGetDefaultResponse { author_id }) + } + + pub(super) async fn author_set_default( + self, + req: AuthorSetDefaultRequest, + ) -> RpcResult { + self.default_author + .set(req.author_id, &self.sync) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(AuthorSetDefaultResponse) + } + + pub(super) fn author_list( + self, + _req: AuthorListRequest, + ) -> impl Stream> + Unpin { + let (tx, rx) = async_channel::bounded(ITER_CHANNEL_CAP); + let sync = self.sync.clone(); + // we need to spawn a task to send our request to the sync handle, because the method + // itself must be sync. 
+ tokio::task::spawn(async move { + let tx2 = tx.clone(); + if let Err(err) = sync.list_authors(tx).await { + tx2.send(Err(err)).await.ok(); + } + }); + rx.boxed().map(|r| { + r.map(|author_id| AuthorListResponse { author_id }) + .map_err(|e| RpcError::new(&*e)) + }) + } + + pub(super) async fn author_import( + self, + req: AuthorImportRequest, + ) -> RpcResult { + let author_id = self + .sync + .import_author(req.author) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(AuthorImportResponse { author_id }) + } + + pub(super) async fn author_export( + self, + req: AuthorExportRequest, + ) -> RpcResult { + let author = self + .sync + .export_author(req.author) + .await + .map_err(|e| RpcError::new(&*e))?; + + Ok(AuthorExportResponse { author }) + } + + pub(super) async fn author_delete( + self, + req: AuthorDeleteRequest, + ) -> RpcResult { + if req.author == self.default_author.get() { + return Err(RpcError::new(&*anyhow!( + "Deleting the default author is not supported" + ))); + } + self.sync + .delete_author(req.author) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(AuthorDeleteResponse) + } + + pub(super) async fn doc_create(self, _req: DocCreateRequest) -> RpcResult { + let namespace = NamespaceSecret::new(&mut rand::rngs::OsRng {}); + let id = namespace.id(); + self.sync + .import_namespace(namespace.into()) + .await + .map_err(|e| RpcError::new(&*e))?; + self.sync + .open(id, Default::default()) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(DocCreateResponse { id }) + } + + pub(super) async fn doc_drop(self, req: DropRequest) -> RpcResult { + let DropRequest { doc_id } = req; + self.leave(doc_id, true) + .await + .map_err(|e| RpcError::new(&*e))?; + self.sync + .drop_replica(doc_id) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(DropResponse {}) + } + + pub(super) fn doc_list( + self, + _req: DocListRequest, + ) -> impl Stream> + Unpin { + let (tx, rx) = async_channel::bounded(ITER_CHANNEL_CAP); + let sync = self.sync.clone(); + // we need to spawn a task to send our request to the sync handle, because the method + // itself must be sync. 
+ tokio::task::spawn(async move { + let tx2 = tx.clone(); + if let Err(err) = sync.list_replicas(tx).await { + tx2.send(Err(err)).await.ok(); + } + }); + rx.boxed().map(|r| { + r.map(|(id, capability)| DocListResponse { id, capability }) + .map_err(|e| RpcError::new(&*e)) + }) + } + + pub(super) async fn doc_open(self, req: OpenRequest) -> RpcResult { + self.sync + .open(req.doc_id, Default::default()) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(OpenResponse {}) + } + + pub(super) async fn doc_close(self, req: CloseRequest) -> RpcResult { + self.sync + .close(req.doc_id) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(CloseResponse {}) + } + + pub(super) async fn doc_status(self, req: StatusRequest) -> RpcResult { + let status = self + .sync + .get_state(req.doc_id) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(StatusResponse { status }) + } + + pub(super) async fn doc_share(self, req: ShareRequest) -> RpcResult { + let ShareRequest { + doc_id, + mode, + addr_options, + } = req; + let mut me = self + .endpoint + .node_addr() + .await + .map_err(|e| RpcError::new(&*e))?; + me.apply_options(addr_options); + + let capability = match mode { + ShareMode::Read => crate::Capability::Read(doc_id), + ShareMode::Write => { + let secret = self + .sync + .export_secret_key(doc_id) + .await + .map_err(|e| RpcError::new(&*e))?; + crate::Capability::Write(secret) + } + }; + self.start_sync(doc_id, vec![]) + .await + .map_err(|e| RpcError::new(&*e))?; + + Ok(ShareResponse(DocTicket { + capability, + nodes: vec![me], + })) + } + + pub(super) async fn doc_subscribe( + self, + req: DocSubscribeRequest, + ) -> RpcResult>> { + let stream = self + .subscribe(req.doc_id) + .await + .map_err(|e| RpcError::new(&*e))?; + + Ok(stream.map(|el| { + el.map(|event| DocSubscribeResponse { event }) + .map_err(|e| RpcError::new(&*e)) + })) + } + + pub(super) async fn doc_import(self, req: DocImportRequest) -> RpcResult { + let DocImportRequest { capability } = req; + let doc_id = self + .sync + .import_namespace(capability) + .await + .map_err(|e| RpcError::new(&*e))?; + self.sync + .open(doc_id, Default::default()) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(DocImportResponse { doc_id }) + } + + pub(super) async fn doc_start_sync( + self, + req: StartSyncRequest, + ) -> RpcResult { + let StartSyncRequest { doc_id, peers } = req; + self.start_sync(doc_id, peers) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(StartSyncResponse {}) + } + + pub(super) async fn doc_leave(self, req: LeaveRequest) -> RpcResult { + let LeaveRequest { doc_id } = req; + self.leave(doc_id, false) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(LeaveResponse {}) + } + + pub(super) async fn doc_set(self, req: SetRequest) -> RpcResult { + let blobs_store = self.blob_store(); + let SetRequest { + doc_id, + author_id, + key, + value, + } = req; + let len = value.len(); + let tag = blobs_store + .import_bytes(value, BlobFormat::Raw) + .await + .map_err(|e| RpcError::new(&e))?; + self.sync + .insert_local(doc_id, author_id, key.clone(), *tag.hash(), len as u64) + .await + .map_err(|e| RpcError::new(&*e))?; + let entry = self + .sync + .get_exact(doc_id, author_id, key, false) + .await + .map_err(|e| RpcError::new(&*e))? 
+ .ok_or_else(|| RpcError::new(&*anyhow!("failed to get entry after insertion")))?; + Ok(SetResponse { entry }) + } + + pub(super) async fn doc_del(self, req: DelRequest) -> RpcResult { + let DelRequest { + doc_id, + author_id, + prefix, + } = req; + let removed = self + .sync + .delete_prefix(doc_id, author_id, prefix) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(DelResponse { removed }) + } + + pub(super) async fn doc_set_hash(self, req: SetHashRequest) -> RpcResult { + let SetHashRequest { + doc_id, + author_id, + key, + hash, + size, + } = req; + self.sync + .insert_local(doc_id, author_id, key.clone(), hash, size) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(SetHashResponse {}) + } + + pub(super) fn doc_get_many( + self, + req: GetManyRequest, + ) -> impl Stream> + Unpin { + let GetManyRequest { doc_id, query } = req; + let (tx, rx) = async_channel::bounded(ITER_CHANNEL_CAP); + let sync = self.sync.clone(); + // we need to spawn a task to send our request to the sync handle, because the method + // itself must be sync. + tokio::task::spawn(async move { + let tx2 = tx.clone(); + if let Err(err) = sync.get_many(doc_id, query, tx).await { + tx2.send(Err(err)).await.ok(); + } + }); + rx.boxed().map(|r| { + r.map(|entry| GetManyResponse { entry }) + .map_err(|e| RpcError::new(&*e)) + }) + } + + pub(super) async fn doc_get_exact(self, req: GetExactRequest) -> RpcResult { + let GetExactRequest { + doc_id, + author, + key, + include_empty, + } = req; + let entry = self + .sync + .get_exact(doc_id, author, key, include_empty) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(GetExactResponse { entry }) + } + + pub(super) async fn doc_set_download_policy( + self, + req: SetDownloadPolicyRequest, + ) -> RpcResult { + self.sync + .set_download_policy(req.doc_id, req.policy) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(SetDownloadPolicyResponse {}) + } + + pub(super) async fn doc_get_download_policy( + self, + req: GetDownloadPolicyRequest, + ) -> RpcResult { + let policy = self + .sync + .get_download_policy(req.doc_id) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(GetDownloadPolicyResponse { policy }) + } + + pub(super) async fn doc_get_sync_peers( + self, + req: GetSyncPeersRequest, + ) -> RpcResult { + let peers = self + .sync + .get_sync_peers(req.doc_id) + .await + .map_err(|e| RpcError::new(&*e))?; + Ok(GetSyncPeersResponse { peers }) + } + + pub(super) fn doc_import_file( + self, + msg: ImportFileRequest, + ) -> impl Stream { + // provide a little buffer so that we don't slow down the sender + let (tx, rx) = async_channel::bounded(32); + let tx2 = tx.clone(); + let this = self.clone(); + self.local_pool_handle().spawn_detached(|| async move { + if let Err(e) = this.doc_import_file0(msg, tx).await { + tx2.send(super::client::docs::ImportProgress::Abort(RpcError::new( + &*e, + ))) + .await + .ok(); + } + }); + rx.map(ImportFileResponse) + } + + async fn doc_import_file0( + self, + msg: ImportFileRequest, + progress: async_channel::Sender, + ) -> anyhow::Result<()> { + use std::collections::BTreeMap; + + use iroh_blobs::store::ImportMode; + + use super::client::docs::ImportProgress as DocImportProgress; + + let progress = AsyncChannelProgressSender::new(progress); + let names = Arc::new(Mutex::new(BTreeMap::new())); + // convert import progress to provide progress + let import_progress = progress.clone().with_filter_map(move |x| match x { + ImportProgress::Found { id, name } => { + names.lock().unwrap().insert(id, name); + None + } + ImportProgress::Size { id, size } 
=> { + let name = names.lock().unwrap().remove(&id)?; + Some(DocImportProgress::Found { id, name, size }) + } + ImportProgress::OutboardProgress { id, offset } => { + Some(DocImportProgress::Progress { id, offset }) + } + ImportProgress::OutboardDone { hash, id } => { + Some(DocImportProgress::IngestDone { hash, id }) + } + _ => None, + }); + let ImportFileRequest { + doc_id, + author_id, + key, + path: root, + in_place, + } = msg; + // Check that the path is absolute and exists. + anyhow::ensure!(root.is_absolute(), "path must be absolute"); + anyhow::ensure!( + root.exists(), + "trying to add missing path: {}", + root.display() + ); + + let import_mode = match in_place { + true => ImportMode::TryReference, + false => ImportMode::Copy, + }; + + let blobs = self.blob_store(); + let (temp_tag, size) = blobs + .import_file(root, import_mode, BlobFormat::Raw, import_progress) + .await?; + + let hash_and_format = temp_tag.inner(); + let HashAndFormat { hash, .. } = *hash_and_format; + self.doc_set_hash(SetHashRequest { + doc_id, + author_id, + key: key.clone(), + hash, + size, + }) + .await?; + drop(temp_tag); + progress.send(DocImportProgress::AllDone { key }).await?; + Ok(()) + } + + pub(super) fn doc_export_file( + self, + msg: ExportFileRequest, + ) -> impl Stream { + let (tx, rx) = async_channel::bounded(1024); + let tx2 = tx.clone(); + let this = self.clone(); + self.local_pool_handle().spawn_detached(|| async move { + if let Err(e) = this.doc_export_file0(msg, tx).await { + tx2.send(ExportProgress::Abort(RpcError::new(&*e))) + .await + .ok(); + } + }); + rx.map(ExportFileResponse) + } + + async fn doc_export_file0( + self, + msg: ExportFileRequest, + progress: async_channel::Sender, + ) -> anyhow::Result<()> { + let progress = AsyncChannelProgressSender::new(progress); + let ExportFileRequest { entry, path, mode } = msg; + let key = bytes::Bytes::from(entry.key().to_vec()); + let export_progress = progress.clone().with_map(move |mut x| { + // assign the doc key to the `meta` field of the initial progress event + if let ExportProgress::Found { meta, .. } = &mut x { + *meta = Some(key.clone()) + } + x + }); + + let blobs = self.blob_store(); + iroh_blobs::export::export( + blobs, + entry.content_hash(), + path, + ExportFormat::Blob, + mode, + export_progress, + ) + .await?; + progress.send(ExportProgress::AllDone).await?; + Ok(()) + } +} diff --git a/src/rpc/proto.rs b/src/rpc/proto.rs new file mode 100644 index 0000000..daad934 --- /dev/null +++ b/src/rpc/proto.rs @@ -0,0 +1,540 @@ +//! Protocol definitions for RPC. + +use std::path::PathBuf; + +use bytes::Bytes; +use iroh::NodeAddr; +use iroh_base::node_addr::AddrInfoOptions; +use iroh_blobs::{export::ExportProgress, store::ExportMode, Hash}; +use nested_enum_utils::enum_conversions; +use quic_rpc::pattern::try_server_streaming::StreamCreated; +use quic_rpc_derive::rpc_requests; +use serde::{Deserialize, Serialize}; + +use super::{ + client::docs::{ImportProgress, ShareMode}, + RpcError, RpcResult, +}; +use crate::{ + actor::OpenState, + engine::LiveEvent, + store::{DownloadPolicy, Query}, + Author, AuthorId, Capability, CapabilityKind, DocTicket, Entry, NamespaceId, PeerIdBytes, + SignedEntry, +}; + +/// The RPC service type for the docs protocol. 
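+///
+/// Connecting a client is transport-agnostic. An in-memory sketch (illustrative,
+/// mirroring `RpcHandler::new` in `src/rpc.rs`):
+///
+/// ```rust,ignore
+/// let (listener, connector) = quic_rpc::transport::flume::channel(1);
+/// let client = crate::rpc::client::docs::MemClient::new(quic_rpc::RpcClient::new(connector));
+/// ```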
+#[derive(Debug, Clone)] +pub struct RpcService; + +impl quic_rpc::Service for RpcService { + type Req = Request; + type Res = Response; +} + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions] +#[rpc_requests(RpcService)] +pub enum Request { + #[rpc(response = RpcResult)] + Open(OpenRequest), + #[rpc(response = RpcResult)] + Close(CloseRequest), + #[rpc(response = RpcResult)] + Status(StatusRequest), + #[server_streaming(response = RpcResult)] + List(DocListRequest), + #[rpc(response = RpcResult)] + Create(CreateRequest), + #[rpc(response = RpcResult)] + Drop(DropRequest), + #[rpc(response = RpcResult)] + Import(ImportRequest), + #[rpc(response = RpcResult)] + Set(SetRequest), + #[rpc(response = RpcResult)] + SetHash(SetHashRequest), + #[server_streaming(response = RpcResult)] + Get(GetManyRequest), + #[rpc(response = RpcResult)] + GetExact(GetExactRequest), + #[server_streaming(response = ImportFileResponse)] + ImportFile(ImportFileRequest), + #[server_streaming(response = ExportFileResponse)] + ExportFile(ExportFileRequest), + #[rpc(response = RpcResult)] + Del(DelRequest), + #[rpc(response = RpcResult)] + StartSync(StartSyncRequest), + #[rpc(response = RpcResult)] + Leave(LeaveRequest), + #[rpc(response = RpcResult)] + Share(ShareRequest), + #[try_server_streaming(create_error = RpcError, item_error = RpcError, item = DocSubscribeResponse)] + Subscribe(DocSubscribeRequest), + #[rpc(response = RpcResult)] + GetDownloadPolicy(GetDownloadPolicyRequest), + #[rpc(response = RpcResult)] + SetDownloadPolicy(SetDownloadPolicyRequest), + #[rpc(response = RpcResult)] + GetSyncPeers(GetSyncPeersRequest), + #[server_streaming(response = RpcResult)] + AuthorList(AuthorListRequest), + #[rpc(response = RpcResult)] + AuthorCreate(AuthorCreateRequest), + #[rpc(response = RpcResult)] + AuthorGetDefault(AuthorGetDefaultRequest), + #[rpc(response = RpcResult)] + AuthorSetDefault(AuthorSetDefaultRequest), + #[rpc(response = RpcResult)] + AuthorImport(AuthorImportRequest), + #[rpc(response = RpcResult)] + AuthorExport(AuthorExportRequest), + #[rpc(response = RpcResult)] + AuthorDelete(AuthorDeleteRequest), +} + +#[allow(missing_docs)] +#[derive(strum::Display, Debug, Serialize, Deserialize)] +#[enum_conversions] +pub enum Response { + Open(RpcResult), + Close(RpcResult), + Status(RpcResult), + List(RpcResult), + Create(RpcResult), + Drop(RpcResult), + Import(RpcResult), + Set(RpcResult), + SetHash(RpcResult), + Get(RpcResult), + GetExact(RpcResult), + ImportFile(ImportFileResponse), + ExportFile(ExportFileResponse), + Del(RpcResult), + Share(RpcResult), + StartSync(RpcResult), + Leave(RpcResult), + Subscribe(RpcResult), + GetDownloadPolicy(RpcResult), + SetDownloadPolicy(RpcResult), + GetSyncPeers(RpcResult), + StreamCreated(RpcResult), + AuthorList(RpcResult), + AuthorCreate(RpcResult), + AuthorGetDefault(RpcResult), + AuthorSetDefault(RpcResult), + AuthorImport(RpcResult), + AuthorExport(RpcResult), + AuthorDelete(RpcResult), +} + +/// Subscribe to events for a document. 
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DocSubscribeRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`DocSubscribeRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DocSubscribeResponse {
+    /// The event that occurred on the document
+    pub event: LiveEvent,
+}
+
+/// List all documents
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DocListRequest {}
+
+/// Response to [`DocListRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ListResponse {
+    /// The document id
+    pub id: NamespaceId,
+    /// The capability over the document.
+    pub capability: CapabilityKind,
+}
+
+/// Create a new document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct CreateRequest {}
+
+/// Response to [`CreateRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct CreateResponse {
+    /// The document id
+    pub id: NamespaceId,
+}
+
+/// Import a document from a capability.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ImportRequest {
+    /// The namespace capability.
+    pub capability: Capability,
+}
+
+/// Response to [`ImportRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ImportResponse {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Share a document with peers over a ticket.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ShareRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// Whether to share read or write access to the document
+    pub mode: ShareMode,
+    /// Configuration of the addresses in the ticket.
+    pub addr_options: AddrInfoOptions,
+}
+
+/// The response to [`ShareRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ShareResponse(pub DocTicket);
+
+/// Get info on a document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct StatusRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`StatusRequest`]
+// TODO: actually provide info
+#[derive(Serialize, Deserialize, Debug)]
+pub struct StatusResponse {
+    /// Live sync status
+    pub status: OpenState,
+}
+
+/// Open a document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct OpenRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`OpenRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct OpenResponse {}
+
+/// Close a document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct CloseRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`CloseRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct CloseResponse {}
+
+/// Start to sync a doc with peers.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct StartSyncRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// List of peers to join
+    pub peers: Vec<NodeAddr>,
+}
+
+/// Response to [`StartSyncRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct StartSyncResponse {}
+
+/// Stop the live sync for a doc.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct LeaveRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`LeaveRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct LeaveResponse {}
+
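A small, hedged sketch of assembling a share request by hand, using only the types defined above; client code normally goes through the `Doc::share` helper, which the tests later in this patch call with exactly these arguments (`ShareMode::Write`, `AddrInfoOptions::RelayAndAddresses`). The `share_write` name is illustrative:

    fn share_write(doc_id: NamespaceId) -> Request {
        ShareRequest {
            doc_id,
            mode: ShareMode::Write,
            addr_options: AddrInfoOptions::RelayAndAddresses,
        }
        .into()
    }

+/// Stop the live sync for a doc, and delete the document.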
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DropRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`DropRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DropResponse {}
+
+/// Set an entry in a document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SetRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// Author of this entry.
+    pub author_id: AuthorId,
+    /// Key of this entry.
+    pub key: Bytes,
+    /// Value of this entry.
+    // TODO: Allow providing the hash directly
+    // TODO: Add a way to provide content as stream
+    pub value: Bytes,
+}
+
+/// Response to [`SetRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SetResponse {
+    /// The newly-created entry.
+    pub entry: SignedEntry,
+}
+
+/// A request to the node to add the data at the given filepath as an entry to the document
+///
+/// Will produce a stream of [`ImportProgress`] messages.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ImportFileRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// Author of this entry.
+    pub author_id: AuthorId,
+    /// Key of this entry.
+    pub key: Bytes,
+    /// The filepath to the data
+    ///
+    /// This should be an absolute path valid for the file system on which
+    /// the node runs. Usually the cli will run on the same machine as the
+    /// node, so this should be an absolute path on the cli machine.
+    pub path: PathBuf,
+    /// True if the provider can assume that the data will not change, so it
+    /// can be shared in place.
+    pub in_place: bool,
+}
+
+/// Wrapper around [`ImportProgress`].
+#[derive(Debug, Serialize, Deserialize, derive_more::Into)]
+pub struct ImportFileResponse(pub ImportProgress);
+
+/// A request to the node to save the data of the entry to the given filepath
+///
+/// Will produce a stream of [`ExportFileResponse`] messages.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ExportFileRequest {
+    /// The entry you want to export
+    pub entry: Entry,
+    /// The filepath to where the data should be saved
+    ///
+    /// This should be an absolute path valid for the file system on which
+    /// the node runs. Usually the cli will run on the same machine as the
+    /// node, so this should be an absolute path on the cli machine.
+    pub path: PathBuf,
+    /// The mode of exporting. Setting this to `ExportMode::TryReference` means the
+    /// export will try to reference the data in place instead of copying it.
+    pub mode: ExportMode,
+}
+
+/// Progress messages for a doc export operation
+///
+/// An export operation involves reading the entry from the database and saving the entry to the
+/// given `outpath`
+#[derive(Debug, Serialize, Deserialize, derive_more::Into)]
+pub struct ExportFileResponse(pub ExportProgress);
+
+/// Delete entries in a document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DelRequest {
+    /// The document id.
+    pub doc_id: NamespaceId,
+    /// Author of this entry.
+    pub author_id: AuthorId,
+    /// Prefix to delete.
+    pub prefix: Bytes,
+}
+
+/// Response to [`DelRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DelResponse {
+    /// The number of entries that were removed.
+    pub removed: usize,
+}
+
+/// Set an entry in a document via its hash
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SetHashRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// Author of this entry.
+    pub author_id: AuthorId,
+    /// Key of this entry.
+    pub key: Bytes,
+    /// Hash of this entry.
+    pub hash: Hash,
+    /// Size of this entry.
+    pub size: u64,
+}
+
+/// Response to [`SetHashRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SetHashResponse {}
+
+/// Get entries from a document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetManyRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// Query to run
+    pub query: Query,
+}
+
+/// Response to [`GetManyRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetManyResponse {
+    /// The document entry
+    pub entry: SignedEntry,
+}
+
+/// Get a single entry from a document, by key and author
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetExactRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// Key matcher
+    pub key: Bytes,
+    /// Author matcher
+    pub author: AuthorId,
+    /// Whether to include empty entries (prefix deletion markers)
+    pub include_empty: bool,
+}
+
+/// Response to [`GetExactRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetExactResponse {
+    /// The document entry
+    pub entry: Option<SignedEntry>,
+}
+
+/// Set a download policy
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SetDownloadPolicyRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+    /// Download policy
+    pub policy: DownloadPolicy,
+}
+
+/// Response to [`SetDownloadPolicyRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SetDownloadPolicyResponse {}
+
+/// Get a download policy
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetDownloadPolicyRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`GetDownloadPolicyRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetDownloadPolicyResponse {
+    /// The download policy
+    pub policy: DownloadPolicy,
+}
+
+/// Get peers for document
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetSyncPeersRequest {
+    /// The document id
+    pub doc_id: NamespaceId,
+}
+
+/// Response to [`GetSyncPeersRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct GetSyncPeersResponse {
+    /// List of peer ids
+    pub peers: Option<Vec<PeerIdBytes>>,
+}
+
+/// List document authors for which we have a secret key.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorListRequest {}
+
+/// Response for [`AuthorListRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorListResponse {
+    /// The author id
+    pub author_id: AuthorId,
+}
+
+/// Create a new document author.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorCreateRequest;
+
+/// Response for [`AuthorCreateRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorCreateResponse {
+    /// The id of the created author
+    pub author_id: AuthorId,
+}
+
+/// Get the default author.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorGetDefaultRequest;
+
+/// Response for [`AuthorGetDefaultRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorGetDefaultResponse {
+    /// The id of the author
+    pub author_id: AuthorId,
+}
+
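The download-policy requests above carry a `DownloadPolicy`, which comes in two shapes that the sync tests at the end of this patch exercise: download everything except the listed filters, or nothing except them, with filters matching keys exactly or by prefix. A hedged sketch (the key strings and the `example_policies` helper are illustrative):

    fn example_policies() -> (DownloadPolicy, DownloadPolicy) {
        // skip content under a key prefix, fetch everything else
        let skip_prefix =
            DownloadPolicy::EverythingExcept(vec![FilterKind::Prefix("logs/".into())]);
        // fetch only a single exact key, skip everything else
        let single_key =
            DownloadPolicy::NothingExcept(vec![FilterKind::Exact("readme".into())]);
        (skip_prefix, single_key)
    }

+/// Set the default author.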
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorSetDefaultRequest {
+    /// The id of the author
+    pub author_id: AuthorId,
+}
+
+/// Response for [`AuthorSetDefaultRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorSetDefaultResponse;
+
+/// Delete an author
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorDeleteRequest {
+    /// The id of the author to delete
+    pub author: AuthorId,
+}
+
+/// Response for [`AuthorDeleteRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorDeleteResponse;
+
+/// Exports an author
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorExportRequest {
+    /// The id of the author to export
+    pub author: AuthorId,
+}
+
+/// Response for [`AuthorExportRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorExportResponse {
+    /// The author
+    pub author: Option<Author>,
+}
+
+/// Import author from secret key
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorImportRequest {
+    /// The author to import
+    pub author: Author,
+}
+
+/// Response to [`AuthorImportRequest`]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AuthorImportResponse {
+    /// The author id of the imported author
+    pub author_id: AuthorId,
+}
diff --git a/src/store/fs.rs b/src/store/fs.rs
index 45723aa..75827bd 100644
--- a/src/store/fs.rs
+++ b/src/store/fs.rs
@@ -630,13 +630,13 @@ impl<'a> StoreInstance<'a> {
     }
 }
 
-impl<'a> PublicKeyStore for StoreInstance<'a> {
+impl PublicKeyStore for StoreInstance<'_> {
     fn public_key(&self, id: &[u8; 32]) -> std::result::Result<VerifyingKey, SignatureError> {
         self.store.public_key(id)
     }
 }
 
-impl<'a> super::DownloadPolicyStore for StoreInstance<'a> {
+impl super::DownloadPolicyStore for StoreInstance<'_> {
     fn get_download_policy(&mut self, namespace: &NamespaceId) -> Result<DownloadPolicy> {
         self.store.get_download_policy(namespace)
     }
@@ -936,7 +936,7 @@ impl<'a> LatestIterator<'a> {
     }
 }
 
-impl<'a> Iterator for LatestIterator<'a> {
+impl Iterator for LatestIterator<'_> {
     type Item = Result<(AuthorId, u64, Vec<u8>)>;
 
     fn next(&mut self) -> Option<Self::Item> {
diff --git a/src/store/fs/ranges.rs b/src/store/fs/ranges.rs
index 34aaa80..58ede42 100644
--- a/src/store/fs/ranges.rs
+++ b/src/store/fs/ranges.rs
@@ -37,7 +37,7 @@ pub trait RangeExt<K: Key, V: Value> {
     }
 }
 
-impl<'a, K: Key + 'static, V: Value + 'static> RangeExt<K, V> for Range<'a, K, V> {
+impl<K: Key + 'static, V: Value + 'static> RangeExt<K, V> for Range<'_, K, V> {
     fn next_map<T>(
         &mut self,
         map: impl for<'x> Fn(K::SelfType<'x>, V::SelfType<'x>) -> T,
@@ -114,7 +114,7 @@ impl RecordsRange<'static> {
     }
 }
 
-impl<'a> Iterator for RecordsRange<'a> {
+impl Iterator for RecordsRange<'_> {
     type Item = anyhow::Result<SignedEntry>;
     fn next(&mut self) -> Option<Self::Item> {
         self.0.next_map(into_entry)
diff --git a/src/sync.rs b/src/sync.rs
index ad22a3c..fdccf05 100644
--- a/src/sync.rs
+++ b/src/sync.rs
@@ -945,6 +945,21 @@ impl Entry {
         &self.record
     }
 
+    /// Get the content hash of the record.
+    pub fn content_hash(&self) -> Hash {
+        self.record.hash
+    }
+
+    /// Get the content length of the record.
+    pub fn content_len(&self) -> u64 {
+        self.record.len
+    }
+
+    /// Get the timestamp of the record.
+    pub fn timestamp(&self) -> u64 {
+        self.record.timestamp
+    }
+
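The three accessors added to `Entry` just above are what the new tests lean on; tests/client.rs, for instance, fetches blob contents via `entry.content_hash()`. A minimal sketch of consuming them, assuming `iroh_docs::Entry` and `iroh_blobs::Hash` are in scope (`describe` is an illustrative name):

    fn describe(entry: &Entry) {
        let hash: Hash = entry.content_hash();
        let len: u64 = entry.content_len();
        let ts: u64 = entry.timestamp(); // record timestamp; see `Record` for its unit
        println!("{hash}: {len} bytes, timestamp {ts}");
    }

     /// Serialize this entry into its canonical byte representation used for signing.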
pub fn encode(&self, out: &mut Vec) { self.id.encode(out); @@ -2277,7 +2292,7 @@ mod tests { store: &'a mut Store, namespace: NamespaceId, } - impl<'a> QueryTester<'a> { + impl QueryTester<'_> { fn assert(&mut self, query: impl Into, expected: Vec<(&'static str, &Author)>) { let query = query.into(); let actual = self diff --git a/src/ticket.rs b/src/ticket.rs index 07e06fc..3147bfa 100644 --- a/src/ticket.rs +++ b/src/ticket.rs @@ -1,7 +1,7 @@ //! Tickets for [`iroh-docs`] documents. +use iroh::NodeAddr; use iroh_base::ticket; -use iroh_net::NodeAddr; use serde::{Deserialize, Serialize}; use crate::Capability; @@ -65,7 +65,7 @@ impl std::str::FromStr for DocTicket { mod tests { use std::str::FromStr; - use iroh_net::key::PublicKey; + use iroh::key::PublicKey; use iroh_test::{assert_eq_hex, hexdump::parse_hexdump}; use super::*; diff --git a/tests/client.rs b/tests/client.rs new file mode 100644 index 0000000..6aacd05 --- /dev/null +++ b/tests/client.rs @@ -0,0 +1,237 @@ +#![cfg(feature = "rpc")] +use anyhow::{Context, Result}; +use futures_util::TryStreamExt; +use iroh_blobs::{ + store::ExportMode, + util::fs::{key_to_path, path_to_key}, +}; +use iroh_docs::store::Query; +use rand::RngCore; +use testresult::TestResult; +use tokio::io::AsyncWriteExt; +use util::Node; + +mod util; + +/// Test that closing a doc does not close other instances. +#[tokio::test] +async fn test_doc_close() -> Result<()> { + let _guard = iroh_test::logging::setup(); + + let node = Node::memory().spawn().await?; + let author = node.authors().default().await?; + // open doc two times + let doc1 = node.docs().create().await?; + let doc2 = node.docs().open(doc1.id()).await?.expect("doc to exist"); + // close doc1 instance + doc1.close().await?; + // operations on doc1 now fail. + assert!(doc1.set_bytes(author, "foo", "bar").await.is_err()); + // dropping doc1 will close the doc if not already closed + // wait a bit because the close-on-drop spawns a task for which we cannot track completion. + drop(doc1); + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // operations on doc2 still succeed + doc2.set_bytes(author, "foo", "bar").await?; + Ok(()) +} + +#[tokio::test] +async fn test_doc_import_export() -> TestResult<()> { + let _guard = iroh_test::logging::setup(); + + let node = Node::memory().spawn().await?; + + // create temp file + let temp_dir = tempfile::tempdir().context("tempdir")?; + + let in_root = temp_dir.path().join("in"); + tokio::fs::create_dir_all(in_root.clone()) + .await + .context("create dir all")?; + let out_root = temp_dir.path().join("out"); + + let path = in_root.join("test"); + + let size = 100; + let mut buf = vec![0u8; size]; + rand::thread_rng().fill_bytes(&mut buf); + let mut file = tokio::fs::File::create(path.clone()) + .await + .context("create file")?; + file.write_all(&buf.clone()).await.context("write_all")?; + file.flush().await.context("flush")?; + + // create doc & author + let client = node.client(); + let docs_client = client.docs(); + let doc = docs_client.create().await.context("doc create")?; + let author = client.authors().create().await.context("author create")?; + + // import file + let import_outcome = doc + .import_file( + author, + path_to_key(path.clone(), None, Some(in_root))?, + path, + true, + ) + .await + .context("import file")? + .finish() + .await + .context("import finish")?; + + // export file + let entry = doc + .get_one(Query::author(author).key_exact(import_outcome.key)) + .await + .context("get one")? 
+ .unwrap(); + let key = entry.key().to_vec(); + let export_outcome = doc + .export_file( + entry, + key_to_path(key, None, Some(out_root))?, + ExportMode::Copy, + ) + .await + .context("export file")? + .finish() + .await + .context("export finish")?; + + let got_bytes = tokio::fs::read(export_outcome.path) + .await + .context("tokio read")?; + assert_eq!(buf, got_bytes); + + Ok(()) +} + +#[tokio::test] +async fn test_authors() -> Result<()> { + let node = Node::memory().spawn().await?; + + // default author always exists + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; + assert_eq!(authors.len(), 1); + let default_author = node.authors().default().await?; + assert_eq!(authors, vec![default_author]); + + let author_id = node.authors().create().await?; + + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; + assert_eq!(authors.len(), 2); + + let author = node + .authors() + .export(author_id) + .await? + .expect("should have author"); + node.authors().delete(author_id).await?; + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; + assert_eq!(authors.len(), 1); + + node.authors().import(author).await?; + + let authors: Vec<_> = node.authors().list().await?.try_collect().await?; + assert_eq!(authors.len(), 2); + + assert!(node.authors().default().await? != author_id); + node.authors().set_default(author_id).await?; + assert_eq!(node.authors().default().await?, author_id); + + Ok(()) +} + +#[tokio::test] +async fn test_default_author_memory() -> Result<()> { + let iroh = Node::memory().spawn().await?; + let author = iroh.authors().default().await?; + assert!(iroh.authors().export(author).await?.is_some()); + assert!(iroh.authors().delete(author).await.is_err()); + Ok(()) +} + +#[tokio::test] +async fn test_default_author_persist() -> TestResult<()> { + let _guard = iroh_test::logging::setup(); + + let iroh_root_dir = tempfile::TempDir::new()?; + let iroh_root = iroh_root_dir.path(); + + // check that the default author exists and cannot be deleted. + let default_author = { + let iroh = Node::persistent(iroh_root).spawn().await?; + let author = iroh.authors().default().await?; + assert!(iroh.authors().export(author).await?.is_some()); + assert!(iroh.authors().delete(author).await.is_err()); + iroh.shutdown().await?; + author + }; + + // check that the default author is persisted across restarts. + { + let iroh = Node::persistent(iroh_root).spawn().await?; + let author = iroh.authors().default().await?; + assert_eq!(author, default_author); + assert!(iroh.authors().export(author).await?.is_some()); + assert!(iroh.authors().delete(author).await.is_err()); + iroh.shutdown().await?; + }; + + // check that a new default author is created if the default author file is deleted + // manually. + let default_author = { + tokio::fs::remove_file(iroh_root.join("default-author")).await?; + let iroh = Node::persistent(iroh_root).spawn().await?; + let author = iroh.authors().default().await?; + assert!(author != default_author); + assert!(iroh.authors().export(author).await?.is_some()); + assert!(iroh.authors().delete(author).await.is_err()); + iroh.shutdown().await?; + author + }; + + // check that the node fails to start if the default author is missing from the docs store. 
+ { + let mut docs_store = iroh_docs::store::fs::Store::persistent(iroh_root.join("docs.redb"))?; + docs_store.delete_author(default_author)?; + docs_store.flush()?; + drop(docs_store); + let iroh = Node::persistent(iroh_root).spawn().await; + assert!(iroh.is_err()); + + // somehow the blob store is not shutdown correctly (yet?) on macos. + // so we give it some time until we find a proper fix. + #[cfg(target_os = "macos")] + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + tokio::fs::remove_file(iroh_root.join("default-author")).await?; + drop(iroh); + let iroh = Node::persistent(iroh_root).spawn().await; + if let Err(cause) = iroh.as_ref() { + panic!("failed to start node: {:?}", cause); + } + iroh?.shutdown().await?; + } + + // check that the default author can be set manually and is persisted. + let default_author = { + let iroh = Node::persistent(iroh_root).spawn().await?; + let author = iroh.authors().create().await?; + iroh.authors().set_default(author).await?; + assert_eq!(iroh.authors().default().await?, author); + iroh.shutdown().await?; + author + }; + { + let iroh = Node::persistent(iroh_root).spawn().await?; + assert_eq!(iroh.authors().default().await?, default_author); + iroh.shutdown().await?; + } + + Ok(()) +} diff --git a/tests/gc.rs b/tests/gc.rs new file mode 100644 index 0000000..09de6c3 --- /dev/null +++ b/tests/gc.rs @@ -0,0 +1,521 @@ +#![cfg(feature = "rpc")] +use std::{ + io::{Cursor, Write}, + path::PathBuf, + time::Duration, +}; + +use anyhow::Result; +use bao_tree::{blake3, io::sync::Outboard, ChunkRanges}; +use bytes::Bytes; +use iroh_blobs::{ + hashseq::HashSeq, + store::{bao_tree, EntryStatus, MapMut, Store}, + util::Tag, + BlobFormat, HashAndFormat, IROH_BLOCK_SIZE, +}; +use rand::RngCore; +use util::Node; + +mod util; + +pub fn create_test_data(size: usize) -> Bytes { + let mut rand = rand::thread_rng(); + let mut res = vec![0u8; size]; + rand.fill_bytes(&mut res); + res.into() +} + +/// Take some data and encode it +pub fn simulate_remote(data: &[u8]) -> (blake3::Hash, Cursor) { + let outboard = bao_tree::io::outboard::PostOrderMemOutboard::create(data, IROH_BLOCK_SIZE); + let mut encoded = Vec::new(); + encoded + .write_all(outboard.tree.size().to_le_bytes().as_ref()) + .unwrap(); + bao_tree::io::sync::encode_ranges_validated(data, &outboard, &ChunkRanges::all(), &mut encoded) + .unwrap(); + let hash = outboard.root(); + (hash, Cursor::new(encoded.into())) +} + +/// Wrap a bao store in a node that has gc enabled. +async fn mem_node( + gc_period: Duration, +) -> ( + Node, + async_channel::Receiver<()>, +) { + let (gc_send, gc_recv) = async_channel::unbounded(); + let node = Node::memory() + .gc_interval(Some(gc_period)) + .register_gc_done_cb(Box::new(move || { + gc_send.send_blocking(()).ok(); + })) + .spawn() + .await + .unwrap(); + (node, gc_recv) +} + +/// Wrap a bao store in a node that has gc enabled. 
+async fn persistent_node( + path: PathBuf, + gc_period: Duration, +) -> ( + Node, + async_channel::Receiver<()>, +) { + let (gc_send, gc_recv) = async_channel::unbounded(); + let node = Node::persistent(path) + .gc_interval(Some(gc_period)) + .register_gc_done_cb(Box::new(move || { + gc_send.send_blocking(()).ok(); + })) + .spawn() + .await + .unwrap(); + (node, gc_recv) +} + +async fn gc_test_node() -> ( + Node, + iroh_blobs::store::mem::Store, + async_channel::Receiver<()>, +) { + let (node, gc_recv) = mem_node(Duration::from_millis(500)).await; + let store = node.blob_store().clone(); + (node, store, gc_recv) +} + +async fn step(evs: &async_channel::Receiver<()>) { + // drain the event queue, we want a new GC + while evs.try_recv().is_ok() {} + // wait for several GC cycles + for _ in 0..3 { + evs.recv().await.unwrap(); + } +} + +/// Test the absolute basics of gc, temp tags and tags for blobs. +#[tokio::test] +async fn gc_basics() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); + let (node, bao_store, evs) = gc_test_node().await; + let data1 = create_test_data(1234); + let tt1 = bao_store.import_bytes(data1, BlobFormat::Raw).await?; + let data2 = create_test_data(5678); + let tt2 = bao_store.import_bytes(data2, BlobFormat::Raw).await?; + let h1 = *tt1.hash(); + let h2 = *tt2.hash(); + // temp tags are still there, so the entries should be there + step(&evs).await; + assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::Complete); + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete); + + // drop the first tag, the entry should be gone after some time + drop(tt1); + step(&evs).await; + assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::NotFound); + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete); + + // create an explicit tag for h1 (as raw) and then delete the temp tag. Entry should still be there. + let tag = Tag::from("test"); + bao_store + .set_tag(tag.clone(), Some(HashAndFormat::raw(h2))) + .await?; + drop(tt2); + tracing::info!("dropped tt2"); + step(&evs).await; + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete); + + // delete the explicit tag, entry should be gone + bao_store.set_tag(tag, None).await?; + step(&evs).await; + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::NotFound); + + node.shutdown().await?; + Ok(()) +} + +/// Test gc for sequences of hashes that protect their children from deletion. +#[tokio::test] +async fn gc_hashseq_impl() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); + let (node, bao_store, evs) = gc_test_node().await; + let data1 = create_test_data(1234); + let tt1 = bao_store.import_bytes(data1, BlobFormat::Raw).await?; + let data2 = create_test_data(5678); + let tt2 = bao_store.import_bytes(data2, BlobFormat::Raw).await?; + let seq = vec![*tt1.hash(), *tt2.hash()] + .into_iter() + .collect::(); + let ttr = bao_store + .import_bytes(seq.into_inner(), BlobFormat::HashSeq) + .await?; + let h1 = *tt1.hash(); + let h2 = *tt2.hash(); + let hr = *ttr.hash(); + drop(tt1); + drop(tt2); + + // there is a temp tag for the link seq, so it and its entries should be there + step(&evs).await; + assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::Complete); + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete); + assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::Complete); + + // make a permanent tag for the link seq, then delete the temp tag. Entries should still be there. 
+ let tag = Tag::from("test"); + bao_store + .set_tag(tag.clone(), Some(HashAndFormat::hash_seq(hr))) + .await?; + drop(ttr); + step(&evs).await; + assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::Complete); + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::Complete); + assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::Complete); + + // change the permanent tag to be just for the linkseq itself as a blob. Only the linkseq should be there, not the entries. + bao_store + .set_tag(tag.clone(), Some(HashAndFormat::raw(hr))) + .await?; + step(&evs).await; + assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::NotFound); + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::NotFound); + assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::Complete); + + // delete the permanent tag, everything should be gone + bao_store.set_tag(tag, None).await?; + step(&evs).await; + assert_eq!(bao_store.entry_status(&h1).await?, EntryStatus::NotFound); + assert_eq!(bao_store.entry_status(&h2).await?, EntryStatus::NotFound); + assert_eq!(bao_store.entry_status(&hr).await?, EntryStatus::NotFound); + + node.shutdown().await?; + Ok(()) +} + +mod file { + use std::{io, path::PathBuf}; + + use bao_tree::{ + io::fsm::{BaoContentItem, ResponseDecoderNext}, + BaoTree, + }; + use futures_lite::StreamExt; + use iroh_blobs::{ + store::{BaoBatchWriter, ConsistencyCheckProgress, Map, MapEntryMut, ReportLevel}, + util::progress::{AsyncChannelProgressSender, ProgressSender as _}, + TempTag, + }; + use iroh_io::AsyncSliceReaderExt; + use testdir::testdir; + use tokio::io::AsyncReadExt; + + use super::*; + + fn path(root: PathBuf, suffix: &'static str) -> impl Fn(&iroh_blobs::Hash) -> PathBuf { + move |hash| root.join(format!("{}.{}", hash.to_hex(), suffix)) + } + + fn data_path(root: PathBuf) -> impl Fn(&iroh_blobs::Hash) -> PathBuf { + // this assumes knowledge of the internal directory structure of the flat store + path(root.join("data"), "data") + } + + fn outboard_path(root: PathBuf) -> impl Fn(&iroh_blobs::Hash) -> PathBuf { + // this assumes knowledge of the internal directory structure of the flat store + path(root.join("data"), "obao4") + } + + async fn check_consistency(store: &impl Store) -> anyhow::Result { + let mut max_level = ReportLevel::Trace; + let (tx, rx) = async_channel::bounded(1); + let task = tokio::task::spawn(async move { + while let Ok(ev) = rx.recv().await { + if let ConsistencyCheckProgress::Update { level, .. 
} = &ev { + max_level = max_level.max(*level); + } + } + }); + store + .consistency_check(false, AsyncChannelProgressSender::new(tx).boxed()) + .await?; + task.await?; + Ok(max_level) + } + + #[tokio::test] + async fn redb_doc_import_stress() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); + let dir = testdir!(); + let (node, _) = persistent_node(dir.join("store"), Duration::from_secs(10)).await; + let bao_store = node.blob_store().clone(); + let client = node.client(); + let doc = client.docs().create().await?; + let author = client.authors().create().await?; + let temp_path = dir.join("temp"); + tokio::fs::create_dir_all(&temp_path).await?; + let mut to_import = Vec::new(); + for i in 0..100 { + let data = create_test_data(16 * 1024 * 3 + 1); + let path = temp_path.join(format!("file{}", i)); + tokio::fs::write(&path, &data).await?; + let key = Bytes::from(format!("{}", path.display())); + to_import.push((key, path, data)); + } + for (key, path, _) in to_import.iter() { + let mut progress = doc.import_file(author, key.clone(), path, true).await?; + while let Some(msg) = progress.next().await { + tracing::info!("import progress {:?}", msg); + } + } + for (i, (key, _, expected)) in to_import.iter().enumerate() { + let Some(entry) = doc.get_exact(author, key.clone(), true).await? else { + anyhow::bail!("doc entry not found {}", i); + }; + let hash = entry.content_hash(); + let Some(content) = bao_store.get(&hash).await? else { + anyhow::bail!("content not found {} {}", i, &hash.to_hex()[..8]); + }; + let data = content.data_reader().read_to_end().await?; + assert_eq!(data, expected); + } + Ok(()) + } + + /// Test gc for sequences of hashes that protect their children from deletion. + #[tokio::test] + async fn gc_file_basics() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); + let dir = testdir!(); + let path = data_path(dir.clone()); + let outboard_path = outboard_path(dir.clone()); + let (node, evs) = persistent_node(dir.clone(), Duration::from_millis(100)).await; + let bao_store = node.blob_store().clone(); + let data1 = create_test_data(10000000); + let tt1 = bao_store + .import_bytes(data1.clone(), BlobFormat::Raw) + .await?; + let data2 = create_test_data(1000000); + let tt2 = bao_store + .import_bytes(data2.clone(), BlobFormat::Raw) + .await?; + let seq = vec![*tt1.hash(), *tt2.hash()] + .into_iter() + .collect::(); + let ttr = bao_store + .import_bytes(seq.into_inner(), BlobFormat::HashSeq) + .await?; + + let h1 = *tt1.hash(); + let h2 = *tt2.hash(); + let hr = *ttr.hash(); + + // data is protected by the temp tag + step(&evs).await; + bao_store.sync().await?; + assert!(check_consistency(&bao_store).await? <= ReportLevel::Info); + // h1 is for a giant file, so we will have both data and outboard files + assert!(path(&h1).exists()); + assert!(outboard_path(&h1).exists()); + // h2 is for a mid sized file, so we will have just the data file + assert!(path(&h2).exists()); + assert!(!outboard_path(&h2).exists()); + // hr so small that data will be inlined and outboard will not exist at all + assert!(!path(&hr).exists()); + assert!(!outboard_path(&hr).exists()); + + drop(tt1); + drop(tt2); + let tag = Tag::from("test"); + bao_store + .set_tag(tag.clone(), Some(HashAndFormat::hash_seq(*ttr.hash()))) + .await?; + drop(ttr); + + // data is now protected by a normal tag, nothing should be gone + step(&evs).await; + bao_store.sync().await?; + assert!(check_consistency(&bao_store).await? 
<= ReportLevel::Info); + // h1 is for a giant file, so we will have both data and outboard files + assert!(path(&h1).exists()); + assert!(outboard_path(&h1).exists()); + // h2 is for a mid sized file, so we will have just the data file + assert!(path(&h2).exists()); + assert!(!outboard_path(&h2).exists()); + // hr so small that data will be inlined and outboard will not exist at all + assert!(!path(&hr).exists()); + assert!(!outboard_path(&hr).exists()); + + tracing::info!("changing tag from hashseq to raw, this should orphan the children"); + bao_store + .set_tag(tag.clone(), Some(HashAndFormat::raw(hr))) + .await?; + + // now only hr itself should be protected, but not its children + step(&evs).await; + bao_store.sync().await?; + assert!(check_consistency(&bao_store).await? <= ReportLevel::Info); + // h1 should be gone + assert!(!path(&h1).exists()); + assert!(!outboard_path(&h1).exists()); + // h2 should still not be there + assert!(!path(&h2).exists()); + assert!(!outboard_path(&h2).exists()); + // hr should still not be there + assert!(!path(&hr).exists()); + assert!(!outboard_path(&hr).exists()); + + bao_store.set_tag(tag, None).await?; + step(&evs).await; + bao_store.sync().await?; + assert!(check_consistency(&bao_store).await? <= ReportLevel::Info); + // h1 should be gone + assert!(!path(&h1).exists()); + assert!(!outboard_path(&h1).exists()); + // h2 should still not be there + assert!(!path(&h2).exists()); + assert!(!outboard_path(&h2).exists()); + // hr should still not be there + assert!(!path(&hr).exists()); + assert!(!outboard_path(&hr).exists()); + + node.shutdown().await?; + + Ok(()) + } + + /// Add a file to the store in the same way a download works. + /// + /// we know the hash in advance, create a partial entry, write the data to it and + /// the outboard file, then commit it to a complete entry. + /// + /// During this time, the partial entry is protected by a temp tag. + async fn simulate_download_partial( + bao_store: &S, + data: Bytes, + ) -> io::Result<(S::EntryMut, TempTag)> { + // simulate the remote side. + let (hash, mut response) = simulate_remote(data.as_ref()); + // simulate the local side. + // we got a hash and a response from the remote side. + let tt = bao_store.temp_tag(HashAndFormat::raw(hash.into())); + // get the size + let size = response.read_u64_le().await?; + // start reading the response + let mut reading = bao_tree::io::fsm::ResponseDecoder::new( + hash, + ChunkRanges::all(), + BaoTree::new(size, IROH_BLOCK_SIZE), + response, + ); + // create the partial entry + let entry = bao_store.get_or_create(hash.into(), size).await?; + // create the + let mut bw = entry.batch_writer().await?; + let mut buf = Vec::new(); + while let ResponseDecoderNext::More((next, res)) = reading.next().await { + let item = res?; + match &item { + BaoContentItem::Parent(_) => { + buf.push(item); + } + BaoContentItem::Leaf(_) => { + buf.push(item); + let batch = std::mem::take(&mut buf); + bw.write_batch(size, batch).await?; + } + } + reading = next; + } + bw.sync().await?; + drop(bw); + Ok((entry, tt)) + } + + async fn simulate_download_complete( + bao_store: &S, + data: Bytes, + ) -> io::Result { + let (entry, tt) = simulate_download_partial(bao_store, data).await?; + // commit the entry + bao_store.insert_complete(entry).await?; + Ok(tt) + } + + /// Test that partial files are deleted. 
+ #[tokio::test] + async fn gc_file_partial() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); + let dir = testdir!(); + let path = data_path(dir.clone()); + let outboard_path = outboard_path(dir.clone()); + + let (node, evs) = persistent_node(dir.clone(), Duration::from_millis(10)).await; + let bao_store = node.blob_store().clone(); + + let data1: Bytes = create_test_data(10000000); + let (_entry, tt1) = simulate_download_partial(&bao_store, data1.clone()).await?; + drop(_entry); + let h1 = *tt1.hash(); + // partial data and outboard files should be there + step(&evs).await; + bao_store.sync().await?; + assert!(check_consistency(&bao_store).await? <= ReportLevel::Info); + assert!(path(&h1).exists()); + assert!(outboard_path(&h1).exists()); + + drop(tt1); + // partial data and outboard files should be gone + step(&evs).await; + bao_store.sync().await?; + assert!(check_consistency(&bao_store).await? <= ReportLevel::Info); + assert!(!path(&h1).exists()); + assert!(!outboard_path(&h1).exists()); + + node.shutdown().await?; + Ok(()) + } + + #[tokio::test] + async fn gc_file_stress() -> Result<()> { + let _ = tracing_subscriber::fmt::try_init(); + let dir = testdir!(); + + let (node, evs) = persistent_node(dir.clone(), Duration::from_secs(1)).await; + let bao_store = node.blob_store().clone(); + + let mut deleted = Vec::new(); + let mut live = Vec::new(); + // download + for i in 0..100 { + let data: Bytes = create_test_data(16 * 1024 * 3 + 1); + let tt = simulate_download_complete(&bao_store, data).await.unwrap(); + if i % 100 == 0 { + let tag = Tag::from(format!("test{}", i)); + bao_store + .set_tag(tag.clone(), Some(HashAndFormat::raw(*tt.hash()))) + .await?; + live.push(*tt.hash()); + } else { + deleted.push(*tt.hash()); + } + } + step(&evs).await; + + for h in deleted.iter() { + assert_eq!(bao_store.entry_status(h).await?, EntryStatus::NotFound); + assert!(!dir.join(format!("data/{}.data", h.to_hex())).exists()); + } + + for h in live.iter() { + assert_eq!(bao_store.entry_status(h).await?, EntryStatus::Complete); + assert!(dir.join(format!("data/{}.data", h.to_hex())).exists()); + } + + node.shutdown().await?; + Ok(()) + } +} diff --git a/tests/sync.rs b/tests/sync.rs new file mode 100644 index 0000000..c024ef1 --- /dev/null +++ b/tests/sync.rs @@ -0,0 +1,1390 @@ +#![cfg(feature = "rpc")] +use std::{ + collections::HashMap, + future::Future, + sync::Arc, + time::{Duration, Instant}, +}; + +use anyhow::{anyhow, bail, Context, Result}; +use bytes::Bytes; +use futures_lite::Stream; +use futures_util::{FutureExt, StreamExt, TryStreamExt}; +use iroh::{ + key::{PublicKey, SecretKey}, + RelayMode, +}; +use iroh_base::node_addr::AddrInfoOptions; +use iroh_blobs::Hash; +use iroh_docs::{ + rpc::client::docs::{Doc, Entry, LiveEvent, ShareMode}, + store::{DownloadPolicy, FilterKind, Query}, + AuthorId, ContentStatus, +}; +use rand::{CryptoRng, Rng, SeedableRng}; +use tracing::{debug, error_span, info, Instrument}; +use tracing_subscriber::{prelude::*, EnvFilter}; +mod util; +use util::{Builder, Node}; + +const TIMEOUT: Duration = Duration::from_secs(60); + +fn test_node(secret_key: SecretKey) -> Builder { + Node::memory() + .secret_key(secret_key) + .relay_mode(RelayMode::Disabled) +} + +// The function is not `async fn` so that we can take a `&mut` borrow on the `rng` without +// capturing that `&mut` lifetime in the returned future. 
This allows to call it in a loop while +// still collecting the futures before awaiting them altogether (see [`spawn_nodes`]) +fn spawn_node( + i: usize, + rng: &mut (impl CryptoRng + Rng), +) -> impl Future>> + 'static { + let secret_key = SecretKey::generate_with_rng(rng); + async move { + let node = test_node(secret_key); + let node = node.spawn().await?; + info!(?i, me = %node.node_id().fmt_short(), "node spawned"); + Ok(node) + } +} + +async fn spawn_nodes( + n: usize, + mut rng: &mut (impl CryptoRng + Rng), +) -> anyhow::Result>> { + let mut futs = vec![]; + for i in 0..n { + futs.push(spawn_node(i, &mut rng)); + } + futures_buffered::join_all(futs).await.into_iter().collect() +} + +pub fn test_rng(seed: &[u8]) -> rand_chacha::ChaCha12Rng { + rand_chacha::ChaCha12Rng::from_seed(*Hash::new(seed).as_bytes()) +} + +macro_rules! match_event { + ($pattern:pat $(if $guard:expr)? $(,)?) => { + Box::new(move |e| matches!(e, $pattern $(if $guard)?)) + }; +} + +/// This tests the simplest scenario: A node connects to another node, and performs sync. +#[tokio::test] +async fn sync_simple() -> Result<()> { + setup_logging(); + let mut rng = test_rng(b"sync_simple"); + let nodes = spawn_nodes(2, &mut rng).await?; + let clients = nodes.iter().map(|node| node.client()).collect::>(); + + // create doc on node0 + let peer0 = nodes[0].node_id(); + let author0 = clients[0].authors().create().await?; + let doc0 = clients[0].docs().create().await?; + let blobs0 = clients[0].blobs(); + let hash0 = doc0 + .set_bytes(author0, b"k1".to_vec(), b"v1".to_vec()) + .await?; + assert_latest(blobs0, &doc0, b"k1", b"v1").await; + let ticket = doc0 + .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses) + .await?; + + let mut events0 = doc0.subscribe().await?; + + info!("node1: join"); + let peer1 = nodes[1].node_id(); + let doc1 = clients[1].docs().import(ticket.clone()).await?; + let blobs1 = clients[1].blobs(); + let mut events1 = doc1.subscribe().await?; + info!("node1: assert 5 events"); + assert_next_unordered( + &mut events1, + TIMEOUT, + vec![ + Box::new(move |e| matches!(e, LiveEvent::NeighborUp(peer) if *peer == peer0)), + Box::new(move |e| matches!(e, LiveEvent::InsertRemote { from, .. } if *from == peer0 )), + Box::new(move |e| match_sync_finished(e, peer0)), + Box::new(move |e| matches!(e, LiveEvent::ContentReady { hash } if *hash == hash0)), + match_event!(LiveEvent::PendingContentReady), + ], + ) + .await; + assert_latest(blobs1, &doc1, b"k1", b"v1").await; + + info!("node0: assert 2 events"); + assert_next( + &mut events0, + TIMEOUT, + vec![ + Box::new(move |e| matches!(e, LiveEvent::NeighborUp(peer) if *peer == peer1)), + Box::new(move |e| match_sync_finished(e, peer1)), + ], + ) + .await; + + for node in nodes { + node.shutdown().await?; + } + Ok(()) +} + +/// Test subscribing to replica events (without sync) +#[tokio::test] +async fn sync_subscribe_no_sync() -> Result<()> { + let mut rng = test_rng(b"sync_subscribe"); + setup_logging(); + let node = spawn_node(0, &mut rng).await?; + let client = node.client(); + let doc = client.docs().create().await?; + let mut sub = doc.subscribe().await?; + let author = client.authors().create().await?; + doc.set_bytes(author, b"k".to_vec(), b"v".to_vec()).await?; + let event = tokio::time::timeout(Duration::from_millis(100), sub.next()).await?; + assert!( + matches!(event, Some(Ok(LiveEvent::InsertLocal { .. 
}))), + "expected InsertLocal but got {event:?}" + ); + node.shutdown().await?; + Ok(()) +} + +#[tokio::test] +async fn sync_gossip_bulk() -> Result<()> { + let n_entries: usize = std::env::var("N_ENTRIES") + .map(|x| x.parse().expect("N_ENTRIES must be a number")) + .unwrap_or(100); + let mut rng = test_rng(b"sync_gossip_bulk"); + setup_logging(); + + let nodes = spawn_nodes(2, &mut rng).await?; + let clients = nodes.iter().map(|node| node.client()).collect::>(); + + let _peer0 = nodes[0].node_id(); + let author0 = clients[0].authors().create().await?; + let doc0 = clients[0].docs().create().await?; + let mut ticket = doc0 + .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses) + .await?; + // unset peers to not yet start sync + let peers = ticket.nodes.clone(); + ticket.nodes = vec![]; + let doc1 = clients[1].docs().import(ticket).await?; + let mut events = doc1.subscribe().await?; + + // create entries for initial sync. + let now = Instant::now(); + let value = b"foo"; + for i in 0..n_entries { + let key = format!("init/{i}"); + doc0.set_bytes(author0, key.as_bytes().to_vec(), value.to_vec()) + .await?; + } + let elapsed = now.elapsed(); + info!( + "insert took {elapsed:?} for {n_entries} ({:?} per entry)", + elapsed / n_entries as u32 + ); + + let now = Instant::now(); + let mut count = 0; + doc0.start_sync(vec![]).await?; + doc1.start_sync(peers).await?; + while let Some(event) = events.next().await { + let event = event?; + if matches!(event, LiveEvent::InsertRemote { .. }) { + count += 1; + } + if count == n_entries { + break; + } + } + let elapsed = now.elapsed(); + info!( + "initial sync took {elapsed:?} for {n_entries} ({:?} per entry)", + elapsed / n_entries as u32 + ); + + // publish another 1000 entries + let mut count = 0; + let value = b"foo"; + let now = Instant::now(); + for i in 0..n_entries { + let key = format!("gossip/{i}"); + doc0.set_bytes(author0, key.as_bytes().to_vec(), value.to_vec()) + .await?; + } + let elapsed = now.elapsed(); + info!( + "insert took {elapsed:?} for {n_entries} ({:?} per entry)", + elapsed / n_entries as u32 + ); + + while let Some(event) = events.next().await { + let event = event?; + if matches!(event, LiveEvent::InsertRemote { .. }) { + count += 1; + } + if count == n_entries { + break; + } + } + let elapsed = now.elapsed(); + info!( + "gossip recv took {elapsed:?} for {n_entries} ({:?} per entry)", + elapsed / n_entries as u32 + ); + + Ok(()) +} + +/// This tests basic sync and gossip with 3 peers. 
+#[tokio::test] +#[ignore = "flaky"] +async fn sync_full_basic() -> testresult::TestResult<()> { + let mut rng = test_rng(b"sync_full_basic"); + setup_logging(); + let mut nodes = spawn_nodes(2, &mut rng).await?; + let mut clients = nodes + .iter() + .map(|node| node.client().clone()) + .collect::>(); + + // peer0: create doc and ticket + let peer0 = nodes[0].node_id(); + let author0 = clients[0].authors().create().await?; + let doc0 = clients[0].docs().create().await?; + let blobs0 = clients[0].blobs(); + let mut events0 = doc0.subscribe().await?; + let key0 = b"k1"; + let value0 = b"v1"; + let hash0 = doc0 + .set_bytes(author0, key0.to_vec(), value0.to_vec()) + .await?; + + info!("peer0: wait for 1 event (local insert)"); + let e = next(&mut events0).await; + assert!( + matches!(&e, LiveEvent::InsertLocal { entry } if entry.content_hash() == hash0), + "expected LiveEvent::InsertLocal but got {e:?}", + ); + assert_latest(blobs0, &doc0, key0, value0).await; + let ticket = doc0 + .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses) + .await?; + + info!("peer1: spawn"); + let peer1 = nodes[1].node_id(); + let author1 = clients[1].authors().create().await?; + info!("peer1: join doc"); + let doc1 = clients[1].docs().import(ticket.clone()).await?; + let blobs1 = clients[1].blobs(); + + info!("peer1: wait for 4 events (for sync and join with peer0)"); + let mut events1 = doc1.subscribe().await?; + assert_next_unordered( + &mut events1, + TIMEOUT, + vec![ + match_event!(LiveEvent::NeighborUp(peer) if *peer == peer0), + match_event!(LiveEvent::InsertRemote { from, .. } if *from == peer0 ), + Box::new(move |e| match_sync_finished(e, peer0)), + match_event!(LiveEvent::ContentReady { hash } if *hash == hash0), + match_event!(LiveEvent::PendingContentReady), + ], + ) + .await; + + info!("peer0: wait for 2 events (join & accept sync finished from peer1)"); + assert_next( + &mut events0, + TIMEOUT, + vec![ + match_event!(LiveEvent::NeighborUp(peer) if *peer == peer1), + Box::new(move |e| match_sync_finished(e, peer1)), + match_event!(LiveEvent::PendingContentReady), + ], + ) + .await; + + info!("peer1: insert entry"); + let key1 = b"k2"; + let value1 = b"v2"; + let hash1 = doc1 + .set_bytes(author1, key1.to_vec(), value1.to_vec()) + .await?; + assert_latest(blobs1, &doc1, key1, value1).await; + info!("peer1: wait for 1 event (local insert, and pendingcontentready)"); + assert_next( + &mut events1, + TIMEOUT, + vec![match_event!(LiveEvent::InsertLocal { entry} if entry.content_hash() == hash1)], + ) + .await; + + // peer0: assert events for entry received via gossip + info!("peer0: wait for 2 events (gossip'ed entry from peer1)"); + assert_next( + &mut events0, + TIMEOUT, + vec![ + Box::new( + move |e| matches!(e, LiveEvent::InsertRemote { from, content_status: ContentStatus::Missing, .. } if *from == peer1), + ), + Box::new(move |e| matches!(e, LiveEvent::ContentReady { hash } if *hash == hash1)), + ], + ).await; + assert_latest(blobs0, &doc0, key1, value1).await; + + // Note: If we could check gossip messages directly here (we can't easily), we would notice + // that peer1 will receive a `Op::ContentReady` gossip message, broadcast + // by peer0 with neighbor scope. This message is superfluous, and peer0 could know that, however + // our gossip implementation does not allow us to filter message receivers this way. 
+ + info!("peer2: spawn"); + nodes.push(spawn_node(nodes.len(), &mut rng).await?); + clients.push(nodes.last().unwrap().client().clone()); + let doc2 = clients[2].docs().import(ticket).await?; + let blobs2 = clients[2].blobs(); + let peer2 = nodes[2].node_id(); + let mut events2 = doc2.subscribe().await?; + + info!("peer2: wait for 9 events (from sync with peers)"); + assert_next_unordered_with_optionals( + &mut events2, + TIMEOUT, + // required events + vec![ + // 2 NeighborUp events + Box::new(move |e| matches!(e, LiveEvent::NeighborUp(peer) if *peer == peer0)), + Box::new(move |e| matches!(e, LiveEvent::NeighborUp(peer) if *peer == peer1)), + // 2 SyncFinished events + Box::new(move |e| match_sync_finished(e, peer0)), + Box::new(move |e| match_sync_finished(e, peer1)), + // 2 InsertRemote events + Box::new( + move |e| matches!(e, LiveEvent::InsertRemote { entry, content_status: ContentStatus::Missing, .. } if entry.content_hash() == hash0), + ), + Box::new( + move |e| matches!(e, LiveEvent::InsertRemote { entry, content_status: ContentStatus::Missing, .. } if entry.content_hash() == hash1), + ), + // 2 ContentReady events + Box::new(move |e| matches!(e, LiveEvent::ContentReady { hash } if *hash == hash0)), + Box::new(move |e| matches!(e, LiveEvent::ContentReady { hash } if *hash == hash1)), + // at least 1 PendingContentReady + match_event!(LiveEvent::PendingContentReady), + ], + // optional events + // it may happen that we run sync two times against our two peers: + // if the first sync (as a result of us joining the peer manually through the ticket) completes + // before the peer shows up as a neighbor, we run sync again for the NeighborUp event. + vec![ + // 2 SyncFinished events + Box::new(move |e| match_sync_finished(e, peer0)), + Box::new(move |e| match_sync_finished(e, peer1)), + match_event!(LiveEvent::PendingContentReady), + match_event!(LiveEvent::PendingContentReady), + ] + ).await; + assert_latest(blobs2, &doc2, b"k1", b"v1").await; + assert_latest(blobs2, &doc2, b"k2", b"v2").await; + + info!("peer0: wait for 2 events (join & accept sync finished from peer2)"); + assert_next( + &mut events0, + TIMEOUT, + vec![ + Box::new(move |e| matches!(e, LiveEvent::NeighborUp(peer) if *peer == peer2)), + Box::new(move |e| match_sync_finished(e, peer2)), + match_event!(LiveEvent::PendingContentReady), + ], + ) + .await; + + info!("peer1: wait for 2 events (join & accept sync finished from peer2)"); + assert_next( + &mut events1, + TIMEOUT, + vec![ + Box::new(move |e| matches!(e, LiveEvent::NeighborUp(peer) if *peer == peer2)), + Box::new(move |e| match_sync_finished(e, peer2)), + match_event!(LiveEvent::PendingContentReady), + ], + ) + .await; + + info!("shutdown"); + for node in nodes { + node.shutdown().await?; + } + + Ok(()) +} + +#[tokio::test] +async fn sync_open_close() -> Result<()> { + let mut rng = test_rng(b"sync_subscribe_stop_close"); + setup_logging(); + let node = spawn_node(0, &mut rng).await?; + let client = node.client(); + + let doc = client.docs().create().await?; + let status = doc.status().await?; + assert_eq!(status.handles, 1); + + let doc2 = client.docs().open(doc.id()).await?.unwrap(); + let status = doc2.status().await?; + assert_eq!(status.handles, 2); + + doc.close().await?; + assert!(doc.status().await.is_err()); + + let status = doc2.status().await?; + assert_eq!(status.handles, 1); + + Ok(()) +} + +#[tokio::test] +async fn sync_subscribe_stop_close() -> Result<()> { + let mut rng = test_rng(b"sync_subscribe_stop_close"); + setup_logging(); + let node = 
spawn_node(0, &mut rng).await?;
+    let client = node.client();
+
+    let doc = client.docs().create().await?;
+    let author = client.authors().create().await?;
+
+    let status = doc.status().await?;
+    assert_eq!(status.subscribers, 0);
+    assert_eq!(status.handles, 1);
+    assert!(!status.sync);
+
+    doc.start_sync(vec![]).await?;
+    let status = doc.status().await?;
+    assert!(status.sync);
+    assert_eq!(status.handles, 2);
+    assert_eq!(status.subscribers, 1);
+
+    let sub = doc.subscribe().await?;
+    let status = doc.status().await?;
+    assert_eq!(status.subscribers, 2);
+    drop(sub);
+    // trigger an event that makes the actor check if the event channels are still connected
+    doc.set_bytes(author, b"x".to_vec(), b"x".to_vec()).await?;
+    let status = doc.status().await?;
+    assert_eq!(status.subscribers, 1);
+
+    doc.leave().await?;
+    let status = doc.status().await?;
+    assert_eq!(status.subscribers, 0);
+    assert_eq!(status.handles, 1);
+    assert!(!status.sync);
+
+    Ok(())
+}
+
+#[tokio::test]
+#[cfg(feature = "test-utils")]
+async fn test_sync_via_relay() -> Result<()> {
+    let _guard = iroh_test::logging::setup();
+    let (relay_map, _relay_url, _guard) = iroh::test_utils::run_relay_server().await?;
+
+    let node1 = Node::memory()
+        .relay_mode(RelayMode::Custom(relay_map.clone()))
+        .insecure_skip_relay_cert_verify(true)
+        .spawn()
+        .await?;
+    let node1_id = node1.node_id();
+    let node2 = Node::memory()
+        .bind_random_port()
+        .relay_mode(RelayMode::Custom(relay_map.clone()))
+        .insecure_skip_relay_cert_verify(true)
+        .spawn()
+        .await?;
+
+    let doc1 = node1.docs().create().await?;
+    let author1 = node1.authors().create().await?;
+    let inserted_hash = doc1
+        .set_bytes(author1, b"foo".to_vec(), b"bar".to_vec())
+        .await?;
+    let mut ticket = doc1
+        .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses)
+        .await?;
+
+    // remove direct addrs to force connect via relay
+    ticket.nodes[0].info.direct_addresses = Default::default();
+
+    // join
+    let doc2 = node2.docs().import(ticket).await?;
+    let blobs2 = node2.blobs();
+    let mut events = doc2.subscribe().await?;
+
+    assert_next_unordered_with_optionals(
+        &mut events,
+        Duration::from_secs(2),
+        vec![
+            Box::new(move |e| matches!(e, LiveEvent::NeighborUp(n) if *n == node1_id)),
+            Box::new(move |e| match_sync_finished(e, node1_id)),
+            Box::new(
+                move |e| matches!(e, LiveEvent::InsertRemote { from, content_status: ContentStatus::Missing, .. } if *from == node1_id),
+            ),
+            Box::new(
+                move |e| matches!(e, LiveEvent::ContentReady { hash } if *hash == inserted_hash),
+            ),
+            match_event!(LiveEvent::PendingContentReady),
+        ],
+        vec![Box::new(move |e| match_sync_finished(e, node1_id))],
+    ).await;
+    let actual = blobs2
+        .read_to_bytes(
+            doc2.get_exact(author1, b"foo", false)
+                .await?
+                .expect("entry to exist")
+                .content_hash(),
+        )
+        .await?;
+    assert_eq!(actual.as_ref(), b"bar");
+
+    // update
+    let updated_hash = doc1
+        .set_bytes(author1, b"foo".to_vec(), b"update".to_vec())
+        .await?;
+    assert_next_unordered_with_optionals(
+        &mut events,
+        Duration::from_secs(2),
+        vec![
+            Box::new(
+                move |e| matches!(e, LiveEvent::InsertRemote { from, content_status: ContentStatus::Missing, .. } if *from == node1_id),
+            ),
+            Box::new(
+                move |e| matches!(e, LiveEvent::ContentReady { hash } if *hash == updated_hash),
+            ),
+        ],
+        vec![
+            Box::new(move |e| match_sync_finished(e, node1_id)),
+            Box::new(move |e| matches!(e, LiveEvent::PendingContentReady)),
+        ],
+    ).await;
+    let actual = blobs2
+        .read_to_bytes(
+            doc2.get_exact(author1, b"foo", false)
+                .await?
+                .expect("entry to exist")
+                .content_hash(),
+        )
+        .await?;
+    assert_eq!(actual.as_ref(), b"update");
+    Ok(())
+}
+
+#[tokio::test]
+#[cfg(feature = "test-utils")]
+#[ignore = "flaky"]
+async fn sync_restart_node() -> Result<()> {
+    let mut rng = test_rng(b"sync_restart_node");
+    setup_logging();
+    let (relay_map, _relay_url, _guard) = iroh::test_utils::run_relay_server().await?;
+
+    let discovery_server = iroh::test_utils::DnsPkarrServer::run().await?;
+
+    let node1_dir = tempfile::TempDir::with_prefix("test-sync_restart_node-node1")?;
+    let secret_key_1 = SecretKey::generate_with_rng(&mut rng);
+
+    let node1 = Node::persistent(&node1_dir)
+        .secret_key(secret_key_1.clone())
+        .insecure_skip_relay_cert_verify(true)
+        .relay_mode(RelayMode::Custom(relay_map.clone()))
+        .dns_resolver(discovery_server.dns_resolver())
+        .node_discovery(discovery_server.discovery(secret_key_1.clone()))
+        .spawn()
+        .await?;
+    let id1 = node1.node_id();
+
+    // create doc & ticket on node1
+    let doc1 = node1.docs().create().await?;
+    let blobs1 = node1.blobs();
+    let mut events1 = doc1.subscribe().await?;
+    let ticket = doc1
+        .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses)
+        .await?;
+
+    // create node2
+    let secret_key_2 = SecretKey::generate_with_rng(&mut rng);
+    let node2 = Node::memory()
+        .secret_key(secret_key_2.clone())
+        .relay_mode(RelayMode::Custom(relay_map.clone()))
+        .insecure_skip_relay_cert_verify(true)
+        .dns_resolver(discovery_server.dns_resolver())
+        .node_discovery(discovery_server.discovery(secret_key_2.clone()))
+        .spawn()
+        .await?;
+    let id2 = node2.node_id();
+    let author2 = node2.authors().create().await?;
+    let doc2 = node2.docs().import(ticket.clone()).await?;
+    let blobs2 = node2.blobs();
+
+    info!("node2 set a");
+    let hash_a = doc2.set_bytes(author2, "n2/a", "a").await?;
+    assert_latest(blobs2, &doc2, b"n2/a", b"a").await;
+
+    assert_next_unordered_with_optionals(
+        &mut events1,
+        Duration::from_secs(10),
+        vec![
+            match_event!(LiveEvent::NeighborUp(n) if *n == id2),
+            match_event!(LiveEvent::SyncFinished(e) if e.peer == id2 && e.result.is_ok()),
+            match_event!(LiveEvent::InsertRemote { from, content_status: ContentStatus::Missing, .. } if *from == id2),
+            match_event!(LiveEvent::ContentReady { hash } if *hash == hash_a),
+            match_event!(LiveEvent::PendingContentReady),
+        ],
+        vec![
+            match_event!(LiveEvent::SyncFinished(e) if e.peer == id2 && e.result.is_ok()),
+            match_event!(LiveEvent::PendingContentReady),
+        ],
+    )
+    .await;
+    assert_latest(blobs1, &doc1, b"n2/a", b"a").await;
+
+    info!(me = id1.fmt_short(), "node1 start shutdown");
+    node1.shutdown().await?;
+    info!(me = id1.fmt_short(), "node1 down");
+
+    info!(me = id1.fmt_short(), "sleep 1s");
+    tokio::time::sleep(Duration::from_secs(1)).await;
+
+    info!(me = id2.fmt_short(), "node2 set b");
+    let hash_b = doc2.set_bytes(author2, "n2/b", "b").await?;
+
+    info!(me = id1.fmt_short(), "node1 respawn");
+    let node1 = Node::persistent(&node1_dir)
+        .secret_key(secret_key_1.clone())
+        .insecure_skip_relay_cert_verify(true)
+        .relay_mode(RelayMode::Custom(relay_map.clone()))
+        .dns_resolver(discovery_server.dns_resolver())
+        .node_discovery(discovery_server.discovery(secret_key_1.clone()))
+        .spawn()
+        .await?;
+    assert_eq!(id1, node1.node_id());
+
+    let doc1 = node1.docs().open(doc1.id()).await?.expect("doc to exist");
+    let blobs1 = node1.blobs();
+    let mut events1 = doc1.subscribe().await?;
+    assert_latest(blobs1, &doc1, b"n2/a", b"a").await;
+
+    // check that initial resync is working
+    doc1.start_sync(vec![]).await?;
+    assert_next_unordered_with_optionals(
+        &mut events1,
+        Duration::from_secs(10),
+        vec![
+            match_event!(LiveEvent::NeighborUp(n) if *n == id2),
+            match_event!(LiveEvent::SyncFinished(e) if e.peer == id2 && e.result.is_ok()),
+            match_event!(LiveEvent::InsertRemote { from, content_status: ContentStatus::Missing, .. } if *from == id2),
+            match_event!(LiveEvent::ContentReady { hash } if *hash == hash_b),
+        ],
+        vec![
+            match_event!(LiveEvent::SyncFinished(e) if e.peer == id2 && e.result.is_ok()),
+            match_event!(LiveEvent::PendingContentReady),
+        ]
+    ).await;
+    assert_latest(blobs1, &doc1, b"n2/b", b"b").await;
+
+    // check that the live connection is working
+    info!(me = id2.fmt_short(), "node2 set c");
+    let hash_c = doc2.set_bytes(author2, "n2/c", "c").await?;
+    assert_next_unordered_with_optionals(
+        &mut events1,
+        Duration::from_secs(10),
+        vec![
+            match_event!(LiveEvent::InsertRemote { from, content_status: ContentStatus::Missing, .. } if *from == id2),
+            match_event!(LiveEvent::ContentReady { hash } if *hash == hash_c),
+        ],
+        vec![
+            match_event!(LiveEvent::SyncFinished(e) if e.peer == id2 && e.result.is_ok()),
+            match_event!(LiveEvent::PendingContentReady),
+            match_event!(LiveEvent::SyncFinished(e) if e.peer == id2 && e.result.is_ok()),
+            match_event!(LiveEvent::PendingContentReady),
+        ]
+    ).await;
+
+    assert_latest(blobs1, &doc1, b"n2/c", b"c").await;
+
+    Ok(())
+}
+
+/// Joins two nodes that write to the same document but have differing download policies, and tests
+/// that both sync all key info while only downloading the content their policy allows.
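+///
+/// For reference, a hedged sketch of the two policy shapes exercised below
+/// (both constructors appear verbatim in the test body):
+///
+/// ```ignore
+/// // download everything except content whose key starts with "star_wars/og"
+/// DownloadPolicy::EverythingExcept(vec![FilterKind::Prefix("star_wars/og".into())]);
+/// // download nothing except the content at exactly this key
+/// DownloadPolicy::NothingExcept(vec![FilterKind::Exact("lotr/fellowship_of_the_ring".into())]);
+/// ```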
+#[tokio::test]
+async fn test_download_policies() -> Result<()> {
+    // keys node a has
+    let star_wars_movies = &[
+        "star_wars/prequel/the_phantom_menace",
+        "star_wars/prequel/attack_of_the_clones",
+        "star_wars/prequel/revenge_of_the_sith",
+        "star_wars/og/a_new_hope",
+        "star_wars/og/the_empire_strikes_back",
+        "star_wars/og/return_of_the_jedi",
+    ];
+    // keys node b has
+    let lotr_movies = &[
+        "lotr/fellowship_of_the_ring",
+        "lotr/the_two_towers",
+        "lotr/return_of_the_king",
+    ];
+
+    // content policy for what b wants
+    let policy_b =
+        DownloadPolicy::EverythingExcept(vec![FilterKind::Prefix("star_wars/og".into())]);
+    // content policy for what a wants
+    let policy_a = DownloadPolicy::NothingExcept(vec![FilterKind::Exact(
+        "lotr/fellowship_of_the_ring".into(),
+    )]);
+
+    // a will sync all lotr keys but download a single key's content
+    const EXPECTED_A_SYNCED: usize = 3;
+    const EXPECTED_A_DOWNLOADED: usize = 1;
+
+    // b will sync all star wars keys but download only the prequel content
+    const EXPECTED_B_SYNCED: usize = 6;
+    const EXPECTED_B_DOWNLOADED: usize = 3;
+
+    let mut rng = test_rng(b"sync_download_policies");
+    setup_logging();
+    let nodes = spawn_nodes(2, &mut rng).await?;
+    let clients = nodes.iter().map(|node| node.client()).collect::<Vec<_>>();
+
+    let doc_a = clients[0].docs().create().await?;
+    let author_a = clients[0].authors().create().await?;
+    let ticket = doc_a
+        .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses)
+        .await?;
+
+    let doc_b = clients[1].docs().import(ticket).await?;
+    let author_b = clients[1].authors().create().await?;
+
+    doc_a.set_download_policy(policy_a).await?;
+    doc_b.set_download_policy(policy_b).await?;
+
+    let mut events_a = doc_a.subscribe().await?;
+    let mut events_b = doc_b.subscribe().await?;
+
+    let mut key_hashes: HashMap<Hash, &'static str> = HashMap::default();
+
+    // set content in a
+    for k in star_wars_movies.iter() {
+        let hash = doc_a
+            .set_bytes(author_a, k.to_owned(), k.to_owned())
+            .await?;
+        key_hashes.insert(hash, k);
+    }
+
+    // set content in b
+    for k in lotr_movies.iter() {
+        let hash = doc_b
+            .set_bytes(author_b, k.to_owned(), k.to_owned())
+            .await?;
+        key_hashes.insert(hash, k);
+    }
+
+    assert_eq!(key_hashes.len(), star_wars_movies.len() + lotr_movies.len());
+
+    let fut = async {
+        use LiveEvent::*;
+        let mut downloaded_a: Vec<&'static str> = Vec::new();
+        let mut downloaded_b: Vec<&'static str> = Vec::new();
+        let mut synced_a = 0usize;
+        let mut synced_b = 0usize;
+        loop {
+            tokio::select! {
+                Some(Ok(ev)) = events_a.next() => {
+                    match ev {
+                        InsertRemote { content_status, entry, .. } => {
+                            synced_a += 1;
+                            if let ContentStatus::Complete = content_status {
+                                downloaded_a.push(key_hashes.get(&entry.content_hash()).unwrap())
+                            }
+                        },
+                        ContentReady { hash } => {
+                            downloaded_a.push(key_hashes.get(&hash).unwrap());
+                        },
+                        _ => {}
+                    }
+                }
+                Some(Ok(ev)) = events_b.next() => {
+                    match ev {
+                        InsertRemote { content_status, entry, .. } => {
+                            synced_b += 1;
+                            if let ContentStatus::Complete = content_status {
+                                downloaded_b.push(key_hashes.get(&entry.content_hash()).unwrap())
+                            }
+                        },
+                        ContentReady { hash } => {
+                            downloaded_b.push(key_hashes.get(&hash).unwrap());
+                        },
+                        _ => {}
+                    }
+                }
+            }
+
+            // stop once both sides have seen the expected syncs and downloads
+            if synced_a == EXPECTED_A_SYNCED
+                && downloaded_a.len() == EXPECTED_A_DOWNLOADED
+                && synced_b == EXPECTED_B_SYNCED
+                && downloaded_b.len() == EXPECTED_B_DOWNLOADED
+            {
+                break;
+            }
+        }
+        (downloaded_a, downloaded_b)
+    };
+
+    let (downloaded_a, mut downloaded_b) = tokio::time::timeout(TIMEOUT, fut)
+        .await
+        .context("timeout elapsed")?;
+
+    downloaded_b.sort();
+    assert_eq!(downloaded_a, vec!["lotr/fellowship_of_the_ring"]);
+    assert_eq!(
+        downloaded_b,
+        vec![
+            "star_wars/prequel/attack_of_the_clones",
+            "star_wars/prequel/revenge_of_the_sith",
+            "star_wars/prequel/the_phantom_menace",
+        ]
+    );
+
+    Ok(())
+}
+
+/// Test sync between many nodes with propagation through sync reports.
+#[tokio::test(flavor = "multi_thread")]
+#[ignore = "flaky"]
+async fn sync_big() -> Result<()> {
+    setup_logging();
+    let mut rng = test_rng(b"sync_big");
+    let n_nodes = std::env::var("NODES")
+        .map(|v| v.parse().expect("NODES must be a number"))
+        .unwrap_or(10);
+    let n_entries_init = 1;
+
+    tokio::task::spawn(async move {
+        for i in 0.. {
+            tokio::time::sleep(Duration::from_secs(1)).await;
+            info!("tick {i}");
+        }
+    });
+
+    let nodes = spawn_nodes(n_nodes, &mut rng).await?;
+    let node_ids = nodes.iter().map(|node| node.node_id()).collect::<Vec<_>>();
+    let clients = nodes.iter().map(|node| node.client()).collect::<Vec<_>>();
+    let authors = collect_futures(clients.iter().map(|c| c.authors().create())).await?;
+
+    let doc0 = clients[0].docs().create().await?;
+    let mut ticket = doc0
+        .share(ShareMode::Write, AddrInfoOptions::RelayAndAddresses)
+        .await?;
+    // do not join for now, just import without any peer info
+    let peer0 = ticket.nodes[0].clone();
+    ticket.nodes = vec![];
+
+    let docs_clients: Vec<_> = clients.iter().skip(1).collect();
+    let mut docs = vec![];
+    docs.push(doc0);
+    docs.extend_from_slice(
+        &collect_futures(docs_clients.into_iter().map(|c| {
+            let ticket = ticket.clone();
+            async move { c.docs().import(ticket).await }
+        }))
+        .await?,
+    );
+
+    let mut expected = vec![];
+
+    // create initial data on each node
+    publish(&docs, &mut expected, n_entries_init, |i, j| {
+        (
+            authors[i],
+            format!("init/{}/{j}", node_ids[i].fmt_short()),
+            format!("init:{i}:{j}"),
+        )
+    })
+    .await?;
+
+    // assert initial data
+    for (i, doc) in docs.iter().enumerate() {
+        let blobs = nodes[i].blobs();
+        let entries = get_all_with_content(blobs, doc).await?;
+        let mut expected = expected
+            .iter()
+            .filter(|e| e.author == authors[i])
+            .cloned()
+            .collect::<Vec<_>>();
+        expected.sort();
+        assert_eq!(entries, expected, "phase1 pre-sync correct");
+    }
+
+    // setup event streams
+    let events = collect_futures(docs.iter().map(|d| d.subscribe())).await?;
+
+    // join nodes together
+    for (i, doc) in docs.iter().enumerate().skip(1) {
+        info!(me = %node_ids[i].fmt_short(), peer = %peer0.node_id.fmt_short(), "join");
+        doc.start_sync(vec![peer0.clone()]).await?;
+    }
+
+    // wait for the InsertRemote events to arrive
+    info!("wait for all peers to receive insert events");
+    let expected_inserts = (n_nodes - 1) * n_entries_init;
+    let mut tasks = tokio::task::JoinSet::default();
+    for (i, events) in events.into_iter().enumerate() {
+        let doc = docs[i].clone();
+        let me = doc.id().fmt_short();
+        let expected = expected.clone();
+        let fut = async move {
+            wait_for_events(events, expected_inserts, TIMEOUT, |e| {
+                matches!(e, LiveEvent::InsertRemote { .. })
+            })
+            .await?;
+            let entries = get_all(&doc).await?;
+            if entries != expected {
+                Err(anyhow!(
+                    "node {i} failed (has {} entries but expected to have {})",
+                    entries.len(),
+                    expected.len()
+                ))
+            } else {
+                info!(
+                    "received and checked all {} expected entries",
+                    expected.len()
+                );
+                Ok(())
+            }
+        }
+        .instrument(error_span!("sync-test", %me));
+        let fut = fut.map(move |r| r.with_context(move || format!("node {i} ({me})")));
+        tasks.spawn(fut);
+    }
+
+    while let Some(res) = tasks.join_next().await {
+        res??;
+    }
+
+    assert_all_docs(&docs, &node_ids, &expected, "after initial sync").await;
+
+    info!("shutdown");
+    for node in nodes {
+        node.shutdown().await?;
+    }
+
+    Ok(())
+}
+
+#[tokio::test]
+#[cfg(feature = "test-utils")]
+async fn test_list_docs_stream() -> testresult::TestResult<()> {
+    let node = Node::memory()
+        .relay_mode(RelayMode::Disabled)
+        .spawn()
+        .await?;
+    let count = 200;
+
+    // create docs
+    for _i in 0..count {
+        let doc = node.docs().create().await?;
+        doc.close().await?;
+    }
+
+    // create doc stream
+    let mut stream = node.docs().list().await?;
+
+    // process each doc and call into the docs actor.
+    // this makes sure that we don't deadlock the docs actor.
+    let mut i = 0;
+    let fut = async {
+        while let Some((id, _)) = stream.try_next().await.unwrap() {
+            let _doc = node.docs().open(id).await.unwrap().unwrap();
+            i += 1;
+        }
+    };
+
+    tokio::time::timeout(Duration::from_secs(2), fut)
+        .await
+        .expect("not to timeout");
+
+    assert_eq!(i, count);
+
+    Ok(())
+}
+
+/// Get all entries of a document.
+async fn get_all(doc: &Doc) -> anyhow::Result<Vec<Entry>> {
+    let entries = doc.get_many(Query::all()).await?;
+    let entries = entries.collect::<Vec<_>>().await;
+    entries.into_iter().collect()
+}
+
+/// Get all entries of a document with the blob content.
+async fn get_all_with_content(
+    blobs: &iroh_blobs::rpc::client::blobs::Client,
+    doc: &Doc,
+) -> anyhow::Result<Vec<(Entry, Bytes)>> {
+    let entries = doc.get_many(Query::all()).await?;
+    let entries = entries.and_then(|entry| async {
+        let hash = entry.content_hash();
+        let content = blobs.read_to_bytes(hash).await;
+        content.map(|c| (entry, c))
+    });
+    let entries = entries.collect::<Vec<_>>().await;
+    let entries = entries.into_iter().collect::<Result<Vec<_>>>()?;
+    Ok(entries)
+}
+
+async fn publish(
+    docs: &[Doc],
+    expected: &mut Vec<ExpectedEntry>,
+    n: usize,
+    cb: impl Fn(usize, usize) -> (AuthorId, String, String),
+) -> anyhow::Result<()> {
+    for (i, doc) in docs.iter().enumerate() {
+        for j in 0..n {
+            let (author, key, value) = cb(i, j);
+            doc.set_bytes(author, key.as_bytes().to_vec(), value.as_bytes().to_vec())
+                .await?;
+            expected.push(ExpectedEntry { author, key, value });
+        }
+    }
+    expected.sort();
+    Ok(())
+}
+
+/// Collect an iterator of futures by joining them all, failing if any future failed.
+async fn collect_futures<T>(
+    futs: impl IntoIterator<Item = impl std::future::Future<Output = anyhow::Result<T>>>,
+) -> anyhow::Result<Vec<T>> {
+    futures_buffered::join_all(futs)
+        .await
+        .into_iter()
+        .collect::<anyhow::Result<Vec<_>>>()
+}
+
+/// Collect `count` events from the `events` stream, only collecting events for which `matcher`
+/// returns true.
+async fn wait_for_events(
+    mut events: impl Stream<Item = Result<LiveEvent>> + Send + Unpin + 'static,
+    count: usize,
+    timeout: Duration,
+    matcher: impl Fn(&LiveEvent) -> bool,
+) -> anyhow::Result<Vec<LiveEvent>> {
+    let mut res = Vec::with_capacity(count);
+    let sleep = tokio::time::sleep(timeout);
+    tokio::pin!(sleep);
+    while res.len() < count {
+        tokio::select! {
+            () = &mut sleep => {
+                bail!("Failed to collect {count} elements in {timeout:?} (collected only {})", res.len());
+            },
+            event = events.try_next() => {
+                let event = event?;
+                match event {
+                    None => bail!("stream ended after {} items, but expected {count}", res.len()),
+                    Some(event) => if matcher(&event) {
+                        res.push(event);
+                        debug!("recv event {} of {count}", res.len());
+                    }
+                }
+            }
+        }
+    }
+    Ok(res)
+}
+
+async fn assert_all_docs(
+    docs: &[Doc],
+    node_ids: &[PublicKey],
+    expected: &Vec<ExpectedEntry>,
+    label: &str,
+) {
+    info!("validate all peers: {label}");
+    for (i, doc) in docs.iter().enumerate() {
+        let entries = get_all(doc).await.unwrap_or_else(|err| {
+            panic!("failed to get entries for peer {:?}: {err:?}", node_ids[i])
+        });
+        assert_eq!(
+            &entries,
+            expected,
+            "{label}: peer {i} {:?} failed (have {} but expected {})",
+            node_ids[i],
+            entries.len(),
+            expected.len()
+        );
+    }
+}
+
+#[derive(Debug, Ord, Eq, PartialEq, PartialOrd, Clone)]
+struct ExpectedEntry {
+    author: AuthorId,
+    key: String,
+    value: String,
+}
+
+impl PartialEq<Entry> for ExpectedEntry {
+    fn eq(&self, other: &Entry) -> bool {
+        self.key.as_bytes() == other.key()
+            && Hash::new(&self.value) == other.content_hash()
+            && self.author == other.author()
+    }
+}
+impl PartialEq<(Entry, Bytes)> for ExpectedEntry {
+    fn eq(&self, (entry, content): &(Entry, Bytes)) -> bool {
+        self.key.as_bytes() == entry.key()
+            && Hash::new(&self.value) == entry.content_hash()
+            && self.author == entry.author()
+            && self.value.as_bytes() == content.as_ref()
+    }
+}
+impl PartialEq<ExpectedEntry> for Entry {
+    fn eq(&self, other: &ExpectedEntry) -> bool {
+        other.eq(self)
+    }
+}
+impl PartialEq<ExpectedEntry> for (Entry, Bytes) {
+    fn eq(&self, other: &ExpectedEntry) -> bool {
+        other.eq(self)
+    }
+}
+
+#[tokio::test]
+async fn doc_delete() -> Result<()> {
+    let node = Node::memory()
+        .gc_interval(Some(Duration::from_millis(100)))
+        .spawn()
+        .await?;
+    let client = node.client();
+    let doc = client.docs().create().await?;
+    let blobs = client.blobs();
+    let author = client.authors().create().await?;
+    let hash = doc
+        .set_bytes(author, b"foo".to_vec(), b"hi".to_vec())
+        .await?;
+    assert_latest(blobs, &doc, b"foo", b"hi").await;
+    let deleted = doc.del(author, b"foo".to_vec()).await?;
+    assert_eq!(deleted, 1);
+
+    let entry = doc.get_exact(author, b"foo".to_vec(), false).await?;
+    assert!(entry.is_none());
+
+    // wait for gc
+    // TODO: allow manually triggering gc
+    tokio::time::sleep(Duration::from_millis(200)).await;
+    let bytes = client.blobs().read_to_bytes(hash).await;
+    assert!(bytes.is_err());
+    node.shutdown().await?;
+    Ok(())
+}
+
+#[tokio::test]
+async fn sync_drop_doc() -> Result<()> {
+    let mut rng = test_rng(b"sync_drop_doc");
+    setup_logging();
+    let node = spawn_node(0, &mut rng).await?;
+    let client = node.client();
+
+    let doc = client.docs().create().await?;
+    let author = client.authors().create().await?;
+
+    let mut sub = doc.subscribe().await?;
+    doc.set_bytes(author, b"foo".to_vec(), b"bar".to_vec())
+        .await?;
+    let ev = sub.next().await;
+    assert!(matches!(ev, Some(Ok(LiveEvent::InsertLocal { .. }))));
+
+    client.docs().drop_doc(doc.id()).await?;
+    let res = doc.get_exact(author, b"foo".to_vec(), true).await;
+    assert!(res.is_err());
+    let res = doc
+        .set_bytes(author, b"foo".to_vec(), b"bar".to_vec())
+        .await;
+    assert!(res.is_err());
+    let res = client.docs().open(doc.id()).await;
+    assert!(res.is_err());
+    let ev = sub.next().await;
+    assert!(ev.is_none());
+
+    Ok(())
+}
+
+async fn assert_latest(
+    blobs: &iroh_blobs::rpc::client::blobs::Client,
+    doc: &Doc,
+    key: &[u8],
+    value: &[u8],
+) {
+    let content = get_latest(blobs, doc, key).await.unwrap();
+    assert_eq!(content, value.to_vec());
+}
+
+async fn get_latest(
+    blobs: &iroh_blobs::rpc::client::blobs::Client,
+    doc: &Doc,
+    key: &[u8],
+) -> anyhow::Result<Vec<u8>> {
+    let query = Query::single_latest_per_key().key_exact(key);
+    let entry = doc
+        .get_many(query)
+        .await?
+        .next()
+        .await
+        .ok_or_else(|| anyhow!("entry not found"))??;
+    let content = blobs.read_to_bytes(entry.content_hash()).await?;
+    Ok(content.to_vec())
+}
+
+fn setup_logging() {
+    tracing_subscriber::registry()
+        .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
+        .with(EnvFilter::from_default_env())
+        .try_init()
+        .ok();
+}
+
+async fn next<T: std::fmt::Debug>(mut stream: impl Stream<Item = Result<T>> + Unpin) -> T {
+    let event = stream
+        .next()
+        .await
+        .expect("stream ended")
+        .expect("stream produced error");
+    debug!("Event: {event:?}");
+    event
+}
+
+#[allow(clippy::type_complexity)]
+fn apply_matchers<T>(item: &T, matchers: &mut Vec<Box<dyn Fn(&T) -> bool + Send>>) -> bool {
+    for i in 0..matchers.len() {
+        if matchers[i](item) {
+            let _ = matchers.remove(i);
+            return true;
+        }
+    }
+    false
+}
+
+/// Receive the next `matchers.len()` elements from a stream and match them against the functions
+/// in `matchers`, in order.
+///
+/// Returns all received events.
+#[allow(clippy::type_complexity)]
+async fn assert_next<T: std::fmt::Debug>(
+    mut stream: impl Stream<Item = Result<T>> + Unpin + Send,
+    timeout: Duration,
+    matchers: Vec<Box<dyn Fn(&T) -> bool + Send>>,
+) -> Vec<T> {
+    let fut = async {
+        let mut items = vec![];
+        for (i, f) in matchers.iter().enumerate() {
+            let item = stream
+                .next()
+                .await
+                .expect("event stream ended prematurely")
+                .expect("event stream errored");
+            if !(f)(&item) {
+                panic!("assertion failed for event {i} {item:?}");
+            }
+            items.push(item);
+        }
+        items
+    };
+    let res = tokio::time::timeout(timeout, fut).await;
+    res.expect("timeout reached")
+}
+
+/// Receive `matchers.len()` elements from a stream and assert that each element matches one of the
+/// functions in `matchers`.
+///
+/// Order of the matchers is not relevant.
+///
+/// Returns all received events.
+#[allow(clippy::type_complexity)]
+async fn assert_next_unordered<T: std::fmt::Debug + Clone>(
+    stream: impl Stream<Item = Result<T>> + Unpin + Send,
+    timeout: Duration,
+    matchers: Vec<Box<dyn Fn(&T) -> bool + Send>>,
+) -> Vec<T> {
+    assert_next_unordered_with_optionals(stream, timeout, matchers, vec![]).await
+}
+
+/// Receive between `min` and `max` elements from the stream and assert that each element matches
+/// either one of the matchers in `required_matchers` or in `optional_matchers`.
+///
+/// Order of the matchers is not relevant.
+///
+/// Will panic if:
+/// * Any element fails to match one of the required or optional matchers
+/// * More than `max` elements were received, but not all required matchers were used yet
+/// * The timeout completes before all required matchers were used
+///
+/// Returns all received events.
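+///
+/// A hedged usage sketch mirroring the calls in the tests above; `stream` and
+/// `peer_id` stand in for the event stream and peer of a concrete test:
+///
+/// ```ignore
+/// let events = assert_next_unordered_with_optionals(
+///     &mut stream,
+///     Duration::from_secs(2),
+///     // required matchers: each must fire once, in any order
+///     vec![match_event!(LiveEvent::PendingContentReady)],
+///     // optional matchers: may consume events, but are not required for success
+///     vec![Box::new(move |e| match_sync_finished(e, peer_id))],
+/// ).await;
+/// ```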
+#[allow(clippy::type_complexity)]
+async fn assert_next_unordered_with_optionals<T: std::fmt::Debug + Clone>(
+    mut stream: impl Stream<Item = Result<T>> + Unpin + Send,
+    timeout: Duration,
+    mut required_matchers: Vec<Box<dyn Fn(&T) -> bool + Send>>,
+    mut optional_matchers: Vec<Box<dyn Fn(&T) -> bool + Send>>,
+) -> Vec<T> {
+    let max = required_matchers.len() + optional_matchers.len();
+    let required = required_matchers.len();
+    // we have to use a mutex because rustc is not intelligent enough to realize
+    // that the mutable borrow terminates when the future completes
+    let events = Arc::new(parking_lot::Mutex::new(vec![]));
+    let fut = async {
+        while let Some(event) = stream.next().await {
+            let event = event.context("failed to read from stream")?;
+            let len = {
+                let mut events = events.lock();
+                events.push(event.clone());
+                events.len()
+            };
+            if !apply_matchers(&event, &mut required_matchers)
+                && !apply_matchers(&event, &mut optional_matchers)
+            {
+                bail!("Event didn't match any matcher: {event:?}");
+            }
+            if required_matchers.is_empty() || len == max {
+                break;
+            }
+        }
+        if !required_matchers.is_empty() {
+            bail!(
+                "Matched only {} of {required} required matchers",
+                required - required_matchers.len()
+            );
+        }
+        Ok(())
+    };
+    tokio::pin!(fut);
+    let res = tokio::time::timeout(timeout, fut)
+        .await
+        .map_err(|_| anyhow!("Timeout reached ({timeout:?})"))
+        .and_then(|res| res);
+    let events = events.lock().clone();
+    if let Err(err) = &res {
+        println!("Received events: {events:#?}");
+        println!(
+            "Received {} events, expected between {required} and {max}",
+            events.len()
+        );
+        panic!("Failed to receive or match all events: {err:?}");
+    }
+    events
+}
+
+/// Returns `true` if the event is a [`LiveEvent::SyncFinished`] whose contained [`SyncEvent`]
+/// has no error and matches `peer`.
+fn match_sync_finished(event: &LiveEvent, peer: PublicKey) -> bool {
+    let LiveEvent::SyncFinished(e) = event else {
+        return false;
+    };
+    e.peer == peer && e.result.is_ok()
+}
diff --git a/tests/util.rs b/tests/util.rs
new file mode 100644
index 0000000..9dc47ba
--- /dev/null
+++ b/tests/util.rs
@@ -0,0 +1,358 @@
+#![cfg(feature = "rpc")]
+#![allow(dead_code)]
+use std::{
+    marker::PhantomData,
+    net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6},
+    ops::Deref,
+    path::{Path, PathBuf},
+    sync::Arc,
+    time::Duration,
+};
+
+use iroh::{discovery::Discovery, dns::DnsResolver, key::SecretKey, NodeId, RelayMode};
+use iroh_blobs::{
+    store::{GcConfig, Store as BlobStore},
+    util::local_pool::LocalPool,
+};
+use nested_enum_utils::enum_conversions;
+use quic_rpc::transport::{Connector, Listener};
+use serde::{Deserialize, Serialize};
+use tokio_util::task::AbortOnDropHandle;
+
+/// Default bind address for the node.
+/// 11204 is "iroh" in leetspeak
+pub const DEFAULT_BIND_PORT: u16 = 11204;
+
+/// The default bind address for the iroh IPv4 socket.
+pub const DEFAULT_BIND_ADDR_V4: SocketAddrV4 =
+    SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_BIND_PORT);
+
+/// The default bind address for the iroh IPv6 socket.
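+/// Uses `DEFAULT_BIND_PORT + 1` to keep the IPv4 and IPv6 sockets on distinct ports.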
+pub const DEFAULT_BIND_ADDR_V6: SocketAddrV6 =
+    SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, DEFAULT_BIND_PORT + 1, 0, 0);
+
+/// An iroh node that just has the blobs transport
+#[derive(Debug)]
+pub struct Node<S> {
+    router: iroh::protocol::Router,
+    client: Client,
+    store: S,
+    local_pool: LocalPool,
+    rpc_task: AbortOnDropHandle<()>,
+}
+
+impl<S> Deref for Node<S> {
+    type Target = Client;
+
+    fn deref(&self) -> &Self::Target {
+        &self.client
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[enum_conversions]
+enum Request {
+    BlobsOrTags(iroh_blobs::rpc::proto::Request),
+    Docs(iroh_docs::rpc::proto::Request),
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[enum_conversions]
+enum Response {
+    BlobsOrTags(iroh_blobs::rpc::proto::Response),
+    Docs(iroh_docs::rpc::proto::Response),
+}
+
+#[derive(Debug, Clone, Copy)]
+struct Service;
+
+impl quic_rpc::Service for Service {
+    type Req = Request;
+    type Res = Response;
+}
+
+#[derive(Debug, Clone)]
+pub struct Client {
+    blobs: iroh_blobs::rpc::client::blobs::Client,
+    docs: iroh_docs::rpc::client::docs::Client,
+    authors: iroh_docs::rpc::client::authors::Client,
+}
+
+impl Client {
+    fn new(client: quic_rpc::RpcClient<Service>) -> Self {
+        Self {
+            blobs: iroh_blobs::rpc::client::blobs::Client::new(client.clone().map().boxed()),
+            docs: iroh_docs::rpc::client::docs::Client::new(client.clone().map().boxed()),
+            authors: iroh_docs::rpc::client::authors::Client::new(client.map().boxed()),
+        }
+    }
+
+    pub fn blobs(&self) -> &iroh_blobs::rpc::client::blobs::Client {
+        &self.blobs
+    }
+
+    pub fn docs(&self) -> &iroh_docs::rpc::client::docs::Client {
+        &self.docs
+    }
+
+    pub fn authors(&self) -> &iroh_docs::rpc::client::authors::Client {
+        &self.authors
+    }
+}
+
+/// An iroh node builder
+#[derive(derive_more::Debug)]
+pub struct Builder<S> {
+    path: Option<PathBuf>,
+    secret_key: Option<SecretKey>,
+    relay_mode: RelayMode,
+    dns_resolver: Option<DnsResolver>,
+    node_discovery: Option<Box<dyn Discovery>>,
+    gc_interval: Option<Duration>,
+    #[debug(skip)]
+    register_gc_done_cb: Option<Box<dyn Fn() + Send>>,
+    insecure_skip_relay_cert_verify: bool,
+    bind_random_port: bool,
+    _p: PhantomData<S>,
+}
+
+impl<S: BlobStore> Builder<S> {
+    /// Spawns the node
+    async fn spawn0(self, store: S) -> anyhow::Result<Node<S>> {
+        let mut addr_v4 = DEFAULT_BIND_ADDR_V4;
+        let mut addr_v6 = DEFAULT_BIND_ADDR_V6;
+        if self.bind_random_port {
+            addr_v4.set_port(0);
+            addr_v6.set_port(0);
+        }
+        let mut builder = iroh::Endpoint::builder()
+            .bind_addr_v4(addr_v4)
+            .bind_addr_v6(addr_v6)
+            .discovery_n0()
+            .relay_mode(self.relay_mode.clone())
+            .insecure_skip_relay_cert_verify(self.insecure_skip_relay_cert_verify);
+        if let Some(dns_resolver) = self.dns_resolver.clone() {
+            builder = builder.dns_resolver(dns_resolver);
+        }
+        let endpoint = builder.bind().await?;
+        let addr = endpoint.node_addr().await?;
+        let local_pool = LocalPool::single();
+        let mut router = iroh::protocol::Router::builder(endpoint.clone());
+
+        // Setup blobs
+        let downloader = iroh_blobs::downloader::Downloader::new(
+            store.clone(),
+            endpoint.clone(),
+            local_pool.handle().clone(),
+        );
+        let blobs = Arc::new(iroh_blobs::net_protocol::Blobs::new(
+            store.clone(),
+            local_pool.handle().clone(),
+            Default::default(),
+            downloader.clone(),
+            endpoint.clone(),
+        ));
+        let gossip = iroh_gossip::net::Gossip::from_endpoint(
+            endpoint.clone(),
+            Default::default(),
+            &addr.info,
+        );
+        let replica_store = match self.path {
+            Some(ref path) => iroh_docs::store::Store::persistent(path.join("docs.redb"))?,
+            None => iroh_docs::store::Store::memory(),
+        };
+        let author_store = match self.path {
+            Some(ref path) => {
+                iroh_docs::engine::DefaultAuthorStorage::Persistent(path.join("default-author"))
+            }
+            None => iroh_docs::engine::DefaultAuthorStorage::Mem,
+        };
+        let docs = match iroh_docs::engine::Engine::spawn(
+            endpoint,
+            gossip.clone(),
+            replica_store,
+            store.clone(),
+            downloader,
+            author_store,
+            local_pool.handle().clone(),
+        )
+        .await
+        {
+            Ok(docs) => docs,
+            Err(err) => {
+                store.shutdown().await;
+                return Err(err);
+            }
+        };
+        router = router.accept(iroh_blobs::ALPN, blobs.clone());
+        router = router.accept(iroh_docs::ALPN, Arc::new(docs.clone()));
+        router = router.accept(iroh_gossip::net::GOSSIP_ALPN, Arc::new(gossip.clone()));
+
+        // Build the router
+        let router = router.spawn().await?;
+
+        // Setup RPC
+        let (internal_rpc, controller) =
+            quic_rpc::transport::flume::channel::<Request, Response>(1);
+        let controller = controller.boxed();
+        let internal_rpc = internal_rpc.boxed();
+        let internal_rpc = quic_rpc::RpcServer::<Service, _>::new(internal_rpc);
+
+        let docs2 = docs.clone();
+        let blobs2 = blobs.clone();
+        // accept RPC requests until this task is aborted on shutdown
+        let rpc_task: tokio::task::JoinHandle<()> = tokio::task::spawn(async move {
+            loop {
+                let request = internal_rpc.accept().await;
+                match request {
+                    Ok(accepting) => {
+                        let blobs = blobs2.clone();
+                        let docs = docs2.clone();
+                        tokio::task::spawn(async move {
+                            let (msg, chan) = accepting.read_first().await?;
+                            match msg {
+                                Request::BlobsOrTags(msg) => {
+                                    blobs.handle_rpc_request(msg, chan.map().boxed()).await?;
+                                }
+                                Request::Docs(msg) => {
+                                    docs.handle_rpc_request(msg, chan.map().boxed()).await?;
+                                }
+                            }
+                            anyhow::Ok(())
+                        });
+                    }
+                    Err(err) => {
+                        tracing::warn!("rpc error: {:?}", err);
+                    }
+                }
+            }
+        });
+
+        let client = quic_rpc::RpcClient::new(controller);
+        if let Some(period) = self.gc_interval {
+            blobs.add_protected(docs.protect_cb())?;
+            blobs.start_gc(GcConfig {
+                period,
+                done_callback: self.register_gc_done_cb,
+            })?;
+        }
+
+        let client = Client::new(client);
+        Ok(Node {
+            router,
+            client,
+            store,
+            rpc_task: AbortOnDropHandle::new(rpc_task),
+            local_pool,
+        })
+    }
+
+    pub fn secret_key(mut self, value: SecretKey) -> Self {
+        self.secret_key = Some(value);
+        self
+    }
+
+    pub fn relay_mode(mut self, value: RelayMode) -> Self {
+        self.relay_mode = value;
+        self
+    }
+
+    pub fn dns_resolver(mut self, value: DnsResolver) -> Self {
+        self.dns_resolver = Some(value);
+        self
+    }
+
+    pub fn node_discovery(mut self, value: Box<dyn Discovery>) -> Self {
+        self.node_discovery = Some(value);
+        self
+    }
+
+    pub fn gc_interval(mut self, value: Option<Duration>) -> Self {
+        self.gc_interval = value;
+        self
+    }
+
+    pub fn register_gc_done_cb(mut self, value: Box<dyn Fn() + Send>) -> Self {
+        self.register_gc_done_cb = Some(value);
+        self
+    }
+
+    pub fn insecure_skip_relay_cert_verify(mut self, value: bool) -> Self {
+        self.insecure_skip_relay_cert_verify = value;
+        self
+    }
+
+    pub fn bind_random_port(mut self) -> Self {
+        self.bind_random_port = true;
+        self
+    }
+
+    fn new(path: Option<PathBuf>) -> Self {
+        Self {
+            path,
+            secret_key: None,
+            relay_mode: RelayMode::Default,
+            gc_interval: None,
+            insecure_skip_relay_cert_verify: false,
+            bind_random_port: false,
+            dns_resolver: None,
+            node_discovery: None,
+            register_gc_done_cb: None,
+            _p: PhantomData,
+        }
+    }
+}
+
+impl Node<iroh_blobs::store::mem::Store> {
+    /// Creates a new node with memory storage
+    pub fn memory() -> Builder<iroh_blobs::store::mem::Store> {
+        Builder::new(None)
+    }
+}
+
+impl Builder<iroh_blobs::store::mem::Store> {
+    /// Spawns the node
+    pub async fn spawn(self) -> anyhow::Result<Node<iroh_blobs::store::mem::Store>> {
+        let store = iroh_blobs::store::mem::Store::new();
+        self.spawn0(store).await
+    }
+}
+
+impl Node<iroh_blobs::store::fs::Store> {
+    /// Creates a new node with persistent storage
+    pub fn persistent(path: impl AsRef<Path>) -> Builder<iroh_blobs::store::fs::Store> {
+        let path = Some(path.as_ref().to_owned());
+        Builder::new(path)
+    }
+}
+
+impl Builder<iroh_blobs::store::fs::Store> {
+    /// Spawns the node
+    pub async fn spawn(self) -> anyhow::Result<Node<iroh_blobs::store::fs::Store>> {
+        let store = iroh_blobs::store::fs::Store::load(self.path.clone().unwrap()).await?;
+        self.spawn0(store).await
+    }
+}
+
+impl<S: BlobStore> Node<S> {
+    /// Returns the node id
+    pub fn node_id(&self) -> NodeId {
+        self.router.endpoint().node_id()
+    }
+
+    /// Returns the blob store
+    pub fn blob_store(&self) -> &S {
+        &self.store
+    }
+
+    /// Shuts down the node
+    pub async fn shutdown(self) -> anyhow::Result<()> {
+        self.router.shutdown().await?;
+        self.local_pool.shutdown().await;
+        self.rpc_task.abort();
+        Ok(())
+    }
+
+    /// Returns the client
+    pub fn client(&self) -> &Client {
+        &self.client
+    }
+}
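+
+/// Hedged usage sketch: how the sync tests drive this helper. Spawns an
+/// in-memory node, writes one entry, and shuts down; the key/value bytes are
+/// illustrative, while the calls mirror those used in `tests/sync.rs`.
+#[tokio::test]
+async fn util_node_smoke() -> anyhow::Result<()> {
+    // in-memory store, random port so parallel test runs don't clash
+    let node = Node::memory().bind_random_port().spawn().await?;
+    // `Node` derefs to `Client`, so docs()/authors()/blobs() are direct calls
+    let doc = node.docs().create().await?;
+    let author = node.authors().create().await?;
+    doc.set_bytes(author, b"hello".to_vec(), b"world".to_vec())
+        .await?;
+    node.shutdown().await?;
+    Ok(())
+}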