From c2afad0634c188f5e57e431ad2d2ace6f72d018b Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 24 Apr 2025 10:43:28 -0700 Subject: [PATCH 01/20] qos_core: add tokio and tokio-vsock dependencies required for any async work --- src/Cargo.lock | 1 + src/qos_core/Cargo.toml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/Cargo.lock b/src/Cargo.lock index b9f7edb8..5ac31963 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2217,6 +2217,7 @@ dependencies = [ "rustls", "serde", "serde_bytes", + "tokio", "vsss-rs", "webpki-roots", ] diff --git a/src/qos_core/Cargo.toml b/src/qos_core/Cargo.toml index 4fcbd4cf..f3c8531a 100644 --- a/src/qos_core/Cargo.toml +++ b/src/qos_core/Cargo.toml @@ -21,6 +21,9 @@ aws-nitro-enclaves-nsm-api = { version = "0.4", default-features = false } serde_bytes = { version = "0.11", default-features = false } serde = { version = "1", features = ["derive"], default-features = false } +tokio = { version = "1.38.0", features = ["macros", "rt-multi-thread"], default-features = false } +tokio-vsock = { version = "0.7.1", default-features = false } + [dev-dependencies] qos_test_primitives = { path = "../qos_test_primitives" } qos_p256 = { path = "../qos_p256", features = ["mock"] } From 57982b029de98185b36e30b998e99626a0777875 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 24 Apr 2025 13:25:44 -0700 Subject: [PATCH 02/20] dependencies: upgrade and unify libc allows for tokio-vsock usage --- src/Cargo.lock | 438 +++++++++++++++++++++----------------- src/init/Cargo.lock | 460 ++++++++++++++++++++++++++++++++-------- src/qos_core/Cargo.toml | 6 +- 3 files changed, 615 insertions(+), 289 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index 5ac31963..d2f68e75 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 4 +version = 3 [[package]] name = "addr2line" @@ -135,13 +135,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -194,7 +194,7 @@ checksum = "d92c1f4471b33f6a7af9ea421b249ed18a11c71156564baf6293148fa6ad1b09" dependencies = [ "libc", "log", - "nix", + "nix 0.26.4", "serde", "serde_bytes", "serde_cbor", @@ -312,7 +312,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.100", + "syn 2.0.87", "which", ] @@ -351,9 +351,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.7" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive", "cfg_aliases", @@ -361,15 +361,16 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.7" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", + "syn_derive", ] [[package]] @@ -386,9 +387,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = 
"514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cc" @@ -434,7 +435,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -466,9 +467,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] @@ -490,15 +491,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.7" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -579,9 +580,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ "darling_core", "darling_macro", @@ -589,27 +590,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", 
"ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -706,14 +707,14 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "dunce" -version = "1.0.5" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "ecdsa" @@ -741,9 +742,9 @@ dependencies = [ [[package]] name = "either" -version = "1.15.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elliptic-curve" @@ -811,7 +812,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -853,9 +854,9 @@ dependencies = [ [[package]] name = "flagset" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3ea1ec5f8307826a5b71094dd91fc04d4ae75d5709b20ad351c7fb4815c86ec" +checksum = "cdeb3aa5e95cf9aabc17f060cfa0ced7b83f042390760ca53bf09df9968acaa1" [[package]] name = "fnv" @@ -886,9 +887,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -901,9 +902,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -911,15 +912,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -928,38 +929,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1009,9 +1010,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -1105,12 +1106,6 @@ dependencies = [ "allocator-api2", ] -[[package]] -name = "hashbrown" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" - [[package]] name = "heapless" version = "0.8.0" @@ -1243,9 +1238,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.10.1" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +checksum = 
"0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -1255,9 +1250,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.32" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", @@ -1414,7 +1409,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -1457,12 +1452,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.14.5", "serde", ] @@ -1481,7 +1476,7 @@ version = "0.1.0" dependencies = [ "aws-nitro-enclaves-nsm-api", "borsh", - "nix", + "nix 0.26.4", "qos_client", "qos_core", "qos_crypto", @@ -1522,18 +1517,18 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = 
"29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1570,12 +1565,12 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -1608,9 +1603,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "loom" @@ -1655,6 +1650,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.17" @@ -1728,10 +1732,23 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset", + "memoffset 0.7.1", "pin-utils", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "cfg_aliases", + "libc", + "memoffset 0.9.1", +] + [[package]] name = "nom" version = "7.1.3" @@ -1768,9 +1785,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" 
+checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ "num-integer", "num-traits", @@ -1864,9 +1881,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.3" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" dependencies = [ "memchr", ] @@ -1968,7 +1985,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -2031,7 +2048,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -2110,12 +2127,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy", -] +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" @@ -2124,7 +2138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -2171,9 +2185,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -2208,7 +2222,7 @@ dependencies = [ "aws-nitro-enclaves-nsm-api", "borsh", "libc", - "nix", + "nix 
0.26.4", "qos_crypto", "qos_hex", "qos_nsm", @@ -2218,6 +2232,7 @@ dependencies = [ "serde", "serde_bytes", "tokio", + "tokio-vsock", "vsss-rs", "webpki-roots", ] @@ -2323,9 +2338,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "radium" @@ -2380,7 +2395,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.14", + "getrandom 0.2.10", ] [[package]] @@ -2394,18 +2409,18 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", @@ -2464,7 +2479,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.14", + "getrandom 0.2.10", "libc", "untrusted", "windows-sys 0.52.0", @@ -2557,9 +2572,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.26" +version = "0.23.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" +checksum = 
"730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" dependencies = [ "aws-lc-rs", "log", @@ -2660,9 +2675,9 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] @@ -2688,20 +2703,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" dependencies = [ "itoa", "memchr", @@ -2727,20 +2742,20 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "serde_with" -version = "3.11.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", + "indexmap 2.2.6", "serde", "serde_derive", "serde_json", @@ -2750,14 +2765,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.11.0" 
+version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -2932,15 +2947,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -2967,7 +2994,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -3008,7 +3035,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -3019,7 +3046,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -3074,9 +3101,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" dependencies = [ "tinyvec_macros", ] 
@@ -3112,14 +3139,27 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", +] + +[[package]] +name = "tokio-vsock" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1824fc0300433f400df6b6264a9ab00ba93f39d38c3157fb5f05183476c4af10" +dependencies = [ + "bytes", + "futures", + "libc", + "tokio", + "vsock", ] [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" [[package]] name = "toml_edit" @@ -3127,7 +3167,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.2.6", "toml_datetime", "winnow", ] @@ -3149,15 +3189,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" @@ -3228,9 +3268,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" @@ -3250,9 +3290,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.1" +version = "2.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" +checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" dependencies = [ "base64 0.22.1", "log", @@ -3287,11 +3327,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.13.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" +checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.2.10", ] [[package]] @@ -3302,9 +3342,19 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "vsock" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "4e8b4d00e672f147fc86a09738fadb1445bd1c0a40542378dfb82909deeee688" +dependencies = [ + "libc", + "nix 0.29.0", +] [[package]] name = "vsss-rs" @@ -3351,35 +3401,34 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", - "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3387,22 +3436,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "webpki" @@ -3416,9 +3465,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.10" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "37493cadf42a2a939ed404698ded7fb378bf301b5011f973361779a3a74f8c93" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" dependencies = [ "rustls-pki-types", ] @@ -3485,7 +3534,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -3520,7 +3569,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -3531,7 +3580,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -3583,7 +3632,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -3603,18 +3652,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -3634,9 +3683,9 @@ checksum = 
"2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -3646,9 +3695,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -3658,15 +3707,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] name = "windows_i686_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -3676,9 +3725,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = 
"db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -3688,9 +3737,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -3700,9 +3749,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -3712,9 +3761,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -3815,7 +3864,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", "synstructure 0.13.1", ] @@ -3859,7 +3908,6 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "byteorder", "zerocopy-derive", ] @@ -3871,7 +3919,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -3891,7 +3939,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", "synstructure 0.13.1", ] @@ -3912,7 +3960,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -3934,5 +3982,5 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] diff --git a/src/init/Cargo.lock b/src/init/Cargo.lock index 482645c0..b4af5202 100644 --- a/src/init/Cargo.lock +++ b/src/init/Cargo.lock @@ -2,6 +2,21 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "addr2line" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + [[package]] name = "aead" version = "0.5.2" @@ -97,12 +112,27 @@ checksum = "d92c1f4471b33f6a7af9ea421b249ed18a11c71156564baf6293148fa6ad1b09" dependencies = [ "libc", "log", - "nix", + "nix 0.26.4", "serde", "serde_bytes", "serde_cbor", ] +[[package]] +name = "backtrace" +version = "0.3.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + [[package]] name = "base16ct" version = "0.1.1" @@ -133,6 +163,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + [[package]] name = "bitvec" version = "1.0.1" @@ -156,9 +192,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.7" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive", "cfg_aliases", @@ -166,15 +202,16 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.7" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", + "syn_derive", ] [[package]] @@ -189,6 +226,12 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + [[package]] name = "cc" version = "1.2.16" @@ -247,9 +290,9 @@ checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ 
-300,9 +343,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ "darling_core", "darling_macro", @@ -310,27 +353,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -485,9 +528,9 @@ dependencies = [ [[package]] name = "flagset" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3ea1ec5f8307826a5b71094dd91fc04d4ae75d5709b20ad351c7fb4815c86ec" +checksum = "cdeb3aa5e95cf9aabc17f060cfa0ced7b83f042390760ca53bf09df9968acaa1" [[package]] name = "fnv" @@ -501,6 +544,95 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + 
"futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + 
"futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -523,9 +655,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -542,6 +674,12 @@ dependencies = [ "polyval", ] +[[package]] +name = "gimli" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" + [[package]] name = "group" version = "0.12.1" @@ -708,9 +846,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -742,9 +880,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "memchr" @@ -761,6 +899,35 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "miniz_oxide" +version = "0.7.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi", + "windows-sys", +] + [[package]] name = "multiexp" version = "0.4.0" @@ -780,13 +947,26 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", - "memoffset", + "memoffset 0.7.1", "pin-utils", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "cfg_aliases", + "libc", + "memoffset 0.9.1", +] + [[package]] name = "num" version = "0.4.3" @@ -868,11 +1048,20 @@ dependencies = [ "autocfg", ] +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" -version = "1.21.3" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opaque-debug" @@ -913,6 +1102,12 @@ dependencies = [ "base64ct", ] +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + [[package]] name = 
"pin-utils" version = "0.1.0" @@ -959,12 +1154,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy", -] +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "primeorder" @@ -1010,9 +1202,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] @@ -1032,13 +1224,15 @@ dependencies = [ "aws-nitro-enclaves-nsm-api", "borsh", "libc", - "nix", + "nix 0.26.4", "qos_crypto", "qos_hex", "qos_nsm", "qos_p256", "serde", "serde_bytes", + "tokio", + "tokio-vsock", "vsss-rs", ] @@ -1167,6 +1361,12 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + [[package]] name = "rustversion" version = "1.0.19" @@ -1215,9 +1415,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] @@ -1243,23 +1443,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", - "memchr", "ryu", "serde", ] @@ -1272,14 +1471,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] name = "serde_with" -version = "3.11.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ "base64", "chrono", @@ -1295,14 +1494,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.11.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -1352,6 +1551,25 @@ dependencies = [ "rand_core", ] +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys", +] + [[package]] name = "spin" version = "0.9.8" @@ -1422,15 +1640,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "tap" version = "1.0.1" @@ -1454,7 +1684,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -1488,11 +1718,51 @@ dependencies = [ "time-core", ] +[[package]] +name = "tokio" +version = "1.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "pin-project-lite", + "socket2", + "tokio-macros", + "windows-sys", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + +[[package]] +name = "tokio-vsock" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1824fc0300433f400df6b6264a9ab00ba93f39d38c3157fb5f05183476c4af10" +dependencies = [ + "bytes", + "futures", + "libc", + "tokio", + "vsock", +] 
+ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" [[package]] name = "toml_edit" @@ -1535,9 +1805,19 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "vsock" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "4e8b4d00e672f147fc86a09738fadb1445bd1c0a40542378dfb82909deeee688" +dependencies = [ + "libc", + "nix 0.29.0", +] [[package]] name = "vsss-rs" @@ -1566,35 +1846,34 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", - "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.92" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1602,22 +1881,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "webpki" @@ -1649,9 +1928,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -1665,51 +1944,51 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] name = "windows_i686_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ 
-1747,7 +2026,6 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ - "byteorder", "zerocopy-derive", ] @@ -1759,7 +2037,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] [[package]] @@ -1779,5 +2057,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.87", ] diff --git a/src/qos_core/Cargo.toml b/src/qos_core/Cargo.toml index f3c8531a..33d9f373 100644 --- a/src/qos_core/Cargo.toml +++ b/src/qos_core/Cargo.toml @@ -21,8 +21,8 @@ aws-nitro-enclaves-nsm-api = { version = "0.4", default-features = false } serde_bytes = { version = "0.11", default-features = false } serde = { version = "1", features = ["derive"], default-features = false } -tokio = { version = "1.38.0", features = ["macros", "rt-multi-thread"], default-features = false } -tokio-vsock = { version = "0.7.1", default-features = false } +tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time"], default-features = false } +tokio-vsock = { version = "0.7.1", optional = true } [dev-dependencies] qos_test_primitives = { path = "../qos_test_primitives" } @@ -33,6 +33,6 @@ webpki-roots = { version = "0.26.1" } [features] # Support for VSOCK -vm = [] +vm = ["tokio-vsock"] # Never use in production - support for mock NSM mock = ["qos_nsm/mock"] From 4d74472b66238125dbc493c4f302d2cf7e5067b1 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 24 Apr 2025 13:26:51 -0700 Subject: [PATCH 03/20] update MSRV to 1.86 minimum required bumped due to tokio and tokio-vsock --- src/rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rust-toolchain.toml b/src/rust-toolchain.toml index f6ae0fd5..b6b5ef05 100644 --- 
a/src/rust-toolchain.toml +++ b/src/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.81" +channel = "1.86" components = ["rustfmt", "cargo", "clippy", "rust-analyzer"] profile = "minimal" From 70709ccb2637b1d63e572609d957fe29c96b9989 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 24 Apr 2025 13:35:37 -0700 Subject: [PATCH 04/20] implement async qos_net::AsyncProxy this includes: - Reaper::execute_async [qos_core] - AsyncStream [qos_core] - AsyncPool/AsyncStream [qos_core] - AsyncListener [qos_core] - AsyncRequestProcessor [qos_core] - AsyncProxyConnection [qos_net] - AsyncProxy [qos_net] - AsyncProxyStream [qos_net] - AsyncHostServer [qos_host] - integration tests additions as well as fixing qos_next's qos_core "vm" feature dependency issue by only requiring it if qos_net is requested with it --- src/Cargo.lock | 24 ++ src/Makefile | 2 + src/init/Cargo.lock | 12 + src/init/Cargo.toml | 4 + src/init/init.rs | 77 +++- src/integration/Cargo.toml | 3 +- .../src/bin/pivot_async_remote_tls.rs | 136 +++++++ src/integration/src/lib.rs | 5 + src/integration/tests/async_remote_tls.rs | 94 +++++ src/qos_core/Cargo.toml | 11 +- src/qos_core/src/async_client.rs | 28 ++ src/qos_core/src/async_server.rs | 83 ++++ src/qos_core/src/bin/async_qos_core.rs | 11 + src/qos_core/src/cli.rs | 98 ++++- src/qos_core/src/io/async_pool.rs | 152 +++++++ src/qos_core/src/io/async_stream.rs | 378 ++++++++++++++++++ src/qos_core/src/io/mod.rs | 41 +- src/qos_core/src/io/stream.rs | 18 + src/qos_core/src/lib.rs | 12 + src/qos_core/src/protocol/async_processor.rs | 100 +++++ src/qos_core/src/protocol/async_state.rs | 1 + src/qos_core/src/protocol/mod.rs | 5 +- src/qos_core/src/protocol/processor.rs | 40 +- src/qos_core/src/protocol/services/boot.rs | 42 +- src/qos_core/src/protocol/services/genesis.rs | 10 +- src/qos_core/src/protocol/services/key.rs | 81 +--- .../src/protocol/services/provision.rs | 8 +- src/qos_core/src/protocol/state.rs | 59 +-- src/qos_core/src/reaper.rs | 
112 ++++++ src/qos_enclave/src/main.rs | 4 +- src/qos_host/Cargo.toml | 1 + src/qos_host/src/async_host.rs | 275 +++++++++++++ src/qos_host/src/cli.rs | 60 ++- src/qos_host/src/lib.rs | 2 + src/qos_net/Cargo.toml | 17 +- src/qos_net/src/async_cli.rs | 35 ++ src/qos_net/src/async_proxy.rs | 193 +++++++++ src/qos_net/src/async_proxy_connection.rs | 132 ++++++ src/qos_net/src/async_proxy_stream.rs | 148 +++++++ src/qos_net/src/bin/async_qos_net.rs | 17 + src/qos_net/src/cli.rs | 89 ++++- src/qos_net/src/error.rs | 4 +- src/qos_net/src/lib.rs | 18 +- src/qos_net/src/main.rs | 6 +- src/qos_nsm/src/nsm.rs | 2 +- src/qos_system/src/lib.rs | 29 +- src/qos_test_primitives/Cargo.toml | 1 + src/qos_test_primitives/src/lib.rs | 13 + src/rust-toolchain.toml | 2 +- 49 files changed, 2481 insertions(+), 214 deletions(-) create mode 100644 src/integration/src/bin/pivot_async_remote_tls.rs create mode 100644 src/integration/tests/async_remote_tls.rs create mode 100644 src/qos_core/src/async_client.rs create mode 100644 src/qos_core/src/async_server.rs create mode 100644 src/qos_core/src/bin/async_qos_core.rs create mode 100644 src/qos_core/src/io/async_pool.rs create mode 100644 src/qos_core/src/io/async_stream.rs create mode 100644 src/qos_core/src/protocol/async_processor.rs create mode 100644 src/qos_core/src/protocol/async_state.rs create mode 100644 src/qos_host/src/async_host.rs create mode 100644 src/qos_net/src/async_cli.rs create mode 100644 src/qos_net/src/async_proxy.rs create mode 100644 src/qos_net/src/async_proxy_connection.rs create mode 100644 src/qos_net/src/async_proxy_stream.rs create mode 100644 src/qos_net/src/bin/async_qos_net.rs diff --git a/src/Cargo.lock b/src/Cargo.lock index d2f68e75..9999ce44 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1490,6 +1490,7 @@ dependencies = [ "rustls", "serde", "tokio", + "tokio-rustls", "ureq", "webpki-roots", ] @@ -2221,6 +2222,7 @@ version = "0.1.0" dependencies = [ "aws-nitro-enclaves-nsm-api", "borsh", + 
"futures", "libc", "nix 0.26.4", "qos_crypto", @@ -2275,6 +2277,7 @@ version = "0.1.0" dependencies = [ "borsh", "chunked_transfer", + "futures", "hickory-resolver", "httparse", "qos_core", @@ -2324,6 +2327,7 @@ dependencies = [ name = "qos_test_primitives" version = "0.1.0" dependencies = [ + "nix 0.26.4", "rand 0.8.5", ] @@ -2832,6 +2836,15 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ + "libc", +] + [[package]] name = "signature" version = "1.6.4" @@ -3126,6 +3139,7 @@ dependencies = [ "mio", "num_cpus", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.48.0", @@ -3142,6 +3156,16 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + [[package]] name = "tokio-vsock" version = "0.7.1" diff --git a/src/Makefile b/src/Makefile index 3f6d365a..7ee92a72 100644 --- a/src/Makefile +++ b/src/Makefile @@ -159,6 +159,8 @@ test: @# The integration tests rely on binaries from other crates being built, so @# we build all the workspace targets. 
cargo build --all + @# We also need the async version of qos_net + cargo build --bin async_qos_net --features async_proxy @# Run tests cargo test @# When we build the workspace it resolves with the qos_core mock feature diff --git a/src/init/Cargo.lock b/src/init/Cargo.lock index b4af5202..244c48ee 100644 --- a/src/init/Cargo.lock +++ b/src/init/Cargo.lock @@ -827,6 +827,7 @@ dependencies = [ "qos_core", "qos_nsm", "qos_system", + "tokio", ] [[package]] @@ -1223,6 +1224,7 @@ version = "0.1.0" dependencies = [ "aws-nitro-enclaves-nsm-api", "borsh", + "futures", "libc", "nix 0.26.4", "qos_crypto", @@ -1541,6 +1543,15 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ + "libc", +] + [[package]] name = "signature" version = "2.0.0" @@ -1729,6 +1740,7 @@ dependencies = [ "libc", "mio", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "windows-sys", diff --git a/src/init/Cargo.toml b/src/init/Cargo.toml index ac1ecf97..dc09104d 100644 --- a/src/init/Cargo.toml +++ b/src/init/Cargo.toml @@ -10,7 +10,11 @@ qos_aws = { path = "../qos_aws"} qos_system = { path = "../qos_system"} qos_core = { path = "../qos_core", features = ["vm"], default-features = false } qos_nsm = { path = "../qos_nsm", default-features = false } +tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time", "signal"], default-features = false, optional = true} [[bin]] name = "init" path = "init.rs" + +[features] +async = ["qos_core/async", "tokio"] diff --git a/src/init/init.rs b/src/init/init.rs index 39f0a6cc..5e1d7653 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -1,18 +1,14 @@ -use 
qos_system::{dmesg, get_local_cid, freopen, mount, reboot}; use qos_core::{ - handles::Handles, - io::{SocketAddress, VMADDR_NO_FLAGS}, - reaper::Reaper, - EPHEMERAL_KEY_FILE, - MANIFEST_FILE, - PIVOT_FILE, - QUORUM_FILE, - SEC_APP_SOCK, + handles::Handles, + io::{SocketAddress, VMADDR_NO_FLAGS}, + reaper::Reaper, + EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, }; use qos_nsm::Nsm; +use qos_system::{dmesg, freopen, get_local_cid, mount, reboot}; //TODO: Feature flag -use qos_aws::{init_platform}; +use qos_aws::init_platform; // Mount common filesystems with conservative permissions fn init_rootfs() { @@ -58,6 +54,7 @@ fn boot() { init_platform(); } +#[cfg(not(feature = "async"))] fn main() { boot(); dmesg("QuorumOS Booted".to_string()); @@ -66,18 +63,60 @@ fn main() { dmesg(format!("CID is {}", cid)); let handles = Handles::new( - EPHEMERAL_KEY_FILE.to_string(), - QUORUM_FILE.to_string(), - MANIFEST_FILE.to_string(), - PIVOT_FILE.to_string(), + EPHEMERAL_KEY_FILE.to_string(), + QUORUM_FILE.to_string(), + MANIFEST_FILE.to_string(), + PIVOT_FILE.to_string(), ); + Reaper::execute( - &handles, - Box::new(Nsm), - SocketAddress::new_vsock(cid, 3, VMADDR_NO_FLAGS), - SocketAddress::new_unix(SEC_APP_SOCK), - None, + &handles, + Box::new(Nsm), + SocketAddress::new_vsock(cid, 3, VMADDR_NO_FLAGS), + SocketAddress::new_unix(SEC_APP_SOCK), + None, + ); + + reboot(); +} + +#[cfg(feature = "async")] +#[tokio::main] +async fn main() { + use qos_core::io::{AsyncStreamPool, TimeVal, TimeValLike}; + + boot(); + dmesg("QuorumOS Booted in Async mode".to_string()); + + let cid = get_local_cid().unwrap(); + dmesg(format!("CID is {}", cid)); + + let handles = Handles::new( + EPHEMERAL_KEY_FILE.to_string(), + QUORUM_FILE.to_string(), + MANIFEST_FILE.to_string(), + PIVOT_FILE.to_string(), ); + let start_port = 3; + let default_pool_size = qos_core::DEFAULT_POOL_SIZE + .parse() + .expect("unable to parse default pool size"); + let core_pool = AsyncStreamPool::new( 
+ (start_port..start_port + default_pool_size) + .into_iter() + .map(|p| SocketAddress::new_vsock(cid, p, VMADDR_NO_FLAGS)), + TimeVal::seconds(0), + ); + + let app_pool = AsyncStreamPool::new( + (0..default_pool_size) + .into_iter() + .map(|p| SocketAddress::new_unix(&format!("{SEC_APP_SOCK}_{p}"))), + TimeVal::seconds(5), + ); + + Reaper::async_execute(&handles, Box::new(Nsm), core_pool, app_pool, None); + reboot(); } diff --git a/src/integration/Cargo.toml b/src/integration/Cargo.toml index 618f04e5..00ae7a66 100644 --- a/src/integration/Cargo.toml +++ b/src/integration/Cargo.toml @@ -9,7 +9,7 @@ qos_core = { path = "../qos_core", features = ["mock"], default-features = false qos_nsm = { path = "../qos_nsm", features = ["mock"], default-features = false } qos_host = { path = "../qos_host", default-features = false } qos_client = { path = "../qos_client", default-features = false } -qos_net = { path = "../qos_net", default-features = false } +qos_net = { path = "../qos_net", features = ["proxy", "async_proxy"], default-features = false } qos_crypto = { path = "../qos_crypto" } qos_hex = { path = "../qos_hex" } qos_p256 = { path = "../qos_p256", features = ["mock"] } @@ -19,6 +19,7 @@ tokio = { version = "1.38.0", features = ["macros", "rt-multi-thread"], default- borsh = { version = "1.0", features = ["std", "derive"] , default-features = false} nix = { version = "0.26", features = ["socket"], default-features = false } rustls = { version = "0.23.5" } +tokio-rustls = { version = "0.26.2" } webpki-roots = { version = "0.26.1" } [dev-dependencies] diff --git a/src/integration/src/bin/pivot_async_remote_tls.rs b/src/integration/src/bin/pivot_async_remote_tls.rs new file mode 100644 index 00000000..bee5d9fa --- /dev/null +++ b/src/integration/src/bin/pivot_async_remote_tls.rs @@ -0,0 +1,136 @@ +use core::panic; +use std::{io::ErrorKind, sync::Arc}; + +use borsh::BorshDeserialize; +use integration::PivotRemoteTlsMsg; +use qos_core::{ + 
async_server::{AsyncRequestProcessor, AsyncSocketServer}, + io::{ + AsyncStreamPool, SharedAsyncStreamPool, SocketAddress, TimeVal, + TimeValLike, + }, + protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, +}; +use qos_net::async_proxy_stream::AsyncProxyStream; +use rustls::RootCertStore; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio_rustls::TlsConnector; + +#[derive(Clone)] +struct Processor { + net_pool: SharedAsyncStreamPool, +} + +impl Processor { + fn new(net_pool: SharedAsyncStreamPool) -> Self { + Processor { net_pool } + } +} + +impl AsyncRequestProcessor for Processor { + async fn process(&self, request: Vec) -> Vec { + let msg = PivotRemoteTlsMsg::try_from_slice(&request) + .expect("Received invalid message - test is broken!"); + + match msg { + PivotRemoteTlsMsg::RemoteTlsRequest { host, path } => { + let pool = self.net_pool.read().await; + let mut stream = AsyncProxyStream::connect_by_name( + pool.get().await, + host.clone(), + 443, + vec!["8.8.8.8".to_string()], + 53, + ) + .await + .unwrap(); + + let root_store = RootCertStore { + roots: webpki_roots::TLS_SERVER_ROOTS.into(), + }; + let server_name: rustls::pki_types::ServerName<'_> = + host.clone().try_into().unwrap(); + let config: rustls::ClientConfig = + rustls::ClientConfig::builder() + .with_root_certificates(root_store) + .with_no_client_auth(); + let conn = TlsConnector::from(Arc::new(config)); + let mut tls = conn + .connect(server_name, &mut stream) + .await + .expect("tls unable to establish connection"); + + let http_request = + format!("GET {path} HTTP/1.1\r\nHost: {host}\r\nConnection: close\r\n\r\n"); + + tls.write_all(http_request.as_bytes()).await.unwrap(); + + let mut response_bytes = Vec::new(); + let read_to_end_result = + tls.read_to_end(&mut response_bytes).await; + match read_to_end_result { + Ok(read_size) => { + assert!(read_size > 0); + // Refresh the connection for additional calls + stream.refresh().await.unwrap(); + } + Err(e) => { + // Only EOF errors are 
expected. This means the + // connection was closed by the remote server https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof + if e.kind() != ErrorKind::UnexpectedEof { + panic!( + "unexpected error trying to read_to_end: {e:?}" + ); + } + } + } + + let fetched_content = + std::str::from_utf8(&response_bytes).unwrap(); + borsh::to_vec(&PivotRemoteTlsMsg::RemoteTlsResponse(format!( + "Content fetched successfully: {fetched_content}" + ))) + .expect("RemoteTlsResponse is valid borsh") + } + PivotRemoteTlsMsg::RemoteTlsResponse(_) => { + panic!("Unexpected RemoteTlsResponse - test is broken") + } + } + } +} + +#[tokio::main] +async fn main() { + // Parse args: + // - first argument is the socket to bind to (normal server server) + // - second argument is the socket to use for remote proxying + let args: Vec = std::env::args().collect(); + + let socket_path: &String = &args[1]; + let proxy_path: &String = &args[2]; + + let pool = AsyncStreamPool::new( + std::iter::once(SocketAddress::new_unix(socket_path)), + TimeVal::seconds(0), // listener, no timeout + ); + + let proxy_pool = AsyncStreamPool::new( + std::iter::once(SocketAddress::new_unix(proxy_path)), + TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), + ) + .shared(); + + let tasks = + AsyncSocketServer::listen_all(pool, &Processor::new(proxy_pool)) + .unwrap(); + + match tokio::signal::ctrl_c().await { + Ok(_) => { + eprintln!("pivot handling ctrl+c the tokio way"); + for task in tasks { + task.abort(); + } + } + Err(err) => panic!("{err}"), + } +} diff --git a/src/integration/src/lib.rs b/src/integration/src/lib.rs index 83aefe12..6b727c11 100644 --- a/src/integration/src/lib.rs +++ b/src/integration/src/lib.rs @@ -28,7 +28,12 @@ pub const PIVOT_PANIC_PATH: &str = "../target/debug/pivot_panic"; /// Path to an enclave app that has routes to test remote connection features. 
pub const PIVOT_REMOTE_TLS_PATH: &str = "../target/debug/pivot_remote_tls"; /// Path to an enclave app that has routes to test remote connection features. +pub const PIVOT_ASYNC_REMOTE_TLS_PATH: &str = + "../target/debug/pivot_async_remote_tls"; +/// Path to an enclave app that has routes to test remote connection features. pub const QOS_NET_PATH: &str = "../target/debug/qos_net"; +/// Path to an enclave app that has routes to test async remote connection features. +pub const ASYNC_QOS_NET_PATH: &str = "../target/debug/async_qos_net"; /// Path to an enclave app that has routes to stress our socket. pub const PIVOT_SOCKET_STRESS_PATH: &str = "../target/debug/pivot_socket_stress"; diff --git a/src/integration/tests/async_remote_tls.rs b/src/integration/tests/async_remote_tls.rs new file mode 100644 index 00000000..b5b39998 --- /dev/null +++ b/src/integration/tests/async_remote_tls.rs @@ -0,0 +1,94 @@ +use std::{process::Command, time::Duration}; + +use borsh::BorshDeserialize; +use integration::{ + PivotRemoteTlsMsg, ASYNC_QOS_NET_PATH, PIVOT_ASYNC_REMOTE_TLS_PATH, +}; +use qos_core::{ + async_client::AsyncClient, + io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, +}; + +use qos_test_primitives::ChildWrapper; + +const REMOTE_TLS_TEST_NET_PROXY_SOCKET: &str = + "/tmp/async_remote_tls_test.net.sock"; +const REMOTE_TLS_TEST_ENCLAVE_SOCKET: &str = + "/tmp/async_remote_tls_test.enclave.sock"; +const POOL_SIZE: &str = "1"; + +#[tokio::test] +async fn fetch_async_remote_tls_content() { + let _net_proxy: ChildWrapper = Command::new(ASYNC_QOS_NET_PATH) + .arg("--usock") + .arg(REMOTE_TLS_TEST_NET_PROXY_SOCKET) + .arg("--pool-size") + .arg(POOL_SIZE) + .spawn() + .unwrap() + .into(); + + // because qos_net's listen call uses a pool it will create a "_X" suffix, we just point to the 1st and only + // listening socket in the pool here + let socket_net_path = format!("{REMOTE_TLS_TEST_NET_PROXY_SOCKET}_0"); + + let 
_enclave_app: ChildWrapper = Command::new(PIVOT_ASYNC_REMOTE_TLS_PATH) + .arg(REMOTE_TLS_TEST_ENCLAVE_SOCKET) + .arg(&socket_net_path) + .spawn() + .unwrap() + .into(); + + // ensure the enclave socket is created by qos_net before proceeding + while !std::fs::exists(REMOTE_TLS_TEST_ENCLAVE_SOCKET).unwrap() { + tokio::time::sleep(Duration::from_millis(50)).await; + } + + let enclave_pool = AsyncStreamPool::new( + std::iter::once(SocketAddress::new_unix( + REMOTE_TLS_TEST_ENCLAVE_SOCKET, + )), + TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), + ); + + let enclave_client = AsyncClient::new(enclave_pool.shared()); + + let app_request = borsh::to_vec(&PivotRemoteTlsMsg::RemoteTlsRequest { + host: "api.turnkey.com".to_string(), + path: "/health".to_string(), + }) + .unwrap(); + + let response = enclave_client.call(&app_request).await.unwrap(); + let response_text = + match PivotRemoteTlsMsg::try_from_slice(&response).unwrap() { + PivotRemoteTlsMsg::RemoteTlsResponse(s) => s, + PivotRemoteTlsMsg::RemoteTlsRequest { host: _, path: _ } => { + panic!("unexpected RemoteTlsRequest sent as response") + } + }; + + assert!(response_text.contains("Content fetched successfully")); + assert!(response_text.contains("HTTP/1.1 200 OK")); + assert!(response_text.contains("currentTime")); + + let app_request = borsh::to_vec(&PivotRemoteTlsMsg::RemoteTlsRequest { + host: "www.googleapis.com".to_string(), + path: "/oauth2/v3/certs".to_string(), + }) + .unwrap(); + + let response = enclave_client.call(&app_request).await.unwrap(); + let response_text = + match PivotRemoteTlsMsg::try_from_slice(&response).unwrap() { + PivotRemoteTlsMsg::RemoteTlsResponse(s) => s, + PivotRemoteTlsMsg::RemoteTlsRequest { host: _, path: _ } => { + panic!("unexpected RemoteTlsRequest sent as response") + } + }; + + assert!(response_text.contains("Content fetched successfully")); + assert!(response_text.contains("HTTP/1.1 200 OK")); + assert!(response_text.contains("keys")); +} diff --git 
a/src/qos_core/Cargo.toml b/src/qos_core/Cargo.toml index 33d9f373..756f5296 100644 --- a/src/qos_core/Cargo.toml +++ b/src/qos_core/Cargo.toml @@ -21,7 +21,8 @@ aws-nitro-enclaves-nsm-api = { version = "0.4", default-features = false } serde_bytes = { version = "0.11", default-features = false } serde = { version = "1", features = ["derive"], default-features = false } -tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time"], default-features = false } +futures = { version = "0.3.30" } +tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time", "signal"], default-features = false, optional = true} tokio-vsock = { version = "0.7.1", optional = true } [dev-dependencies] @@ -32,7 +33,13 @@ rustls = { version = "0.23.5" } webpki-roots = { version = "0.26.1" } [features] +async = ["tokio", "tokio-vsock"] # Support for VSOCK -vm = ["tokio-vsock"] +vm = [] # Never use in production - support for mock NSM mock = ["qos_nsm/mock"] + +[[bin]] +name = "async_qos_core" +path = "src/bin/async_qos_core.rs" +required-features = ["async"] diff --git a/src/qos_core/src/async_client.rs b/src/qos_core/src/async_client.rs new file mode 100644 index 00000000..04c46682 --- /dev/null +++ b/src/qos_core/src/async_client.rs @@ -0,0 +1,28 @@ +//! Streaming socket based client to connect with +//! [`crate::server::SocketServer`]. + +use crate::{client::ClientError, io::SharedAsyncStreamPool}; + +/// Client for communicating with the enclave `crate::server::SocketServer`. +#[derive(Clone, Debug)] +pub struct AsyncClient { + pool: SharedAsyncStreamPool, +} + +impl AsyncClient { + /// Create a new client. + #[must_use] + pub fn new(pool: SharedAsyncStreamPool) -> Self { + Self { pool } + } + + /// Send raw bytes and wait for a response until the clients configured + /// timeout. 
+ pub async fn call(&self, request: &[u8]) -> Result, ClientError> { + let pool = self.pool.read().await; + let mut stream = pool.get().await; + + let resp = stream.call(request).await?; + Ok(resp) + } +} diff --git a/src/qos_core/src/async_server.rs b/src/qos_core/src/async_server.rs new file mode 100644 index 00000000..426375bc --- /dev/null +++ b/src/qos_core/src/async_server.rs @@ -0,0 +1,83 @@ +//! Streaming socket based server for use in an enclave. Listens for connections +//! from [`crate::client::Client`]. + +use tokio::task::JoinHandle; + +use crate::{ + io::{AsyncListener, AsyncStreamPool, IOError}, + server::SocketServerError, +}; + +/// Something that can process requests in an async way. +pub trait AsyncRequestProcessor: Send { + /// Process an incoming request and return a response in async. + /// + /// The request and response are raw bytes. Likely this should be encoded + /// data and logic inside of this function should take care of decoding the + /// request and encoding a response. + fn process( + &self, + request: Vec, + ) -> impl std::future::Future> + Send; +} + +/// A bare bones, socket based server. +pub struct AsyncSocketServer; + +impl AsyncSocketServer { + /// Listen and respond to incoming requests on all the pool's addresses with the given `processor`. + /// *NOTE*: the `POOL_SIZE` must match on both sides, since we expect ALL sockets to be connected + /// to right away (e.g. not on first use). The client side connect (above) will always connect them all. + /// This method returns a list of tasks that are running as part of this listener. `JoinHandle::abort()` + /// should be called on each when the program exits (e.g. 
on ctrl+c) + pub fn listen_all( + pool: AsyncStreamPool, + processor: &R, + ) -> Result>>, SocketServerError> + where + R: AsyncRequestProcessor + 'static + Clone, + { + println!("`AsyncSocketServer` listening on pool size {}", pool.len()); + + let listeners = pool.listen()?; + + let mut tasks = Vec::new(); + for listener in listeners { + let p = processor.clone(); + let task = + tokio::spawn(async move { accept_loop(listener, p).await }); + + tasks.push(task); + } + + Ok(tasks) + } +} + +async fn accept_loop

( + listener: AsyncListener, + processor: P, +) -> Result<(), SocketServerError> +where + P: AsyncRequestProcessor + Clone, +{ + loop { + let mut stream = listener.accept().await?; + loop { + match stream.recv().await { + Ok(payload) => { + let response = processor.process(payload).await; + stream.send(&response).await?; + } + Err(err) => match err { + IOError::StdIoError(err) => { + if err.kind() == std::io::ErrorKind::UnexpectedEof { + break; // just re-accept + } + } + _ => return Err(err.into()), + }, + } + } + } +} diff --git a/src/qos_core/src/bin/async_qos_core.rs b/src/qos_core/src/bin/async_qos_core.rs new file mode 100644 index 00000000..59183526 --- /dev/null +++ b/src/qos_core/src/bin/async_qos_core.rs @@ -0,0 +1,11 @@ +use qos_core::cli::CLI; + +fn main() { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("tokio main to run") + .block_on(async { + CLI::async_execute(); + }); +} diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index e4526e5c..5d0ba782 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -9,9 +9,13 @@ use crate::{ io::SocketAddress, parser::{GetParserForOptions, OptionsParser, Parser, Token}, reaper::Reaper, - EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, + DEFAULT_POOL_SIZE, EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, + QUORUM_FILE, SEC_APP_SOCK, }; +#[cfg(feature = "async")] +use crate::io::AsyncStreamPool; + /// "cid" pub const CID: &str = "cid"; /// "port" @@ -28,6 +32,8 @@ pub const EPHEMERAL_FILE_OPT: &str = "ephemeral-file"; /// Name for the option to specify the manifest file. pub const MANIFEST_FILE_OPT: &str = "manifest-file"; const APP_USOCK: &str = "app-usock"; +/// Name for the option to specify the maximum `AsyncPool` size. +pub const POOL_SIZE: &str = "pool-size"; /// CLI options for starting up the enclave server. 
#[derive(Default, Clone, Debug, PartialEq)] @@ -44,11 +50,55 @@ impl EnclaveOpts { Self { parsed } } + /// Create a new [`AsyncPool`] of [`AsyncStream`] using the list of [`SocketAddress`] for the enclave server and + /// return the new [`AsyncPool`]. Analogous to [`Self::addr`] and [`Self::app_addr`] depending on the [`app`] parameter. + #[cfg(feature = "async")] + #[allow(unused)] + fn async_pool(&self, app: bool) -> AsyncStreamPool { + use nix::sys::time::{TimeVal, TimeValLike}; + + let pool_size: u32 = self + .parsed + .single(POOL_SIZE) + .expect("invalid pool options") + .parse() + .expect("invalid pool_size specified"); + let usock_param = if app { APP_USOCK } else { USOCK }; + + match ( + self.parsed.single(CID), + self.parsed.single(PORT), + self.parsed.single(usock_param), + ) { + #[cfg(feature = "vm")] + (Some(c), Some(p), None) => { + let c = c.parse::().unwrap(); + let start_port = p.parse::().unwrap(); + + let addresses = (start_port..start_port + pool_size).map(|p| { + SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS) + }); + + AsyncStreamPool::new(addresses, TimeVal::seconds(5)) + } + (None, None, Some(u)) => { + let addresses = (0..pool_size).map(|i| { + let u = format!("{u}_{i}"); // add _X suffix for pooling + SocketAddress::new_unix(&u) + }); + + AsyncStreamPool::new(addresses, TimeVal::seconds(5)) + } + _ => panic!("Invalid socket opts"), + } + } + /// Get the `SocketAddress` for the enclave server. /// /// # Panics /// /// Panics if the opts are not valid for exactly one of unix or vsock. 
+ #[allow(unused)] fn addr(&self) -> SocketAddress { match ( self.parsed.single(CID), @@ -66,6 +116,7 @@ impl EnclaveOpts { } } + #[allow(unused)] fn app_addr(&self) -> SocketAddress { SocketAddress::new_unix( self.parsed @@ -149,6 +200,32 @@ impl CLI { ); } } + + /// Execute the enclave server CLI with the environment args using tokio/async + #[cfg(feature = "async")] + pub fn async_execute() { + let mut args: Vec = env::args().collect(); + let opts = EnclaveOpts::new(&mut args); + + if opts.parsed.version() { + println!("version: {}", env!("CARGO_PKG_VERSION")); + } else if opts.parsed.help() { + println!("{}", opts.parsed.info()); + } else { + Reaper::async_execute( + &Handles::new( + opts.ephemeral_file(), + opts.quorum_file(), + opts.manifest_file(), + opts.pivot_file(), + ), + opts.nsm(), + opts.async_pool(false), + opts.async_pool(true), + None, + ); + } + } } /// Parser for enclave CLI @@ -201,6 +278,11 @@ impl GetParserForOptions for EnclaveParser { .takes_value(true) .default_value(SEC_APP_SOCK) ) + .token( + Token::new(POOL_SIZE, "the pool size for use with all socket types") + .takes_value(true) + .default_value(DEFAULT_POOL_SIZE) + ) } } @@ -281,6 +363,20 @@ mod test { assert_eq!(opts.addr(), SocketAddress::new_unix("./test.sock")); } + #[test] + #[cfg(feature = "async")] + fn parse_pool_size() { + let mut args: Vec<_> = + vec!["binary", "--usock", "./test.sock", "--pool-size", "7"] + .into_iter() + .map(String::from) + .collect(); + let opts = EnclaveOpts::new(&mut args); + + let pool = opts.async_pool(false); + assert_eq!(pool.len(), 7); + } + #[test] fn parse_manifest_file() { let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock"] diff --git a/src/qos_core/src/io/async_pool.rs b/src/qos_core/src/io/async_pool.rs new file mode 100644 index 00000000..e096caa4 --- /dev/null +++ b/src/qos_core/src/io/async_pool.rs @@ -0,0 +1,152 @@ +use std::sync::Arc; + +use nix::sys::time::TimeVal; +use tokio::sync::{Mutex, MutexGuard, RwLock}; + +use 
super::{AsyncListener, AsyncStream, IOError, SocketAddress}; + +/// Socket Pool Errors +#[derive(Debug)] +pub enum PoolError { + /// No addresses were provided in the pool constructor + NoAddressesSpecified, +} + +/// Generic Async pool using tokio Mutex +#[derive(Debug)] +struct AsyncPool { + handles: Vec>, +} + +/// Specialization of `AsyncPool` with `AsyncStream` and connection/listen logic. +#[derive(Debug)] +pub struct AsyncStreamPool { + addresses: Vec, // local copy used for `listen` only TODO: refactor listeners out of pool + pool: AsyncPool, +} + +/// Helper type to wrap `AsyncStreamPool` in `Arc` and `RwLock`. Used to allow multiple processors to run across IO +/// await points without locking the whole set. +pub type SharedAsyncStreamPool = Arc>; + +impl AsyncStreamPool { + /// Create a new `AsyncStreamPool` which will contain all the known addresses but no connections yet. + /// Includes the connect timeout which gets used in case `get` gets called. + pub fn new( + addresses: impl IntoIterator, + timeout: TimeVal, + ) -> Self { + let addresses: Vec = addresses.into_iter().collect(); + + // TODO: DEBUG remove + for addr in &addresses { + println!("pool address: {:?}", addr.debug_info()); + } + + let streams: Vec = + addresses.iter().map(|a| AsyncStream::new(a, timeout)).collect(); + + let pool = AsyncPool::from(streams); + + Self { addresses, pool } + } + + /// Helper function to get the Arc and Mutex wrapping + #[must_use] + pub fn shared(self) -> SharedAsyncStreamPool { + Arc::new(RwLock::new(self)) + } + + /// Returns number of expected sockets/connections + #[must_use] + pub fn len(&self) -> usize { + self.addresses.len() + } + + /// Returns true if pool is empty + #[must_use] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Gets the next available `AsyncStream` behind a `MutexGuard` + pub async fn get(&self) -> MutexGuard { + self.pool.get().await + } + + /// Create a new pool by listening for new connections on all the addresses + pub fn 
listen(self) -> Result, IOError> { + let mut listeners = Vec::new(); + + for addr in self.addresses { + let listener = AsyncListener::listen(&addr)?; + + listeners.push(listener); + } + + Ok(listeners) + } +} + +impl AsyncPool { + /// Get an `AsyncStream` behind a `MutexGuard` for use in an `AsyncStream::call` + /// Will wait (async) if all connections are locked until one becomes available + async fn get(&self) -> MutexGuard { + // TODO: make this into an error + if self.handles.is_empty() { + panic!("empty handles in AsyncPool. Bad init?"); + } + + let iter = self.handles.iter().map(|h| { + let l = h.lock(); + Box::pin(l) + }); + + // find an unlocked stream + let (stream, _, _) = futures::future::select_all(iter).await; + + stream + } +} + +impl From> for AsyncPool { + fn from(value: Vec) -> Self { + let handles: Vec> = + value.into_iter().map(|val| Mutex::new(val)).collect(); + + Self { handles } + } +} + +#[cfg(test)] +mod test { + use super::*; + + // constructor for basic i32 with repeating 0 values for testing + impl AsyncPool { + fn test(count: usize) -> Self { + Self { + handles: std::iter::repeat(0) + .take(count) + .map(Mutex::new) + .collect(), + } + } + } + + // tests if basic pool works with still-available connections + #[tokio::test] + async fn test_async_pool_available() { + let pool = AsyncPool::test(2); + + let first = pool.get().await; + assert_eq!(*first, 0); + let second = pool.get().await; + assert_eq!(*second, 0); + + // this would hang (wait) if we didn't drop one of the previous ones + drop(first); + let third = pool.get().await; + assert_eq!(*third, 0); + } +} diff --git a/src/qos_core/src/io/async_stream.rs b/src/qos_core/src/io/async_stream.rs new file mode 100644 index 00000000..f4794176 --- /dev/null +++ b/src/qos_core/src/io/async_stream.rs @@ -0,0 +1,378 @@ +//! Abstractions to handle connection based socket streams. 
+ +use std::{ + path::Path, + pin::Pin, + time::{Duration, SystemTime}, +}; + +pub use nix::sys::time::TimeVal; + +use tokio::{ + io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, + net::{UnixListener, UnixSocket, UnixStream}, +}; +#[cfg(feature = "vm")] +use tokio_vsock::{VsockListener, VsockStream}; + +use super::{IOError, SocketAddress}; + +#[derive(Debug)] +enum InnerListener { + Unix(UnixListener), + #[cfg(feature = "vm")] + Vsock(VsockListener), +} + +#[derive(Debug)] +enum InnerStream { + Unix(UnixStream), + #[cfg(feature = "vm")] + Vsock(VsockStream), +} + +/// Handle on a stream +#[derive(Debug)] +pub struct AsyncStream { + address: Option, + inner: Option, + timeout: Duration, +} + +impl AsyncStream { + // accept a new connection, used by server side + fn unix_accepted(stream: UnixStream) -> Self { + Self { + address: None, + inner: Some(InnerStream::Unix(stream)), + timeout: Duration::ZERO, + } + } + + // accept a new connection, used by server side + #[cfg(feature = "vm")] + fn vsock_accepted(stream: VsockStream) -> Self { + Self { + address: None, + inner: Some(InnerStream::Vsock(stream)), + timeout: Duration::ZERO, + } + } + + /// Create a new `AsyncStream` with known `SocketAddress` and `TimeVal`. The stream starts disconnected + /// and will connect on the first `call`. + #[must_use] + pub fn new(address: &SocketAddress, timeout: TimeVal) -> Self { + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + let timeout = Duration::new( + timeout.tv_sec() as u64, + timeout.tv_usec() as u32 * 1000, + ); + + Self { address: Some(address.clone()), inner: None, timeout } + } + + /// Create a new `Stream` from a `SocketAddress` and a timeout and connect using async + pub async fn connect(&mut self) -> Result<(), IOError> { + match self.address()? 
{ + SocketAddress::Unix(uaddr) => { + let path = + uaddr.path().ok_or(IOError::ConnectAddressInvalid)?; + + let inner = retry_unix_connect(path, self.timeout).await?; + + self.inner = Some(InnerStream::Unix(inner)); + } + #[cfg(feature = "vm")] + SocketAddress::Vsock(vaddr) => { + let inner = retry_vsock_connect( + vaddr.cid(), + vaddr.port(), + self.timeout, + ) + .await?; + + self.inner = Some(InnerStream::Vsock(inner)); + } + } + + Ok(()) + } + + fn address(&self) -> Result<&SocketAddress, IOError> { + self.address.as_ref().ok_or(IOError::ConnectAddressInvalid) + } + + fn inner_mut(&mut self) -> Result<&mut InnerStream, IOError> { + self.inner.as_mut().ok_or(IOError::DisconnectedStream) + } + + /// Reconnects this `AsyncStream` by calling `connect` again on the underlying socket + pub async fn reconnect(&mut self) -> Result<(), IOError> { + let timeout = self.timeout; + + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => { + let addr = s + .peer_addr()? + .as_pathname() + .ok_or(IOError::ConnectAddressInvalid)? + .to_owned(); + let new_socket = UnixSocket::new_stream()?; + let new_stream = + tokio::time::timeout(timeout, new_socket.connect(addr)) + .await??; + *s = new_stream; + } + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => { + let vaddr = s.peer_addr()?; + let new_stream = + tokio::time::timeout(timeout, VsockStream::connect(vaddr)) + .await??; + *s = new_stream; + } + } + Ok(()) + } + + /// Sends a buffer over the underlying socket using async + pub async fn send(&mut self, buf: &[u8]) -> Result<(), IOError> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => send(s, buf).await, + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => send(s, buf).await, + } + } + + /// Receive from the underlying socket using async + pub async fn recv(&mut self) -> Result, IOError> { + match &mut self.inner_mut()? 
{ + InnerStream::Unix(ref mut s) => recv(s).await, + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => recv(s).await, + } + } + + /// Perform a "call" by sending the `req_buf` bytes and waiting for reply on the same socket. + pub async fn call(&mut self, req_buf: &[u8]) -> Result, IOError> { + // first time? connect + if self.inner.is_none() { + self.connect().await?; + } + + self.send(req_buf).await?; + self.recv().await + } +} + +async fn send( + stream: &mut S, + buf: &[u8], +) -> Result<(), IOError> { + let len = buf.len(); + // First, send the length of the buffer + let len_buf: [u8; size_of::()] = (len as u64).to_le_bytes(); + stream.write_all(&len_buf).await?; + // Send the actual contents of the buffer + stream.write_all(buf).await?; + + Ok(()) +} + +async fn recv( + stream: &mut S, +) -> Result, IOError> { + let length: usize = { + let mut buf = [0u8; size_of::()]; + stream.read_exact(&mut buf).await?; + u64::from_le_bytes(buf) + .try_into() + // Should only be possible if we are on 32bit architecture + .map_err(|_| IOError::ArithmeticSaturation)? + }; + + // Read the buffer + let mut buf = vec![0; length]; + stream.read_exact(&mut buf).await?; + + Ok(buf) +} + +impl From for std::io::Error { + fn from(value: IOError) -> Self { + match value { + IOError::DisconnectedStream => std::io::Error::new( + std::io::ErrorKind::NotFound, + "connection not found", + ), + _ => { + std::io::Error::new(std::io::ErrorKind::Other, "unknown error") + } + } + } +} + +impl AsyncRead for AsyncStream { + fn poll_read( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + match &mut self.inner_mut()? 
{ + InnerStream::Unix(ref mut s) => Pin::new(s).poll_read(cx, buf), + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_read(cx, buf), + } + } +} + +impl AsyncWrite for AsyncStream { + fn poll_write( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> std::task::Poll> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => Pin::new(s).poll_write(cx, buf), + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_write(cx, buf), + } + } + + fn poll_flush( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => Pin::new(s).poll_flush(cx), + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_flush(cx), + } + } + + fn poll_shutdown( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => Pin::new(s).poll_shutdown(cx), + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_shutdown(cx), + } + } +} + +/// Abstraction to listen for incoming stream connections. +pub struct AsyncListener { + inner: InnerListener, + // addr: SocketAddress, +} + +impl AsyncListener { + /// Bind and listen on the given address. + pub(crate) fn listen(addr: &SocketAddress) -> Result { + let listener = match addr { + SocketAddress::Unix(uaddr) => { + let path = + uaddr.path().ok_or(IOError::ConnectAddressInvalid)?; + let inner = InnerListener::Unix(UnixListener::bind(path)?); + Self { inner } + } + #[cfg(feature = "vm")] + SocketAddress::Vsock(vaddr) => { + let vaddr = + tokio_vsock::VsockAddr::new(vaddr.cid(), vaddr.port()); + let inner = InnerListener::Vsock(VsockListener::bind(vaddr)?); + Self { inner } + } + }; + + Ok(listener) + } + + /// Accept a new connection. 
+ pub async fn accept(&self) -> Result { + let stream = match &self.inner { + InnerListener::Unix(l) => { + let (s, _) = l.accept().await?; + AsyncStream::unix_accepted(s) + } + #[cfg(feature = "vm")] + InnerListener::Vsock(l) => { + let (s, _) = l.accept().await?; + AsyncStream::vsock_accepted(s) + } + }; + + Ok(stream) + } +} + +impl Drop for AsyncListener { + fn drop(&mut self) { + match &mut self.inner { + InnerListener::Unix(usock) => match usock.local_addr() { + Ok(addr) => { + if let Some(path) = addr.as_pathname() { + _ = std::fs::remove_file(path); + } else { + eprintln!("unable to path the usock"); // do not crash in Drop + } + } + Err(e) => eprintln!("{e}"), // do not crash in Drop + }, + #[cfg(feature = "vm")] + InnerListener::Vsock(_vsock) => {} // vsock's drop will clear this + } + } +} + +// raw unix socket connect retry with timeout, 50ms period +async fn retry_unix_connect( + path: &Path, + timeout: Duration, +) -> Result { + let sleep_time = Duration::from_millis(50); + let eot = SystemTime::now() + timeout; + + loop { + let socket = UnixSocket::new_stream()?; + match tokio::time::timeout(timeout, socket.connect(path)).await? { + Ok(stream) => return Ok(stream), + Err(err) => { + if SystemTime::now() > eot { + return Err(err); + } + tokio::time::sleep(sleep_time).await; + } + } + } +} + +// raw vsock socket connect retry with timeout, 50ms period +#[cfg(feature = "vm")] +async fn retry_vsock_connect( + cid: u32, + port: u32, + timeout: Duration, +) -> Result { + use tokio_vsock::VsockAddr; + + let sleep_time = Duration::from_millis(50); + let eot = SystemTime::now() + timeout; + + loop { + let addr = VsockAddr::new(cid, port); + match tokio::time::timeout(timeout, VsockStream::connect(addr)).await? 
{ + Ok(stream) => return Ok(stream), + Err(err) => { + if SystemTime::now() > eot { + return Err(err); + } + tokio::time::sleep(sleep_time).await; + } + } + } +} diff --git a/src/qos_core/src/io/mod.rs b/src/qos_core/src/io/mod.rs index 8f1de7a0..4f2eb212 100644 --- a/src/qos_core/src/io/mod.rs +++ b/src/qos_core/src/io/mod.rs @@ -3,8 +3,16 @@ //! NOTE TO MAINTAINERS: Interaction with any sys calls should be contained //! within this module. -mod stream; +#[cfg(feature = "async")] +mod async_pool; +#[cfg(feature = "async")] +mod async_stream; +#[cfg(feature = "async")] +pub use async_pool::*; +#[cfg(feature = "async")] +pub use async_stream::*; +mod stream; pub use stream::{ Listener, SocketAddress, Stream, TimeVal, TimeValLike, MAX_PAYLOAD_SIZE, VMADDR_FLAG_TO_HOST, VMADDR_NO_FLAGS, @@ -13,12 +21,20 @@ pub use stream::{ /// QOS I/O error #[derive(Debug)] pub enum IOError { + /// `std::io::Error` wrapper. + StdIoError(std::io::Error), /// `nix::Error` wrapper. NixError(nix::Error), /// Arithmetic operation saturated. ArithmeticSaturation, /// Unknown error. UnknownError, + /// Stream was not connected when expected to be connected. + DisconnectedStream, + /// Connect address invalid + ConnectAddressInvalid, + /// Timed out while claling `connect` over a socket. + ConnectTimeout, /// Timed out while calling `recv` over a socket. RecvTimeout, /// The `recv` system call was interrupted while receiving over a socket. @@ -33,6 +49,9 @@ pub enum IOError { RecvNixError(nix::Error), /// Reading the response size resulted in a size which exceeds the max payload size. OversizedPayload(usize), + #[cfg(feature = "async")] + /// A async socket pool error during pool operations. 
+ PoolError(PoolError), } impl From for IOError { @@ -40,3 +59,23 @@ impl From for IOError { Self::NixError(err) } } + +impl From for IOError { + fn from(err: std::io::Error) -> Self { + Self::StdIoError(err) + } +} + +#[cfg(feature = "async")] +impl From for IOError { + fn from(_: tokio::time::error::Elapsed) -> Self { + Self::ConnectTimeout + } +} + +#[cfg(feature = "async")] +impl From for IOError { + fn from(value: PoolError) -> Self { + Self::PoolError(value) + } +} diff --git a/src/qos_core/src/io/stream.rs b/src/qos_core/src/io/stream.rs index 49785305..b5586ddb 100644 --- a/src/qos_core/src/io/stream.rs +++ b/src/qos_core/src/io/stream.rs @@ -114,6 +114,24 @@ impl SocketAddress { Self::Unix(ua) => Box::new(ua), } } + + /// Shows socket debug info + pub fn debug_info(&self) -> String { + match self { + #[cfg(feature = "vm")] + Self::Vsock(vsock) => { + format!("vsock cid: {} port: {}", vsock.cid(), vsock.port()) + } + Self::Unix(usock) => { + format!( + "usock path: {:?}", + usock + .path() + .unwrap_or(&std::path::PathBuf::from("unknown/error")) + ) + } + } + } } /// Handle on a stream diff --git a/src/qos_core/src/lib.rs b/src/qos_core/src/lib.rs index 42fdbacd..76c08438 100644 --- a/src/qos_core/src/lib.rs +++ b/src/qos_core/src/lib.rs @@ -17,6 +17,11 @@ compile_error!( "feature \"vm\" and feature \"mock\" cannot be enabled at the same time" ); +#[cfg(feature = "async")] +pub mod async_client; +#[cfg(feature = "async")] +pub mod async_server; + pub mod cli; pub mod client; pub mod handles; @@ -60,3 +65,10 @@ pub const SEC_APP_SOCK: &str = "./local-enclave/sec_app.sock"; /// Default socket for enclave <-> secure app communication. 
#[cfg(feature = "vm")] pub const SEC_APP_SOCK: &str = "/sec_app.sock"; + +/// Default socket pool size is 20 +#[cfg(feature = "async")] +pub const DEFAULT_POOL_SIZE: &str = "1"; // DEBUG: ales - set to something real after debugging +/// Default socket pool size is 0 for sync (unused) +#[cfg(not(feature = "async"))] +pub const DEFAULT_POOL_SIZE: &str = "0"; diff --git a/src/qos_core/src/protocol/async_processor.rs b/src/qos_core/src/protocol/async_processor.rs new file mode 100644 index 00000000..79336bfb --- /dev/null +++ b/src/qos_core/src/protocol/async_processor.rs @@ -0,0 +1,100 @@ +//! Quorum protocol processor +use std::sync::Arc; + +use borsh::BorshDeserialize; +use tokio::sync::Mutex; + +use super::{ + error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, ProtocolPhase, +}; +use crate::{async_server::AsyncRequestProcessor, io::SharedAsyncStreamPool}; + +const MEGABYTE: usize = 1024 * 1024; +const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; + +/// Helper type to keep `ProtocolState` shared using `Arc>` +type SharedProtocolState = Arc>; + +impl ProtocolState { + /// Wrap this `ProtocolState` into a `Mutex` in an `Arc`. + pub fn shared(self) -> SharedProtocolState { + Arc::new(Mutex::new(self)) + } +} + +/// Enclave state machine that executes when given a `ProtocolMsg`. +#[derive(Clone)] +pub struct AsyncProcessor { + app_pool: SharedAsyncStreamPool, + state: SharedProtocolState, +} + +impl AsyncProcessor { + /// Create a new `Self`. 
+ #[must_use] + pub fn new( + state: SharedProtocolState, + app_pool: SharedAsyncStreamPool, + ) -> Self { + Self { app_pool, state } + } + + /// Helper to get phase between locking the shared state + async fn get_phase(&self) -> ProtocolPhase { + self.state.lock().await.get_phase() + } +} + +impl AsyncRequestProcessor for AsyncProcessor { + async fn process(&self, req_bytes: Vec) -> Vec { + if req_bytes.len() > MAX_ENCODED_MSG_LEN { + return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( + ProtocolError::OversizedPayload, + )) + .expect("ProtocolMsg can always be serialized. qed."); + } + + let Ok(msg_req) = ProtocolMsg::try_from_slice(&req_bytes) else { + return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( + ProtocolError::ProtocolMsgDeserialization, + )) + .expect("ProtocolMsg can always be serialized. qed."); + }; + + // handle Proxy outside of the state + match msg_req { + ProtocolMsg::ProxyRequest { data } => { + let phase = self.get_phase().await; + + if phase != ProtocolPhase::QuorumKeyProvisioned { + let err = ProtocolError::NoMatchingRoute(phase); + return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( + err, + )) + .expect("ProtocolMsg can always be serialized. qed."); + } + + let result = self + .app_pool + .read() + .await + .get() + .await + .call(&data) + .await + .map(|data| ProtocolMsg::ProxyResponse { data }) + .map_err(|_e| { + ProtocolMsg::ProtocolErrorResponse( + ProtocolError::IOError, + ) + }); + + match result { + Ok(msg_resp) | Err(msg_resp) => borsh::to_vec(&msg_resp) + .expect("ProtocolMsg can always be serialized. 
qed."), + } + } + _ => self.state.lock().await.handle_msg(&msg_req), + } + } +} diff --git a/src/qos_core/src/protocol/async_state.rs b/src/qos_core/src/protocol/async_state.rs new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/src/qos_core/src/protocol/async_state.rs @@ -0,0 +1 @@ + diff --git a/src/qos_core/src/protocol/mod.rs b/src/qos_core/src/protocol/mod.rs index 550b8711..70f2f739 100644 --- a/src/qos_core/src/protocol/mod.rs +++ b/src/qos_core/src/protocol/mod.rs @@ -11,9 +11,12 @@ mod state; pub use error::ProtocolError; pub use processor::Processor; -use state::ProtocolState; +pub(crate) use state::ProtocolState; pub use state::{ProtocolPhase, ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS}; +#[cfg(feature = "async")] +pub(crate) mod async_processor; + /// 256bit hash pub type Hash256 = [u8; 32]; diff --git a/src/qos_core/src/protocol/processor.rs b/src/qos_core/src/protocol/processor.rs index ba43b58a..f4ea8773 100644 --- a/src/qos_core/src/protocol/processor.rs +++ b/src/qos_core/src/protocol/processor.rs @@ -1,14 +1,18 @@ //! Quorum protocol processor use borsh::BorshDeserialize; +use nix::sys::time::{TimeVal, TimeValLike}; use qos_nsm::NsmProvider; use super::{ - error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, ProtocolPhase, + error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, + ProtocolPhase, ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; use crate::io::MAX_PAYLOAD_SIZE; -use crate::{handles::Handles, io::SocketAddress, server}; +use crate::{client::Client, handles::Handles, io::SocketAddress, server}; + /// Enclave state machine that executes when given a `ProtocolMsg`. 
pub struct Processor { + app_client: Client, state: ProtocolState, } @@ -21,11 +25,16 @@ impl Processor { app_addr: SocketAddress, test_only_init_phase_override: Option, ) -> Self { + let app_client = Client::new( + app_addr, + TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), + ); + Self { + app_client, state: ProtocolState::new( attestor, handles, - app_addr, test_only_init_phase_override, ), } @@ -48,6 +57,29 @@ impl server::RequestProcessor for Processor { .expect("ProtocolMsg can always be serialized. qed."); }; - self.state.handle_msg(&msg_req) + // handle Proxy outside of the state + match msg_req { + ProtocolMsg::ProxyRequest { data } => { + let phase = self.state.get_phase(); + if phase != ProtocolPhase::QuorumKeyProvisioned { + let err = ProtocolError::NoMatchingRoute(phase); + return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( + err, + )) + .expect("ProtocolMsg can always be serialized. qed."); + } + let result = self + .app_client + .send(&data) + .map(|data| ProtocolMsg::ProxyResponse { data }) + .map_err(|e| ProtocolMsg::ProtocolErrorResponse(e.into())); + + match result { + Ok(msg_resp) | Err(msg_resp) => borsh::to_vec(&msg_resp) + .expect("ProtocolMsg can always be serialized. 
qed."), + } + } + _ => self.state.handle_msg(&msg_req), + } } } diff --git a/src/qos_core/src/protocol/services/boot.rs b/src/qos_core/src/protocol/services/boot.rs index 0341a448..91646c65 100644 --- a/src/qos_core/src/protocol/services/boot.rs +++ b/src/qos_core/src/protocol/services/boot.rs @@ -480,7 +480,7 @@ mod test { use qos_test_primitives::PathWrapper; use super::*; - use crate::{handles::Handles, io::SocketAddress}; + use crate::handles::Handles; fn get_manifest() -> (Manifest, Vec<(P256Pair, QuorumMember)>, Vec) { let quorum_pair = P256Pair::generate().unwrap(); @@ -580,12 +580,8 @@ mod test { manifest_file.clone(), pivot_file.clone(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); let _nsm_resposne = boot_standard(&mut protocol_state, &manifest_envelope, &pivot) @@ -638,12 +634,8 @@ mod test { manifest_file, pivot_file, ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); let nsm_resposne = boot_standard(&mut protocol_state, &manifest_envelope, &pivot); @@ -686,12 +678,8 @@ mod test { manifest_file, pivot_file, ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); let nsm_resposne = boot_standard(&mut protocol_state, &manifest_envelope, &pivot); @@ -736,12 +724,8 @@ mod test { (*manifest_file).to_string(), (*pivot_file).to_string(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + 
ProtocolState::new(Box::new(MockNsm), handles, None); let error = boot_standard(&mut protocol_state, &manifest_envelope, &pivot) @@ -796,12 +780,8 @@ mod test { (*manifest_file).to_string(), (*pivot_file).to_string(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles, None); let error = boot_standard(&mut protocol_state, &manifest_envelope, &pivot) diff --git a/src/qos_core/src/protocol/services/genesis.rs b/src/qos_core/src/protocol/services/genesis.rs index 95b9e654..9e302fad 100644 --- a/src/qos_core/src/protocol/services/genesis.rs +++ b/src/qos_core/src/protocol/services/genesis.rs @@ -195,7 +195,7 @@ mod test { use qos_p256::MASTER_SEED_LEN; use super::*; - use crate::{handles::Handles, io::SocketAddress}; + use crate::handles::Handles; #[test] fn boot_genesis_works() { @@ -205,12 +205,8 @@ mod test { "MAN".to_string(), "PIV".to_string(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); let member1_pair = P256Pair::generate().unwrap(); let member2_pair = P256Pair::generate().unwrap(); let member3_pair = P256Pair::generate().unwrap(); diff --git a/src/qos_core/src/protocol/services/key.rs b/src/qos_core/src/protocol/services/key.rs index eccb760b..caf80c39 100644 --- a/src/qos_core/src/protocol/services/key.rs +++ b/src/qos_core/src/protocol/services/key.rs @@ -272,7 +272,6 @@ mod test { use super::{boot_key_forward, export_key_internal, validate_manifest}; use crate::{ handles::Handles, - io::SocketAddress, protocol::{ services::{ boot::{ @@ -420,12 +419,8 @@ mod test { manifest_file.deref().to_string(), pivot_file.deref().to_string(), ); - let mut state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), 
- SocketAddress::new_unix("./never.sock"), - None, - ); + let mut state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); let response = boot_key_forward(&mut state, &manifest_envelope, &pivot) @@ -464,12 +459,8 @@ mod test { manifest_file.deref().to_string(), pivot_file.deref().to_string(), ); - let mut state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); // Remove an approval manifest_envelope.manifest_set_approvals.pop().unwrap(); @@ -506,12 +497,8 @@ mod test { manifest_file.deref().to_string(), pivot_file.deref().to_string(), ); - let mut state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); // Use a different pivot then what is referenced in the manifest let other_pivot = b"other pivot".to_vec(); @@ -547,12 +534,8 @@ mod test { manifest_file.deref().to_string(), pivot_file.deref().to_string(), ); - let mut state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); // Change the signature to something invalid manifest_envelope.manifest_set_approvals[0].signature = vec![1; 32]; @@ -604,12 +587,8 @@ mod test { manifest_file.deref().to_string(), pivot_file.deref().to_string(), ); - let mut state = ProtocolState::new( - Box::new(MockNsm), - handles.clone(), - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut state = + ProtocolState::new(Box::new(MockNsm), handles.clone(), None); // Add an approval from a random key manifest_envelope.manifest_set_approvals.push(non_member_approval); @@ -1082,12 +1061,8 @@ mod test { "pivot".to_string(), ); - let mut protocol_state = ProtocolState::new( - 
Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles, None); let EncryptedQuorumKey { encrypted_quorum_key, signature } = export_key_internal( &mut protocol_state, @@ -1147,12 +1122,8 @@ mod test { manifest_file.deref().to_string(), "pivot".to_string(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles, None); protocol_state .transition(ProtocolPhase::WaitingForForwardedKey) .unwrap(); @@ -1209,12 +1180,8 @@ mod test { manifest_file.deref().to_string(), "pivot".to_string(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles, None); assert_eq!( inject_key( @@ -1264,12 +1231,8 @@ mod test { manifest_file.deref().to_string(), "pivot".to_string(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles, None); assert_eq!( inject_key( @@ -1319,12 +1282,8 @@ mod test { manifest_file.deref().to_string(), "pivot".to_string(), ); - let mut protocol_state = ProtocolState::new( - Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut protocol_state = + ProtocolState::new(Box::new(MockNsm), handles, None); assert_eq!( inject_key( diff --git a/src/qos_core/src/protocol/services/provision.rs b/src/qos_core/src/protocol/services/provision.rs index fe1594dc..80e5fc2a 100644 --- a/src/qos_core/src/protocol/services/provision.rs +++ b/src/qos_core/src/protocol/services/provision.rs @@ -134,7 +134,6 @@ mod test { use crate::{ handles::Handles, - io::SocketAddress, 
protocol::{ services::{ boot::{ @@ -236,12 +235,7 @@ mod test { handles.put_manifest_envelope(&manifest_envelope).unwrap(); // 3) Create state with eph key and manifest - let mut state = ProtocolState::new( - Box::new(MockNsm), - handles, - SocketAddress::new_unix("./never.sock"), - None, - ); + let mut state = ProtocolState::new(Box::new(MockNsm), handles, None); state.transition(ProtocolPhase::WaitingForQuorumShards).unwrap(); Setup { quorum_pair, eph_pair, threshold, state, approvals } diff --git a/src/qos_core/src/protocol/state.rs b/src/qos_core/src/protocol/state.rs index 6df38852..691bae4f 100644 --- a/src/qos_core/src/protocol/state.rs +++ b/src/qos_core/src/protocol/state.rs @@ -1,11 +1,10 @@ //! Quorum protocol state machine -use nix::sys::time::{TimeVal, TimeValLike}; use qos_nsm::NsmProvider; use super::{ error::ProtocolError, msg::ProtocolMsg, services::provision::SecretBuilder, }; -use crate::{client::Client, handles::Handles, io::SocketAddress}; +use crate::handles::Handles; /// The timeout for the qos core when making requests to an enclave app. 
pub const ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS: i64 = 5; @@ -138,14 +137,6 @@ impl ProtocolRoute { ) } - pub fn proxy(current_phase: ProtocolPhase) -> Self { - ProtocolRoute::new( - Box::new(handlers::proxy), - current_phase, - current_phase, - ) - } - pub fn export_key(current_phase: ProtocolPhase) -> Self { ProtocolRoute::new( Box::new(handlers::export_key), @@ -175,7 +166,6 @@ impl ProtocolRoute { pub(crate) struct ProtocolState { pub provisioner: SecretBuilder, pub attestor: Box, - pub app_client: Client, pub handles: Handles, phase: ProtocolPhase, } @@ -184,8 +174,7 @@ impl ProtocolState { pub fn new( attestor: Box, handles: Handles, - app_addr: SocketAddress, - test_only_init_phase_override: Option, + #[allow(unused)] test_only_init_phase_override: Option, ) -> Self { let provisioner = SecretBuilder::new(); @@ -198,16 +187,7 @@ impl ProtocolState { #[cfg(not(any(feature = "mock", test)))] let init_phase = ProtocolPhase::WaitingForBootInstruction; - Self { - attestor, - provisioner, - phase: init_phase, - handles, - app_client: Client::new( - app_addr, - TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), - ), - } + Self { attestor, provisioner, phase: init_phase, handles } } pub fn get_phase(&self) -> ProtocolPhase { @@ -273,7 +253,6 @@ impl ProtocolState { ProtocolRoute::live_attestation_doc(self.phase), ProtocolRoute::manifest_envelope(self.phase), // phase specific routes - ProtocolRoute::proxy(self.phase), ProtocolRoute::export_key(self.phase), ] } @@ -376,22 +355,22 @@ mod handlers { } } - pub(super) fn proxy( - req: &ProtocolMsg, - state: &mut ProtocolState, - ) -> ProtocolRouteResponse { - if let ProtocolMsg::ProxyRequest { data: req_data } = req { - let result = state - .app_client - .send(req_data) - .map(|data| ProtocolMsg::ProxyResponse { data }) - .map_err(|e| ProtocolMsg::ProtocolErrorResponse(e.into())); - - Some(result) - } else { - None - } - } + // pub(super) fn proxy( + // req: &ProtocolMsg, + // state: &mut ProtocolState, + // ) -> 
ProtocolRouteResponse { + // if let ProtocolMsg::ProxyRequest { data: req_data } = req { + // let result = state + // .app_client + // .send(req_data) + // .map(|data| ProtocolMsg::ProxyResponse { data }) + // .map_err(|e| ProtocolMsg::ProtocolErrorResponse(e.into())); + + // Some(result) + // } else { + // None + // } + // } pub(super) fn provision( req: &ProtocolMsg, diff --git a/src/qos_core/src/reaper.rs b/src/qos_core/src/reaper.rs index 79148ec2..083590bd 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -110,4 +110,116 @@ impl Reaper { } } +#[cfg(feature = "async")] +mod inner { + #[allow(clippy::wildcard_imports)] + use super::*; + use crate::{ + async_server::AsyncSocketServer, + io::AsyncStreamPool, + protocol::{async_processor::AsyncProcessor, ProtocolState}, + }; + + impl Reaper { + /// Run the Reaper in an async way using Tokio runtime. + /// + /// # Panics + /// + /// - If spawning the pivot errors. + /// - If waiting for the pivot errors. + #[allow(dead_code)] + pub fn async_execute( + handles: &Handles, + nsm: Box, + pool: AsyncStreamPool, + app_pool: AsyncStreamPool, + test_only_init_phase_override: Option, + ) { + let handles2 = handles.clone(); + tokio::spawn(async move { + // create the state + let protocol_state = ProtocolState::new( + nsm, + handles2, + test_only_init_phase_override, + ); + // send a shared version of state and the async pool to each processor + let processor = AsyncProcessor::new( + protocol_state.shared(), + app_pool.shared(), + ); + // listen_all will multiplex the processor accross all sockets + let tasks = AsyncSocketServer::listen_all(pool, &processor) + .expect("unable to get listen task list"); + + match tokio::signal::ctrl_c().await { + Ok(()) => { + eprintln!("handling ctrl+c the tokio way"); + for task in tasks { + task.abort(); + } + } + Err(err) => panic!("{err}"), + } + }); + + loop { + if handles.quorum_key_exists() + && handles.pivot_exists() + && handles.manifest_envelope_exists() + { 
+ // The state required to pivot exists, so we can break this + // holding pattern and start the pivot. + break; + } + + std::thread::sleep(std::time::Duration::from_secs(1)); + } + + println!("Reaper::execute about to spawn pivot"); + + let PivotConfig { args, restart, .. } = handles + .get_manifest_envelope() + .expect("Checked above that the manifest exists.") + .manifest + .pivot; + + let mut pivot = Command::new(handles.pivot_path()); + pivot.args(&args[..]); + match restart { + RestartPolicy::Always => loop { + let status = pivot + .spawn() + .expect("Failed to spawn") + .wait() + .expect("Pivot executable never started..."); + + println!("Pivot exited with status: {status}"); + + // pause to ensure OS has enough time to clean up resources + // before restarting + std::thread::sleep(std::time::Duration::from_secs( + REAPER_RESTART_DELAY_IN_SECONDS, + )); + + println!("Restarting pivot ..."); + }, + RestartPolicy::Never => { + let status = pivot + .spawn() + .expect("Failed to spawn") + .wait() + .expect("Pivot executable never started..."); + println!("Pivot exited with status: {status}"); + } + } + + std::thread::sleep(std::time::Duration::from_secs( + REAPER_EXIT_DELAY_IN_SECONDS, + )); + println!("Reaper exiting ... 
"); + } + } +} + // See qos_test/tests/reaper for tests diff --git a/src/qos_enclave/src/main.rs b/src/qos_enclave/src/main.rs index 400b145f..cede643e 100644 --- a/src/qos_enclave/src/main.rs +++ b/src/qos_enclave/src/main.rs @@ -73,6 +73,8 @@ fn boot() -> String { let memory_mib = std::env::var("MEMORY_MIB").unwrap_or("1024".to_string()); let cpu_count = std::env::var("CPU_COUNT").unwrap_or("2".to_string()); let debug_mode = std::env::var("DEBUG").unwrap_or("false".to_string()); + let attach_console = + std::env::var("ATTACH_CONSOLE").unwrap_or("false".to_string()); let enclave_name = std::env::var("ENCLAVE_NAME").unwrap_or("nitro".to_string()); let run_args = RunEnclavesArgs { @@ -81,7 +83,7 @@ fn boot() -> String { memory_mib: memory_mib.parse::().unwrap(), cpu_ids: None, debug_mode: debug_mode.parse::().unwrap(), - attach_console: false, + attach_console: attach_console.parse::().unwrap(), // TODO: I think we don't want this variable, remove once debug is done cpu_count: Some(cpu_count.parse::().unwrap()), enclave_name: Some(enclave_name.clone()), }; diff --git a/src/qos_host/Cargo.toml b/src/qos_host/Cargo.toml index 17bd6053..3cdb5710 100644 --- a/src/qos_host/Cargo.toml +++ b/src/qos_host/Cargo.toml @@ -16,4 +16,5 @@ serde_json = { version = "1" } serde = { version = "1", features = ["derive"], default-features = false } [features] +async = ["qos_core/async"] vm = ["qos_core/vm"] diff --git a/src/qos_host/src/async_host.rs b/src/qos_host/src/async_host.rs new file mode 100644 index 00000000..378d7f0d --- /dev/null +++ b/src/qos_host/src/async_host.rs @@ -0,0 +1,275 @@ +//! Enclave host implementation. The host primarily consists of a HTTP server +//! that proxies requests to the enclave by establishing a client connection +//! with the enclave. +//! +//! # IMPLEMENTERS NOTE +//! +//! The host HTTP server is currently implemented using the `axum` framework. +//! This may be swapped out in the the future in favor of a lighter package in +//! 
order to slim the dependency tree. In the mean time, these resources can +//! help familiarize you with the abstractions: +//! +//! * Request body extractors: +//! * Response: +//! * Responding with error: +#![forbid(unsafe_code)] +#![deny(clippy::all)] +#![warn(missing_docs, clippy::pedantic)] +#![allow(clippy::missing_errors_doc)] + +use std::{net::SocketAddr, sync::Arc}; + +use axum::{ + body::Bytes, + extract::{DefaultBodyLimit, State}, + http::StatusCode, + response::{Html, IntoResponse}, + routing::{get, post}, + Json, Router, +}; +use borsh::BorshDeserialize; +use qos_core::{ + async_client::AsyncClient, + io::SharedAsyncStreamPool, + protocol::{msg::ProtocolMsg, ProtocolError, ProtocolPhase}, +}; + +use crate::{ + EnclaveInfo, EnclaveVitalStats, Error, ENCLAVE_HEALTH, ENCLAVE_INFO, + HOST_HEALTH, MAX_ENCODED_MSG_LEN, MESSAGE, +}; + +/// Resource shared across tasks in the [`HostServer`]. +#[derive(Debug)] +struct AsyncQosHostState { + enclave_client: AsyncClient, +} + +/// HTTP server for the host of the enclave; proxies requests to the enclave. +#[allow(clippy::module_name_repetitions)] +pub struct AsyncHostServer { + enclave_pool: SharedAsyncStreamPool, + addr: SocketAddr, + base_path: Option, +} + +impl AsyncHostServer { + /// Create a new [`HostServer`]. See [`Self::serve`] for starting the + /// server. + #[must_use] + pub fn new( + enclave_pool: SharedAsyncStreamPool, + addr: SocketAddr, + base_path: Option, + ) -> Self { + Self { enclave_pool, addr, base_path } + } + + fn path(&self, endpoint: &str) -> String { + if let Some(path) = self.base_path.as_ref() { + format!("/{path}{endpoint}") + } else { + format!("/qos{endpoint}") + } + } + + /// Start the server, running indefinitely. + /// + /// # Panics + /// + /// Panics if there is an issue starting the server. 
+ // pub async fn serve(&self) -> Result<(), String> { + pub async fn serve(&self) { + let state = Arc::new(AsyncQosHostState { + enclave_client: AsyncClient::new(self.enclave_pool.clone()), + }); + + let app = Router::new() + .route(&self.path(HOST_HEALTH), get(Self::host_health)) + .route(&self.path(ENCLAVE_HEALTH), get(Self::enclave_health)) + .route(&self.path(MESSAGE), post(Self::message)) + .route(&self.path(ENCLAVE_INFO), get(Self::enclave_info)) + .layer(DefaultBodyLimit::disable()) + .with_state(state); + + println!("HostServer listening on {}", self.addr); + + axum::Server::bind(&self.addr) + .serve(app.into_make_service()) + .await + .unwrap(); + } + + /// Health route handler. + #[allow(clippy::unused_async)] + async fn host_health( + _: State>, + ) -> impl IntoResponse { + println!("Host health..."); + Html("Ok!") + } + + /// Health route handler. + #[allow(clippy::unused_async)] + async fn enclave_health( + State(state): State>, + ) -> impl IntoResponse { + println!("Enclave health..."); + + let encoded_request = borsh::to_vec(&ProtocolMsg::StatusRequest) + .expect("ProtocolMsg can always serialize. 
qed."); + let encoded_response = match state + .enclave_client + .call(&encoded_request) + .await + { + Ok(encoded_response) => encoded_response, + Err(e) => { + let msg = format!("Error while trying to send socket request to enclave: {e:?}"); + eprintln!("{msg}"); + return (StatusCode::INTERNAL_SERVER_ERROR, Html(msg)); + } + }; + + let response = match ProtocolMsg::try_from_slice(&encoded_response) { + Ok(r) => r, + Err(e) => { + let msg = format!("Error deserializing response from enclave, make sure qos_host version match qos_core: {e}"); + eprintln!("{msg}"); + return (StatusCode::INTERNAL_SERVER_ERROR, Html(msg)); + } + }; + + match response { + ProtocolMsg::StatusResponse(phase) => { + let inner = format!("{phase:?}"); + let status = match phase { + ProtocolPhase::UnrecoverableError + | ProtocolPhase::WaitingForBootInstruction + | ProtocolPhase::WaitingForQuorumShards + | ProtocolPhase::WaitingForForwardedKey => StatusCode::SERVICE_UNAVAILABLE, + ProtocolPhase::QuorumKeyProvisioned + | ProtocolPhase::GenesisBooted => StatusCode::OK, + }; + + (status, Html(inner)) + } + other => { + let msg = format!("Unexpected response: Expected a ProtocolMsg::StatusResponse, but got: {other:?}"); + eprintln!("{msg}"); + (StatusCode::INTERNAL_SERVER_ERROR, Html(msg)) + } + } + } + + #[allow(clippy::unused_async)] + async fn enclave_info( + State(state): State>, + ) -> Result, Error> { + println!("Enclave info..."); + + let enc_status_req = borsh::to_vec(&ProtocolMsg::StatusRequest) + .expect("ProtocolMsg can always serialize. 
qed."); + let enc_status_resp = + state.enclave_client.call(&enc_status_req).await.map_err(|e| { + Error(format!("error sending status request to enclave: {e:?}")) + })?; + + let status_resp = match ProtocolMsg::try_from_slice(&enc_status_resp) { + Ok(status_resp) => status_resp, + Err(e) => { + return Err(Error(format!("error deserializing status response from enclave, make sure qos_host version match qos_core: {e:?}"))); + } + }; + let phase = match status_resp { + ProtocolMsg::StatusResponse(phase) => phase, + other => { + return Err(Error(format!("unexpected response: expected a ProtocolMsg::StatusResponse, but got: {other:?}"))); + } + }; + + let enc_manifest_envelope_req = + borsh::to_vec(&ProtocolMsg::ManifestEnvelopeRequest) + .expect("ProtocolMsg can always serialize. qed."); + let enc_manifest_envelope_resp = state + .enclave_client + .call(&enc_manifest_envelope_req) + .await + .map_err(|e| { + Error(format!( + "error while trying to send manifest envelope socket request to enclave: {e:?}" + )) + })?; + + let manifest_envelope_resp = ProtocolMsg::try_from_slice( + &enc_manifest_envelope_resp, + ) + .map_err(|e| + Error(format!("error deserializing manifest envelope response from enclave, make sure qos_host version match qos_core: {e}")) + )?; + + let manifest_envelope = match manifest_envelope_resp { + ProtocolMsg::ManifestEnvelopeResponse { manifest_envelope } => { + *manifest_envelope + } + other => { + return Err( + Error(format!("unexpected response: expected a ProtocolMsg::ManifestEnvelopeResponse, but got: {other:?}")) + ); + } + }; + + let vitals_log = if let Some(m) = manifest_envelope.as_ref() { + serde_json::to_string(&EnclaveVitalStats { + phase, + namespace: m.manifest.namespace.name.clone(), + nonce: m.manifest.namespace.nonce, + pivot_hash: m.manifest.pivot.hash, + pcr0: m.manifest.enclave.pcr0.clone(), + pivot_args: m.manifest.pivot.args.clone(), + }) + .expect("always valid json. 
qed.") + } else { + serde_json::to_string(&phase).expect("always valid json. qed.") + }; + println!("{vitals_log}"); + + let info = EnclaveInfo { phase, manifest_envelope }; + + Ok(Json(info)) + } + + /// Message route handler. + #[allow(clippy::unused_async)] + async fn message( + State(state): State>, + encoded_request: Bytes, + ) -> impl IntoResponse { + if encoded_request.len() > MAX_ENCODED_MSG_LEN { + return ( + StatusCode::BAD_REQUEST, + borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( + ProtocolError::OversizeMsg, + )) + .expect("ProtocolMsg can always serialize. qed."), + ); + } + + match state.enclave_client.call(&encoded_request).await { + Ok(encoded_response) => (StatusCode::OK, encoded_response), + Err(e) => { + let msg = + format!("Error while trying to send request over socket to enclave: {e:?}"); + eprint!("{msg}"); + + ( + StatusCode::INTERNAL_SERVER_ERROR, + borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( + ProtocolError::EnclaveClient, + )) + .expect("ProtocolMsg can always serialize. qed."), + ) + } + } + } +} diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index 818d2e30..527b7140 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -12,12 +12,14 @@ use qos_core::{ parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; -use crate::HostServer; +#[cfg(feature = "async")] +use qos_core::io::AsyncStreamPool; const HOST_IP: &str = "host-ip"; const HOST_PORT: &str = "host-port"; const ENDPOINT_BASE_PATH: &str = "endpoint-base-path"; const VSOCK_TO_HOST: &str = "vsock-to-host"; +const POOL_SIZE: &str = "pool-size"; struct HostParser; impl GetParserForOptions for HostParser { @@ -54,6 +56,11 @@ impl GetParserForOptions for HostParser { Token::new(ENDPOINT_BASE_PATH, "base path for all endpoints. e.g. 
/enclave-health") .takes_value(true) ) + .token( + Token::new(POOL_SIZE, "pool size for USOCK/VSOCK sockets") + .takes_value(true) + .default_value(qos_core::DEFAULT_POOL_SIZE) + ) .token( Token::new(VSOCK_TO_HOST, "whether to add the to-host svm flag to the enclave vsock connection. Valid options are `true` or `false`") .takes_value(true) @@ -106,6 +113,45 @@ impl HostOpts { SocketAddr::new(IpAddr::V4(ip), port) } + /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and + /// return the new `AsyncPool`. + #[cfg(feature = "async")] + pub(crate) fn enclave_pool(&self) -> AsyncStreamPool { + use qos_core::io::{TimeVal, TimeValLike}; + let pool_size: u32 = self + .parsed + .single(POOL_SIZE) + .expect("invalid pool options") + .parse() + .expect("invalid pool_size specified"); + match ( + self.parsed.single(CID), + self.parsed.single(PORT), + self.parsed.single(USOCK), + ) { + #[cfg(feature = "vm")] + (Some(c), Some(p), None) => { + let c = c.parse::().unwrap(); + let start_port = p.parse::().unwrap(); + + let addresses = (start_port..start_port + pool_size).map(|p| { + SocketAddress::new_vsock(c, p, self.to_host_flag()) + }); + + AsyncStreamPool::new(addresses, TimeVal::seconds(5)) + } + (None, None, Some(u)) => { + let addresses = (0..pool_size).map(|i| { + let u = format!("{u}_{i}"); // add _X suffix for pooling + SocketAddress::new_unix(&u) + }); + + AsyncStreamPool::new(addresses, TimeVal::seconds(5)) + } + _ => panic!("Invalid socket opts"), + } + } + /// Get the `SocketAddress` for the enclave server. 
/// /// # Panics @@ -176,13 +222,23 @@ impl CLI { } else if options.parsed.help() { println!("{}", options.parsed.info()); } else { - HostServer::new( + #[cfg(not(feature = "async"))] + crate::HostServer::new( options.enclave_addr(), options.host_addr(), options.base_path(), ) .serve() .await; + + #[cfg(feature = "async")] + crate::async_host::AsyncHostServer::new( + options.enclave_pool().shared(), + options.host_addr(), + options.base_path(), + ) + .serve() + .await; } } } diff --git a/src/qos_host/src/lib.rs b/src/qos_host/src/lib.rs index 21bc6004..264474ed 100644 --- a/src/qos_host/src/lib.rs +++ b/src/qos_host/src/lib.rs @@ -37,6 +37,8 @@ use qos_core::{ }, }; +#[cfg(feature = "async")] +pub mod async_host; pub mod cli; const MEGABYTE: usize = 1024 * 1024; diff --git a/src/qos_net/Cargo.toml b/src/qos_net/Cargo.toml index ba49e5c0..b6713e63 100644 --- a/src/qos_net/Cargo.toml +++ b/src/qos_net/Cargo.toml @@ -11,6 +11,7 @@ borsh = { version = "1.0", features = [ "std", "derive", ], default-features = false } +futures = { version = "0.3.30", optional = false } serde = { version = "1", features = ["derive"], default-features = false } hickory-resolver = { version = "0.25.2", features = [ "tokio", # for async @@ -19,19 +20,25 @@ hickory-resolver = { version = "0.25.2", features = [ rand = { version = "0.9.1", features = [ "thread_rng", ], default-features = false, optional = true } -tokio = { version = "1.38.0", default-features = false } +tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time"], default-features = false, optional = true } [dev-dependencies] qos_test_primitives = { path = "../qos_test_primitives" } httparse = { version = "1.9.4", default-features = false } chunked_transfer = { version = "1.5.0", default-features = false } +rustls = { version = "0.23.5" } serde_json = { version = "1.0.121", features = [ "std", ], default-features = false } -rustls = { version = "0.23.5" } webpki-roots = { version = "0.26.1" 
} [features] -default = ["proxy"] # keep this as a default feature ensures we lint by default -proxy = ["rand", "hickory-resolver"] -vm = [] +default = ["proxy"] # keep this as a default feature ensures we lint by default +async_proxy = ["hickory-resolver", "rand", "tokio", "qos_core/async"] +proxy = ["rand", "hickory-resolver", "tokio"] +vm = ["qos_core/vm"] + +[[bin]] +name = "async_qos_net" +path = "src/bin/async_qos_net.rs" +required-features = ["async_proxy"] diff --git a/src/qos_net/src/async_cli.rs b/src/qos_net/src/async_cli.rs new file mode 100644 index 00000000..4351ffa6 --- /dev/null +++ b/src/qos_net/src/async_cli.rs @@ -0,0 +1,35 @@ +//! Async extension to the CLI +use crate::{ + async_proxy::AsyncProxyServer, + cli::{ProxyOpts, CLI}, +}; + +impl CLI { + /// Execute the enclave proxy CLI with the environment args in an async way. + pub async fn async_execute() { + use qos_core::async_server::AsyncSocketServer; + + let mut args: Vec = std::env::args().collect(); + let opts = ProxyOpts::new(&mut args); + + if opts.parsed.version() { + println!("version: {}", env!("CARGO_PKG_VERSION")); + } else if opts.parsed.help() { + println!("{}", opts.parsed.info()); + } else { + let tasks = AsyncSocketServer::listen_proxy(opts.async_pool()) + .await + .expect("unable to get listen join handles"); + + match tokio::signal::ctrl_c().await { + Ok(_) => { + eprintln!("handling ctrl+c the tokio way"); + for task in tasks { + task.abort(); + } + } + Err(err) => panic!("{err}"), + } + } + } +} diff --git a/src/qos_net/src/async_proxy.rs b/src/qos_net/src/async_proxy.rs new file mode 100644 index 00000000..fb6d4413 --- /dev/null +++ b/src/qos_net/src/async_proxy.rs @@ -0,0 +1,193 @@ +//! 
Protocol proxy for our remote QOS net proxy +use borsh::BorshDeserialize; +use futures::Future; +use qos_core::{ + async_server::AsyncSocketServer, + io::{AsyncListener, AsyncStream, AsyncStreamPool, IOError}, + server::SocketServerError, +}; +use tokio::task::JoinHandle; + +use crate::{ + async_proxy_connection::AsyncProxyConnection, error::QosNetError, + proxy_msg::ProxyMsg, +}; + +const MEGABYTE: usize = 1024 * 1024; +const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; + +/// Socket<>TCP proxy to enable remote connections +pub struct AsyncProxy { + tcp_connection: Option, + sock_stream: AsyncStream, +} + +impl AsyncProxy { + /// Create a new AsyncProxy from the given AsyncStream, with empty tcp_connection + pub fn new(sock_stream: AsyncStream) -> Self { + Self { tcp_connection: None, sock_stream } + } + + /// Create a new connection by resolving a name into an IP + /// address. The TCP connection is opened and saved in internal state. + async fn connect_by_name( + &mut self, + hostname: String, + port: u16, + dns_resolvers: Vec, + dns_port: u16, + ) -> ProxyMsg { + match AsyncProxyConnection::new_from_name( + hostname.clone(), + port, + dns_resolvers.clone(), + dns_port, + ) + .await + { + Ok(conn) => { + let connection_id = conn.id; + let remote_ip = conn.ip.clone(); + self.tcp_connection = Some(conn); + println!("Connection to {hostname} established"); + ProxyMsg::ConnectResponse { connection_id, remote_ip } + } + Err(e) => { + println!("error while establishing connection: {e:?}"); + ProxyMsg::ProxyError(e) + } + } + } + + /// Create a new connection, targeting an IP address directly. + /// address. The TCP connection is opened and saved in internal state. 
+ async fn connect_by_ip(&mut self, ip: String, port: u16) -> ProxyMsg { + match AsyncProxyConnection::new_from_ip(ip.clone(), port).await { + Ok(conn) => { + let connection_id = conn.id; + let remote_ip = conn.ip.clone(); + self.tcp_connection = Some(conn); + println!("Connection to {ip} established and saved as ID {connection_id}"); + ProxyMsg::ConnectResponse { connection_id, remote_ip } + } + Err(e) => { + println!("error while establishing connection: {e:?}"); + ProxyMsg::ProxyError(e) + } + } + } + + // processes given `ProxyMsg` if it's a connection request or returns a `ProxyError` otherwise. + async fn process_req(&mut self, req_bytes: Vec) -> Vec { + if req_bytes.len() > MAX_ENCODED_MSG_LEN { + return borsh::to_vec(&ProxyMsg::ProxyError( + QosNetError::OversizedPayload, + )) + .expect("ProtocolMsg can always be serialized. qed."); + } + + let resp = match ProxyMsg::try_from_slice(&req_bytes) { + Ok(req) => match req { + // TODO: do we need this?? + ProxyMsg::StatusRequest => ProxyMsg::StatusResponse(0), + ProxyMsg::ConnectByNameRequest { + hostname, + port, + dns_resolvers, + dns_port, + } => { + self.connect_by_name( + hostname, + port, + dns_resolvers, + dns_port, + ) + .await + } + ProxyMsg::ConnectByIpRequest { ip, port } => { + self.connect_by_ip(ip, port).await + } + _ => ProxyMsg::ProxyError(QosNetError::InvalidMsg), + }, + Err(_) => ProxyMsg::ProxyError(QosNetError::InvalidMsg), + }; + + borsh::to_vec(&resp) + .expect("Protocol message can always be serialized. qed!") + } +} + +impl AsyncProxy { + async fn run(&mut self) -> Result<(), IOError> { + loop { + // Only try to process ProxyMsg content on the USOCK/VSOCK if we're not connected to TCP yet. 
+ // If we're connected, we should be a "dumb pipe" using the `tokio::io::copy_bidirectional` + // which is handled in the connect functions above + if self.tcp_connection.is_none() { + let req_bytes = self.sock_stream.recv().await?; + let resp_bytes = self.process_req(req_bytes).await; + self.sock_stream.send(&resp_bytes).await?; + if let Some(tcp_connection) = &mut self.tcp_connection { + let (_, _) = tokio::io::copy_bidirectional( + &mut self.sock_stream, + &mut tcp_connection.tcp_stream, + ) + .await?; + + // Once the "dumb pipe" is closed we need to clear our tcp_connection and refresh + // the proxy socket stream by using shutdown + self.tcp_connection = None; + + break Ok(()); // return to the accept loop + } + } + } + } +} + +pub trait AsyncProxyServer { + fn listen_proxy( + pool: AsyncStreamPool, + ) -> impl Future< + Output = Result< + Vec>>, + SocketServerError, + >, + > + Send; +} + +impl AsyncProxyServer for AsyncSocketServer { + /// Listen on a tcp proxy server in a way that allows the USOCK/VSOCK to be used as a + /// dumb pipe after getting the `connect*` calls. + async fn listen_proxy( + pool: AsyncStreamPool, + ) -> Result>>, SocketServerError> + { + println!( + "`AsyncSocketServer` proxy listening on pool size {}", + pool.len() + ); + + let listeners = pool.listen()?; + + let mut tasks = Vec::new(); + for listener in listeners { + let task = + tokio::spawn(async move { accept_loop_proxy(listener).await }); + + tasks.push(task); + } + + Ok(tasks) + } +} + +async fn accept_loop_proxy( + listener: AsyncListener, +) -> Result<(), SocketServerError> { + loop { + let stream = listener.accept().await?; + let mut proxy = AsyncProxy::new(stream); + proxy.run().await?; + } +} diff --git a/src/qos_net/src/async_proxy_connection.rs b/src/qos_net/src/async_proxy_connection.rs new file mode 100644 index 00000000..ebee0139 --- /dev/null +++ b/src/qos_net/src/async_proxy_connection.rs @@ -0,0 +1,132 @@ +//! 
Contains logic for remote connection establishment: DNS resolution and TCP +//! connection. +use std::net::{AddrParseError, IpAddr, SocketAddr}; + +use hickory_resolver::{ + config::{NameServerConfigGroup, ResolverConfig}, + name_server::TokioConnectionProvider, + TokioResolver, +}; +use rand::Rng; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; + +use crate::error::QosNetError; + +/// Struct representing a TCP connection held on our proxy +pub struct AsyncProxyConnection { + /// Unsigned integer with the connection ID (random positive int) + pub id: u128, + /// IP address of the remote host + pub ip: String, + /// TCP stream object + pub(crate) tcp_stream: TcpStream, +} + +impl AsyncProxyConnection { + /// Create a new `ProxyConnection` from a name. This results in a DNS + /// request + TCP connection + pub async fn new_from_name( + hostname: String, + port: u16, + dns_resolvers: Vec, + dns_port: u16, + ) -> Result { + let ip = resolve_hostname(hostname, dns_resolvers, dns_port).await?; + + // Generate a new random u32 to get an ID. We'll use it to name our + // socket. This will be our connection ID. + let connection_id = { + let mut rng = rand::rng(); + rng.random::() + }; + + let tcp_addr = SocketAddr::new(ip, port); + let tcp_stream = TcpStream::connect(tcp_addr).await?; + Ok(AsyncProxyConnection { + id: connection_id, + ip: ip.to_string(), + tcp_stream, + }) + } + + /// Create a new `ProxyConnection` from an IP address. This results in a + /// new TCP connection + pub async fn new_from_ip( + ip: String, + port: u16, + ) -> Result { + // Generate a new random u32 to get an ID. We'll use it to name our + // socket. This will be our connection ID. 
+ let connection_id = { + let mut rng = rand::rng(); + rng.random::() + }; + + let ip_addr = ip.parse()?; + let tcp_addr = SocketAddr::new(ip_addr, port); + let tcp_stream = TcpStream::connect(tcp_addr).await?; + + Ok(AsyncProxyConnection { id: connection_id, ip, tcp_stream }) + } +} + +impl AsyncProxyConnection { + pub async fn read( + &mut self, + buf: &mut [u8], + ) -> Result { + self.tcp_stream.read(buf).await + } + + pub async fn write(&mut self, buf: &[u8]) -> Result { + self.tcp_stream.write(buf).await + } + + pub async fn flush(&mut self) -> std::io::Result<()> { + self.tcp_stream.flush().await + } +} + +// Resolve a name into an IP address +pub async fn resolve_hostname( + hostname: String, + resolver_addrs: Vec, + port: u16, +) -> Result { + let resolver_parsed_addrs = resolver_addrs + .iter() + .map(|resolver_address| { + let ip_addr: Result = + resolver_address.parse(); + ip_addr + }) + .collect::, AddrParseError>>()?; + + let resolver_config = ResolverConfig::from_parts( + None, + vec![], + NameServerConfigGroup::from_ips_clear( + &resolver_parsed_addrs, + port, + true, + ), + ); + let resolver = TokioResolver::builder_with_config( + resolver_config, + TokioConnectionProvider::default(), + ) + .build(); + + let response = resolver + .lookup_ip(hostname.clone()) + .await + .map_err(QosNetError::from)?; + response.iter().next().ok_or_else(|| { + QosNetError::DNSResolutionError(format!( + "Empty response when querying for host {hostname}" + )) + }) +} diff --git a/src/qos_net/src/async_proxy_stream.rs b/src/qos_net/src/async_proxy_stream.rs new file mode 100644 index 00000000..a648f1d5 --- /dev/null +++ b/src/qos_net/src/async_proxy_stream.rs @@ -0,0 +1,148 @@ +//! Contains an abstraction to implement the standard library's Read/Write +//! traits with `ProxyMsg`s. 
+use std::pin::Pin; + +use borsh::BorshDeserialize; +use qos_core::io::AsyncStream; +use tokio::{ + io::{AsyncRead, AsyncWrite}, + sync::MutexGuard, +}; + +use crate::{error::QosNetError, proxy_msg::ProxyMsg}; + +/// Struct representing a remote connection +/// This is going to be used by enclaves, on the other side of a socket +/// and plugged into the tokio-rustls via the AsyncWrite and AsyncRead traits +pub struct AsyncProxyStream<'pool> { + /// AsyncStream we hold for this connection + stream: MutexGuard<'pool, AsyncStream>, + /// Once a connection is established (successful `ConnectByName` or + /// ConnectByIp request), this connection ID is set the u32 in + /// `ConnectResponse`. + pub connection_id: u128, + /// The remote host this connection points to + pub remote_hostname: Option, + /// The remote IP this connection points to + pub remote_ip: String, +} + +impl<'pool> AsyncProxyStream<'pool> { + /// Create a new AsyncProxyStream by targeting a hostname + /// + /// # Arguments + /// + /// * `stream` - the `AsyncStream` picked from a `AsyncStreamPool` behind a `MutexGuard` (e.g. 
from `pool.get().await`) + /// * `hostname` - the hostname to connect to (the remote qos_net proxy will + /// resolve DNS) + /// * `port` - the port the remote qos_net proxy should connect to + /// (typically: 80 or 443 for http/https) + /// * `dns_resolvers` - array of resolvers to use to resolve `hostname` + /// * `dns_port` - DNS port to use while resolving DNS (typically: 53 or + /// 853) + pub async fn connect_by_name( + mut stream: MutexGuard<'pool, AsyncStream>, + hostname: String, + port: u16, + dns_resolvers: Vec, + dns_port: u16, + ) -> Result { + let req = borsh::to_vec(&ProxyMsg::ConnectByNameRequest { + hostname: hostname.clone(), + port, + dns_resolvers, + dns_port, + }) + .expect("ProtocolMsg can always be serialized."); + let resp_bytes = stream.call(&req).await?; + + match ProxyMsg::try_from_slice(&resp_bytes) { + Ok(resp) => match resp { + ProxyMsg::ConnectResponse { connection_id, remote_ip } => { + Ok(Self { + stream, + connection_id, + remote_ip, + remote_hostname: Some(hostname), + }) + } + _ => Err(QosNetError::InvalidMsg), + }, + Err(_) => Err(QosNetError::InvalidMsg), + } + } + + /// Create a new ProxyStream by targeting an IP address directly. + /// + /// # Arguments + /// * `stream` - the `AsyncStream` picked from a `AsyncStreamPool` behind a `MutexGuard` (e.g. 
from `pool.get().await`) + /// * `ip` - the IP the remote qos_net proxy should connect to + /// * `port` - the port the remote qos_net proxy should connect to + /// (typically: 80 or 443 for http/https) + pub async fn connect_by_ip( + mut stream: MutexGuard<'pool, AsyncStream>, + ip: String, + port: u16, + ) -> Result { + let req = borsh::to_vec(&ProxyMsg::ConnectByIpRequest { ip, port }) + .expect("ProtocolMsg can always be serialized."); + let resp_bytes = stream.call(&req).await?; + + match ProxyMsg::try_from_slice(&resp_bytes) { + Ok(resp) => match resp { + ProxyMsg::ConnectResponse { connection_id, remote_ip } => { + Ok(Self { + stream, + connection_id, + remote_ip, + remote_hostname: None, + }) + } + _ => Err(QosNetError::InvalidMsg), + }, + Err(_) => Err(QosNetError::InvalidMsg), + } + } + + /// Refresh this connection after a request has been completed. This MUST be called + /// after each successful rustls session. + pub async fn refresh(&mut self) -> Result<(), QosNetError> { + self.stream.reconnect().await?; + + Ok(()) + } +} + +impl AsyncRead for AsyncProxyStream<'_> { + fn poll_read( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + Pin::<&mut AsyncStream>::new(&mut self.stream).poll_read(cx, buf) + } +} + +impl AsyncWrite for AsyncProxyStream<'_> { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> std::task::Poll> { + Pin::<&mut AsyncStream>::new(&mut self.stream).poll_write(cx, buf) + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + Pin::<&mut AsyncStream>::new(&mut self.stream).poll_flush(cx) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + Pin::<&mut AsyncStream>::new(&mut self.stream).poll_shutdown(cx) + } +} diff --git a/src/qos_net/src/bin/async_qos_net.rs 
b/src/qos_net/src/bin/async_qos_net.rs new file mode 100644 index 00000000..9d445232 --- /dev/null +++ b/src/qos_net/src/bin/async_qos_net.rs @@ -0,0 +1,17 @@ +#[cfg(feature = "async_proxy")] +pub fn main() { + use qos_net::cli::CLI; + + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("tokio main to run") + .block_on(async { + CLI::async_execute().await; + }); +} + +#[cfg(not(feature = "async_proxy"))] +pub fn main() { + panic!("async qos_net invoked without async_proxy feature") +} diff --git a/src/qos_net/src/cli.rs b/src/qos_net/src/cli.rs index 14cc9f99..779aa248 100644 --- a/src/qos_net/src/cli.rs +++ b/src/qos_net/src/cli.rs @@ -1,14 +1,12 @@ //! CLI for running a host proxy to provide remote connections. -use std::env; - use qos_core::{ io::SocketAddress, parser::{GetParserForOptions, OptionsParser, Parser, Token}, - server::SocketServer, }; -use crate::proxy::Proxy; +#[cfg(feature = "async_proxy")] +use qos_core::io::AsyncStreamPool; /// "cid" pub const CID: &str = "cid"; @@ -16,28 +14,73 @@ pub const CID: &str = "cid"; pub const PORT: &str = "port"; /// "usock" pub const USOCK: &str = "usock"; +/// "pool-size" +pub const POOL_SIZE: &str = "pool-size"; + +const DEFAULT_POOL_SIZE: &str = "20"; /// CLI options for starting up the proxy. #[derive(Default, Clone, Debug, PartialEq)] -struct ProxyOpts { - parsed: Parser, +pub(crate) struct ProxyOpts { + pub(crate) parsed: Parser, } impl ProxyOpts { /// Create a new instance of [`Self`] with some defaults. - fn new(args: &mut Vec) -> Self { + pub(crate) fn new(args: &mut Vec) -> Self { let parsed = OptionsParser::::parse(args) .expect("Entered invalid CLI args"); Self { parsed } } + /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and + /// return the new `AsyncPool`. 
+ #[cfg(feature = "async_proxy")] + pub(crate) fn async_pool(&self) -> AsyncStreamPool { + use qos_core::io::{TimeVal, TimeValLike}; + + let pool_size: u32 = self + .parsed + .single(POOL_SIZE) + .expect("invalid pool options") + .parse() + .expect("invalid pool_size specified"); + match ( + self.parsed.single(CID), + self.parsed.single(PORT), + self.parsed.single(USOCK), + ) { + #[cfg(feature = "vm")] + (Some(c), Some(p), None) => { + let c = c.parse::().unwrap(); + let start_port = p.parse::().unwrap(); + + let addresses = (start_port..start_port + pool_size).map(|p| { + SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS) + }); + + AsyncStreamPool::new(addresses) + } + (None, None, Some(u)) => { + let addresses = (0..pool_size).map(|i| { + let u = format!("{u}_{i}"); // add _X suffix for pooling + SocketAddress::new_unix(&u) + }); + + AsyncStreamPool::new(addresses, TimeVal::seconds(0)) + } + _ => panic!("Invalid socket opts"), + } + } + /// Get the `SocketAddress` for the proxy server. /// /// # Panics /// /// Panics if the opts are not valid for exactly one of unix or vsock. - fn addr(&self) -> SocketAddress { + #[allow(unused)] + pub(crate) fn addr(&self) -> SocketAddress { match ( self.parsed.single(CID), self.parsed.single(PORT), @@ -47,7 +90,7 @@ impl ProxyOpts { (Some(c), Some(p), None) => SocketAddress::new_vsock( c.parse::().unwrap(), p.parse::().unwrap(), - crate::io::VMADDR_NO_FLAGS, + qos_core::io::VMADDR_NO_FLAGS, ), (None, None, Some(u)) => SocketAddress::new_unix(u), _ => panic!("Invalid socket opts"), @@ -57,9 +100,14 @@ impl ProxyOpts { /// Proxy CLI. pub struct CLI; + impl CLI { /// Execute the enclave proxy CLI with the environment args. 
pub fn execute() { + use crate::proxy::Proxy; + use qos_core::server::SocketServer; + use std::env; + let mut args: Vec = env::args().collect(); let opts = ProxyOpts::new(&mut args); @@ -98,6 +146,15 @@ impl GetParserForOptions for ProxyParser { .takes_value(true) .forbids(vec!["port", "cid"]), ) + .token( + Token::new( + POOL_SIZE, + "the pool size to use with all socket types.", + ) + .takes_value(true) + .forbids(vec!["port", "cid"]) + .default_value(DEFAULT_POOL_SIZE), + ) } } @@ -127,6 +184,20 @@ mod test { assert_eq!(opts.addr(), SocketAddress::new_unix("./test.sock")); } + #[test] + #[cfg(feature = "async_proxy")] + fn parse_pool_size() { + let mut args: Vec<_> = + vec!["binary", "--usock", "./test.sock", "--pool-size", "7"] + .into_iter() + .map(String::from) + .collect(); + let opts = ProxyOpts::new(&mut args); + + let pool = opts.async_pool(); + assert_eq!(pool.len(), 7); + } + #[test] #[should_panic = "Entered invalid CLI args: MutuallyExclusiveInput(\"cid\", \"usock\")"] fn panic_on_too_many_opts() { diff --git a/src/qos_net/src/error.rs b/src/qos_net/src/error.rs index d91d6b88..b0d7c370 100644 --- a/src/qos_net/src/error.rs +++ b/src/qos_net/src/error.rs @@ -2,7 +2,7 @@ use std::net::AddrParseError; use borsh::{BorshDeserialize, BorshSerialize}; -#[cfg(feature = "proxy")] +#[cfg(any(feature = "proxy", feature = "async_proxy"))] use hickory_resolver::ResolveError; /// Errors related to creating and using proxy connections @@ -60,7 +60,7 @@ impl From for QosNetError { } } -#[cfg(feature = "proxy")] +#[cfg(any(feature = "proxy", feature = "async_proxy"))] impl From for QosNetError { fn from(err: ResolveError) -> Self { let msg = format!("{err:?}"); diff --git a/src/qos_net/src/lib.rs b/src/qos_net/src/lib.rs index f903c091..93642876 100644 --- a/src/qos_net/src/lib.rs +++ b/src/qos_net/src/lib.rs @@ -5,12 +5,24 @@ #![deny(clippy::all, unsafe_code)] -#[cfg(feature = "proxy")] -pub mod cli; pub mod error; +pub mod proxy_msg; + +#[cfg(any(feature = "proxy", 
feature = "async_proxy"))] +pub mod cli; + #[cfg(feature = "proxy")] pub mod proxy; #[cfg(feature = "proxy")] pub mod proxy_connection; -pub mod proxy_msg; +#[cfg(feature = "proxy")] pub mod proxy_stream; + +#[cfg(feature = "async_proxy")] +pub mod async_cli; +#[cfg(feature = "async_proxy")] +pub mod async_proxy; +#[cfg(feature = "async_proxy")] +pub mod async_proxy_connection; +#[cfg(feature = "async_proxy")] +pub mod async_proxy_stream; diff --git a/src/qos_net/src/main.rs b/src/qos_net/src/main.rs index 932fa198..2c8da593 100644 --- a/src/qos_net/src/main.rs +++ b/src/qos_net/src/main.rs @@ -1,12 +1,10 @@ -#[cfg(feature = "proxy")] -use qos_net::cli::CLI; - #[cfg(feature = "proxy")] pub fn main() { + use qos_net::cli::CLI; CLI::execute(); } -#[cfg(not(feature = "proxy"))] +#[cfg(not(any(feature = "proxy", feature = "async_proxy")))] pub fn main() { panic!("Cannot run qos_net CLI without proxy feature enabled") } diff --git a/src/qos_nsm/src/nsm.rs b/src/qos_nsm/src/nsm.rs index 360e6e94..7fe637a2 100644 --- a/src/qos_nsm/src/nsm.rs +++ b/src/qos_nsm/src/nsm.rs @@ -8,7 +8,7 @@ use crate::{nitro, types}; /// generic so mock providers can be subbed in for testing. In production use /// [`Nsm`]. // https://github.com/aws/aws-nitro-enclaves-nsm-api/blob/main/docs/attestation_process.md -pub trait NsmProvider { +pub trait NsmProvider: Send { /// Create a message with input data and output capacity from a given /// request, then send it to the NSM driver via `ioctl()` and wait /// for the driver's response. 
diff --git a/src/qos_system/src/lib.rs b/src/qos_system/src/lib.rs index 91d9f7f4..2279ef61 100644 --- a/src/qos_system/src/lib.rs +++ b/src/qos_system/src/lib.rs @@ -148,10 +148,9 @@ pub fn socket_connect( pub fn check_hwrng(rng_expected: &str) -> Result<(), SystemError> { use std::fs::read_to_string; let filename: &str = "/sys/class/misc/hw_random/rng_current"; - let rng_current_raw = read_to_string(filename) - .map_err(|_| SystemError { - message: format!("Failed to read {}", &filename), - })?; + let rng_current_raw = read_to_string(filename).map_err(|_| { + SystemError { message: format!("Failed to read {}", &filename) } + })?; let rng_current = rng_current_raw.trim(); if rng_expected != rng_current { return Err(SystemError { @@ -159,27 +158,35 @@ pub fn check_hwrng(rng_expected: &str) -> Result<(), SystemError> { "Entropy source was {} instead of {}", rng_current, rng_expected ), - }) + }); }; Ok(()) } #[cfg(any(target_env = "musl"))] -type ioctl_num_type = ::libc::c_int; +type IoctlNumType = ::libc::c_int; #[cfg(not(any(target_env = "musl")))] -type ioctl_num_type = ::libc::c_ulong; +type IoctlNumType = ::libc::c_ulong; -const IOCTL_VM_SOCKETS_GET_LOCAL_CID: ioctl_num_type = 0x7b9; +const IOCTL_VM_SOCKETS_GET_LOCAL_CID: IoctlNumType = 0x7b9; pub fn get_local_cid() -> Result { use libc::ioctl; let f = match File::open("/dev/vsock") { Ok(f) => f, - Err(e) => return Err(SystemError{ message: format!("Failed to open /dev/vsock") }), + Err(e) => { + return Err(SystemError { + message: format!("Failed to open /dev/vsock: {}", e), + }) + } }; let mut cid = 0; - if unsafe { ioctl(f.as_raw_fd(), IOCTL_VM_SOCKETS_GET_LOCAL_CID, &mut cid) } == -1 { - return Err(SystemError{ message: "Failed to fetch local CID".to_string() }); + if unsafe { ioctl(f.as_raw_fd(), IOCTL_VM_SOCKETS_GET_LOCAL_CID, &mut cid) } + == -1 + { + return Err(SystemError { + message: "Failed to fetch local CID".to_string(), + }); } return Ok(cid); } diff --git a/src/qos_test_primitives/Cargo.toml 
b/src/qos_test_primitives/Cargo.toml index 754674de..8378c285 100644 --- a/src/qos_test_primitives/Cargo.toml +++ b/src/qos_test_primitives/Cargo.toml @@ -6,3 +6,4 @@ publish = false [dependencies] rand = "0.8" +nix = { version = "0.26", default-features = false } diff --git a/src/qos_test_primitives/src/lib.rs b/src/qos_test_primitives/src/lib.rs index 08f5322f..c8e63fe4 100644 --- a/src/qos_test_primitives/src/lib.rs +++ b/src/qos_test_primitives/src/lib.rs @@ -27,6 +27,19 @@ impl From for ChildWrapper { impl Drop for ChildWrapper { fn drop(&mut self) { + #[cfg(unix)] + { + use nix::{sys::signal::Signal::SIGINT, unistd::Pid}; + let pid = Pid::from_raw(self.0.id() as i32); + match nix::sys::signal::kill(pid, SIGINT) { + Ok(_) => {} + Err(err) => eprintln!("error sending signal to child: {}", err), + } + + // allow clean exit + std::thread::sleep(Duration::from_millis(10)); + } + // Kill the process and explicitly ignore the result drop(self.0.kill()); } diff --git a/src/rust-toolchain.toml b/src/rust-toolchain.toml index b6b5ef05..f6ae0fd5 100644 --- a/src/rust-toolchain.toml +++ b/src/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.86" +channel = "1.81" components = ["rustfmt", "cargo", "clippy", "rust-analyzer"] profile = "minimal" From baf2b502f7df37a384266a62af54d9acd1ac64da Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 19 Jun 2025 10:10:29 -0700 Subject: [PATCH 05/20] fix vsock to host flag passing * upgrade nix to `0.29` to match `vsock` and `tokio-vsock` version * fix up nix `OwnedFd` abstraction changes --- src/Cargo.lock | 6 +-- src/integration/Cargo.toml | 2 +- src/qos_core/Cargo.toml | 2 +- src/qos_core/src/io/async_stream.rs | 28 ++++------ src/qos_core/src/io/stream.rs | 82 +++++++++++------------------ src/qos_test_primitives/Cargo.toml | 2 +- 6 files changed, 46 insertions(+), 76 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index 9999ce44..8bc351c1 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1476,7 +1476,7 
@@ version = "0.1.0" dependencies = [ "aws-nitro-enclaves-nsm-api", "borsh", - "nix 0.26.4", + "nix 0.29.0", "qos_client", "qos_core", "qos_crypto", @@ -2224,7 +2224,7 @@ dependencies = [ "borsh", "futures", "libc", - "nix 0.26.4", + "nix 0.29.0", "qos_crypto", "qos_hex", "qos_nsm", @@ -2327,7 +2327,7 @@ dependencies = [ name = "qos_test_primitives" version = "0.1.0" dependencies = [ - "nix 0.26.4", + "nix 0.29.0", "rand 0.8.5", ] diff --git a/src/integration/Cargo.toml b/src/integration/Cargo.toml index 00ae7a66..e5eeb3c8 100644 --- a/src/integration/Cargo.toml +++ b/src/integration/Cargo.toml @@ -17,7 +17,7 @@ qos_test_primitives = { path = "../qos_test_primitives" } tokio = { version = "1.38.0", features = ["macros", "rt-multi-thread"], default-features = false } borsh = { version = "1.0", features = ["std", "derive"] , default-features = false} -nix = { version = "0.26", features = ["socket"], default-features = false } +nix = { version = "0.29", features = ["socket"], default-features = false } rustls = { version = "0.23.5" } tokio-rustls = { version = "0.26.2" } webpki-roots = { version = "0.26.1" } diff --git a/src/qos_core/Cargo.toml b/src/qos_core/Cargo.toml index 756f5296..e4c23876 100644 --- a/src/qos_core/Cargo.toml +++ b/src/qos_core/Cargo.toml @@ -10,7 +10,7 @@ qos_hex = { path = "../qos_hex", features = ["serde"] } qos_p256 = { path = "../qos_p256" } qos_nsm = { path = "../qos_nsm", default-features = false } -nix = { version = "0.26", features = ["socket"], default-features = false } +nix = { version = "0.29", features = ["socket"], default-features = false } libc = "=0.2.172" borsh = { version = "1.0", features = ["std", "derive"] , default-features = false} vsss-rs = { version = "5.1", default-features = false, features = ["std", "zeroize"] } diff --git a/src/qos_core/src/io/async_stream.rs b/src/qos_core/src/io/async_stream.rs index f4794176..b03722d3 100644 --- a/src/qos_core/src/io/async_stream.rs +++ b/src/qos_core/src/io/async_stream.rs @@ 
-1,11 +1,11 @@ //! Abstractions to handle connection based socket streams. use std::{ - path::Path, pin::Pin, time::{Duration, SystemTime}, }; +use nix::sys::socket::UnixAddr; pub use nix::sys::time::TimeVal; use tokio::{ @@ -77,21 +77,13 @@ impl AsyncStream { pub async fn connect(&mut self) -> Result<(), IOError> { match self.address()? { SocketAddress::Unix(uaddr) => { - let path = - uaddr.path().ok_or(IOError::ConnectAddressInvalid)?; - - let inner = retry_unix_connect(path, self.timeout).await?; + let inner = retry_unix_connect(uaddr, self.timeout).await?; self.inner = Some(InnerStream::Unix(inner)); } #[cfg(feature = "vm")] SocketAddress::Vsock(vaddr) => { - let inner = retry_vsock_connect( - vaddr.cid(), - vaddr.port(), - self.timeout, - ) - .await?; + let inner = retry_vsock_connect(vaddr, self.timeout).await?; self.inner = Some(InnerStream::Vsock(inner)); } @@ -331,14 +323,16 @@ impl Drop for AsyncListener { // raw unix socket connect retry with timeout, 50ms period async fn retry_unix_connect( - path: &Path, + addr: &UnixAddr, timeout: Duration, ) -> Result { let sleep_time = Duration::from_millis(50); let eot = SystemTime::now() + timeout; + let path = addr.path().ok_or(IOError::ConnectAddressInvalid)?; loop { let socket = UnixSocket::new_stream()?; + match tokio::time::timeout(timeout, socket.connect(path)).await? { Ok(stream) => return Ok(stream), Err(err) => { @@ -354,18 +348,16 @@ async fn retry_unix_connect( // raw vsock socket connect retry with timeout, 50ms period #[cfg(feature = "vm")] async fn retry_vsock_connect( - cid: u32, - port: u32, + addr: &tokio_vsock::VsockAddr, timeout: Duration, ) -> Result { - use tokio_vsock::VsockAddr; - let sleep_time = Duration::from_millis(50); let eot = SystemTime::now() + timeout; loop { - let addr = VsockAddr::new(cid, port); - match tokio::time::timeout(timeout, VsockStream::connect(addr)).await? 
{ + eprintln!("Attempting VSOCK connect to: {:?}", addr); + match tokio::time::timeout(timeout, VsockStream::connect(*addr)).await? + { Ok(stream) => return Ok(stream), Err(err) => { if SystemTime::now() > eot { diff --git a/src/qos_core/src/io/stream.rs b/src/qos_core/src/io/stream.rs index b5586ddb..f27271f1 100644 --- a/src/qos_core/src/io/stream.rs +++ b/src/qos_core/src/io/stream.rs @@ -3,27 +3,23 @@ use std::{ io::{ErrorKind, Read, Write}, mem::size_of, - os::unix::io::RawFd, + os::fd::{AsFd, AsRawFd, FromRawFd, OwnedFd}, }; #[cfg(feature = "vm")] use nix::sys::socket::VsockAddr; -pub use nix::sys::time::{TimeVal, TimeValLike}; -use nix::{ - sys::socket::{ - accept, bind, connect, listen, recv, send, shutdown, socket, sockopt, - AddressFamily, MsgFlags, SetSockOpt, Shutdown, SockFlag, SockType, - SockaddrLike, UnixAddr, - }, - unistd::close, +use nix::sys::socket::{ + accept, bind, connect, listen, recv, send, socket, sockopt, AddressFamily, + Backlog, MsgFlags, SetSockOpt, SockFlag, SockType, SockaddrLike, UnixAddr, }; +pub use nix::sys::time::{TimeVal, TimeValLike}; use super::IOError; // 25(retries) x 10(milliseconds) = 1/4 a second of retrying const MAX_RETRY: usize = 25; const BACKOFF_MILLISECONDS: u64 = 10; -const BACKLOG: usize = 128; +const BACKLOG: i32 = 128; const MEGABYTE: usize = 1024 * 1024; @@ -136,7 +132,7 @@ impl SocketAddress { /// Handle on a stream pub struct Stream { - fd: RawFd, + fd: OwnedFd, } impl Stream { @@ -149,16 +145,16 @@ impl Stream { for _ in 0..MAX_RETRY { let fd = socket_fd(addr)?; - let stream = Self { fd }; // set `SO_RCVTIMEO` let receive_timeout = sockopt::ReceiveTimeout; - receive_timeout.set(fd, &timeout)?; + receive_timeout.set(&fd.as_fd(), &timeout)?; let send_timeout = sockopt::SendTimeout; - send_timeout.set(fd, &timeout)?; + send_timeout.set(&fd.as_fd(), &timeout)?; - match connect(stream.fd, &*addr.addr()) { + let stream = Self { fd }; + match connect(stream.fd.as_raw_fd(), &*addr.addr()) { Ok(()) => return 
Ok(stream), Err(e) => err = IOError::ConnectNixError(e), } @@ -182,7 +178,7 @@ impl Stream { let mut sent_bytes = 0; while sent_bytes < len_buf.len() { sent_bytes += match send( - self.fd, + self.fd.as_raw_fd(), &len_buf[sent_bytes..len_buf.len()], MsgFlags::empty(), ) { @@ -197,7 +193,7 @@ impl Stream { let mut sent_bytes = 0; while sent_bytes < len { sent_bytes += match send( - self.fd, + self.fd.as_raw_fd(), &buf[sent_bytes..len], MsgFlags::empty(), ) { @@ -221,7 +217,7 @@ impl Stream { let mut received_bytes = 0; while received_bytes < len { received_bytes += match recv( - self.fd, + self.fd.as_raw_fd(), &mut buf[received_bytes..len], MsgFlags::empty(), ) { @@ -258,7 +254,7 @@ impl Stream { let mut received_bytes = 0; while received_bytes < length { received_bytes += match recv( - self.fd, + self.fd.as_raw_fd(), &mut buf[received_bytes..length], MsgFlags::empty(), ) { @@ -284,7 +280,7 @@ impl Stream { impl Read for Stream { fn read(&mut self, buf: &mut [u8]) -> Result { - match recv(self.fd, buf, MsgFlags::empty()) { + match recv(self.fd.as_raw_fd(), buf, MsgFlags::empty()) { Ok(0) => Err(std::io::Error::new( ErrorKind::ConnectionAborted, "read 0 bytes", @@ -297,7 +293,7 @@ impl Read for Stream { impl Write for Stream { fn write(&mut self, buf: &[u8]) -> Result { - match send(self.fd, buf, MsgFlags::empty()) { + match send(self.fd.as_raw_fd(), buf, MsgFlags::empty()) { Ok(0) => Err(std::io::Error::new( ErrorKind::ConnectionAborted, "wrote 0 bytes", @@ -313,18 +309,9 @@ impl Write for Stream { } } -impl Drop for Stream { - fn drop(&mut self) { - // Its ok if either of these error - likely means the other end of the - // connection has been shutdown - let _ = shutdown(self.fd, Shutdown::Both); - let _ = close(self.fd); - } -} - /// Abstraction to listen for incoming stream connections. 
pub struct Listener { - fd: RawFd, + fd: OwnedFd, addr: SocketAddress, } @@ -335,16 +322,17 @@ impl Listener { Self::clean(&addr); let fd = socket_fd(&addr)?; - bind(fd, &*addr.addr())?; - listen(fd, BACKLOG)?; + bind(fd.as_raw_fd(), &*addr.addr())?; + listen(&fd.as_fd(), Backlog::new(BACKLOG)?)?; Ok(Self { fd, addr }) } + #[allow(unsafe_code)] fn accept(&self) -> Result { - let fd = accept(self.fd)?; + let fd = accept(self.fd.as_raw_fd())?; - Ok(Stream { fd }) + Ok(Stream { fd: unsafe { OwnedFd::from_raw_fd(fd) } }) } /// Remove Unix socket if it exists @@ -370,15 +358,12 @@ impl Iterator for Listener { impl Drop for Listener { fn drop(&mut self) { - // Its ok if either of these error - likely means the other end of the - // connection has been shutdown - let _ = shutdown(self.fd, Shutdown::Both); - let _ = close(self.fd); + // OwnedFd::Drop will close the socket, we just need to clear the file Self::clean(&self.addr); } } -fn socket_fd(addr: &SocketAddress) -> Result { +fn socket_fd(addr: &SocketAddress) -> Result { socket( addr.family(), // Type - sequenced, two way byte stream. (full duplexed). @@ -397,10 +382,7 @@ fn socket_fd(addr: &SocketAddress) -> Result { mod test { use std::{ - os::{fd::AsRawFd, unix::net::UnixListener}, - path::Path, - str::from_utf8, - thread, + os::unix::net::UnixListener, path::Path, str::from_utf8, thread, }; use super::*; @@ -413,18 +395,18 @@ mod test { // Then it kills itself. 
pub struct HarakiriPongServer { path: String, - fd: Option, + listener: Option, } impl HarakiriPongServer { pub fn new(path: String) -> Self { - Self { path, fd: None } + Self { path, listener: None } } pub fn start(&mut self) { let listener = UnixListener::bind(&self.path).unwrap(); - self.fd = Some(listener.as_raw_fd()); let (mut stream, _peer_addr) = listener.accept().unwrap(); + self.listener = Some(listener); // Read 4 bytes ("PING") let mut buf = [0u8; 4]; @@ -439,11 +421,7 @@ mod test { impl Drop for HarakiriPongServer { fn drop(&mut self) { - if let Some(fd) = &self.fd { - // Cleanup server fd if we have access to one - let _ = shutdown(fd.to_owned(), Shutdown::Both); - let _ = close(fd.to_owned()); - + if let Some(_listener) = &self.listener { let server_socket = Path::new(&self.path); if server_socket.exists() { drop(std::fs::remove_file(server_socket)); diff --git a/src/qos_test_primitives/Cargo.toml b/src/qos_test_primitives/Cargo.toml index 8378c285..df77647c 100644 --- a/src/qos_test_primitives/Cargo.toml +++ b/src/qos_test_primitives/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] rand = "0.8" -nix = { version = "0.26", default-features = false } +nix = { version = "0.29", default-features = false, features = ["signal"] } From 969b294e168807517560716d02c89f47c4ea480b Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 19 Jun 2025 11:39:08 -0700 Subject: [PATCH 06/20] unify reconnect logic --- src/qos_core/src/io/async_stream.rs | 66 +++++++++++++++-------------- src/qos_core/src/io/stream.rs | 20 +++++++++ src/qos_host/src/async_host.rs | 5 +-- 3 files changed, 56 insertions(+), 35 deletions(-) diff --git a/src/qos_core/src/io/async_stream.rs b/src/qos_core/src/io/async_stream.rs index b03722d3..a5f796d3 100644 --- a/src/qos_core/src/io/async_stream.rs +++ b/src/qos_core/src/io/async_stream.rs @@ -5,7 +5,6 @@ use std::{ time::{Duration, SystemTime}, }; -use nix::sys::socket::UnixAddr; pub use nix::sys::time::TimeVal; use tokio::{ @@ 
-74,16 +73,20 @@ impl AsyncStream { } /// Create a new `Stream` from a `SocketAddress` and a timeout and connect using async + /// Sets `inner` to the new stream. pub async fn connect(&mut self) -> Result<(), IOError> { + let timeout = self.timeout; + let addr = self.address()?.clone(); + match self.address()? { - SocketAddress::Unix(uaddr) => { - let inner = retry_unix_connect(uaddr, self.timeout).await?; + SocketAddress::Unix(_uaddr) => { + let inner = retry_unix_connect(addr, timeout).await?; self.inner = Some(InnerStream::Unix(inner)); } #[cfg(feature = "vm")] - SocketAddress::Vsock(vaddr) => { - let inner = retry_vsock_connect(vaddr, self.timeout).await?; + SocketAddress::Vsock(_vaddr) => { + let inner = retry_vsock_connect(addr, timeout).await?; self.inner = Some(InnerStream::Vsock(inner)); } @@ -92,38 +95,18 @@ impl AsyncStream { Ok(()) } - fn address(&self) -> Result<&SocketAddress, IOError> { - self.address.as_ref().ok_or(IOError::ConnectAddressInvalid) - } - - fn inner_mut(&mut self) -> Result<&mut InnerStream, IOError> { - self.inner.as_mut().ok_or(IOError::DisconnectedStream) - } - /// Reconnects this `AsyncStream` by calling `connect` again on the underlaying socket pub async fn reconnect(&mut self) -> Result<(), IOError> { let timeout = self.timeout; + let addr = self.address()?.clone(); match &mut self.inner_mut()? { InnerStream::Unix(ref mut s) => { - let addr = s - .peer_addr()? - .as_pathname() - .ok_or(IOError::ConnectAddressInvalid)? 
- .to_owned(); - let new_socket = UnixSocket::new_stream()?; - let new_stream = - tokio::time::timeout(timeout, new_socket.connect(addr)) - .await??; - *s = new_stream; + *s = retry_unix_connect(addr, timeout).await?; } #[cfg(feature = "vm")] InnerStream::Vsock(ref mut s) => { - let vaddr = s.peer_addr()?; - let new_stream = - tokio::time::timeout(timeout, VsockStream::connect(vaddr)) - .await??; - *s = new_stream; + *s = retry_vsock_connect(addr, timeout).await?; } } Ok(()) @@ -157,6 +140,14 @@ impl AsyncStream { self.send(req_buf).await?; self.recv().await } + + fn address(&self) -> Result<&SocketAddress, IOError> { + self.address.as_ref().ok_or(IOError::ConnectAddressInvalid) + } + + fn inner_mut(&mut self) -> Result<&mut InnerStream, IOError> { + self.inner.as_mut().ok_or(IOError::DisconnectedStream) + } } async fn send( @@ -323,19 +314,25 @@ impl Drop for AsyncListener { // raw unix socket connect retry with timeout, 50ms period async fn retry_unix_connect( - addr: &UnixAddr, + addr: SocketAddress, timeout: Duration, ) -> Result { let sleep_time = Duration::from_millis(50); let eot = SystemTime::now() + timeout; + let addr = addr.usock(); let path = addr.path().ok_or(IOError::ConnectAddressInvalid)?; loop { let socket = UnixSocket::new_stream()?; + eprintln!("Attempting USOCK connect to: {:?}", addr.path()); match tokio::time::timeout(timeout, socket.connect(path)).await? 
{ - Ok(stream) => return Ok(stream), + Ok(stream) => { + eprintln!("Connected to USOCK at: {:?}", addr.path()); + return Ok(stream); + } Err(err) => { + eprintln!("Error connecting to USOCK: {}", err); if SystemTime::now() > eot { return Err(err); } @@ -348,18 +345,23 @@ async fn retry_unix_connect( // raw vsock socket connect retry with timeout, 50ms period #[cfg(feature = "vm")] async fn retry_vsock_connect( - addr: &tokio_vsock::VsockAddr, + addr: SocketAddress, timeout: Duration, ) -> Result { let sleep_time = Duration::from_millis(50); let eot = SystemTime::now() + timeout; + let addr = addr.vsock(); loop { eprintln!("Attempting VSOCK connect to: {:?}", addr); match tokio::time::timeout(timeout, VsockStream::connect(*addr)).await? { - Ok(stream) => return Ok(stream), + Ok(stream) => { + eprintln!("Connected to VSOCK at: {:?}", addr); + return Ok(stream); + } Err(err) => { + eprintln!("Error connecting to VSOCK: {}", err); if SystemTime::now() > eot { return Err(err); } diff --git a/src/qos_core/src/io/stream.rs b/src/qos_core/src/io/stream.rs index f27271f1..69e66ea8 100644 --- a/src/qos_core/src/io/stream.rs +++ b/src/qos_core/src/io/stream.rs @@ -128,6 +128,26 @@ impl SocketAddress { } } } + + /// Returns the `UnixAddr` if this is a USOCK `SocketAddress`, panics otherwise + #[must_use] + pub fn usock(&self) -> &UnixAddr { + match self { + Self::Unix(usock) => usock, + #[cfg(feature = "vm")] + _ => panic!("invalid socket address requested"), + } + } + + /// Returns the `UnixAddr` if this is a USOCK `SocketAddress`, panics otherwise + #[must_use] + #[cfg(feature = "vm")] + pub fn vsock(&self) -> &VsockAddr { + match self { + Self::Vsock(vsock) => vsock, + _ => panic!("invalid socket address requested"), + } + } } /// Handle on a stream diff --git a/src/qos_host/src/async_host.rs b/src/qos_host/src/async_host.rs index 378d7f0d..f7f7034c 100644 --- a/src/qos_host/src/async_host.rs +++ b/src/qos_host/src/async_host.rs @@ -39,7 +39,7 @@ use crate::{ HOST_HEALTH, 
MAX_ENCODED_MSG_LEN, MESSAGE, }; -/// Resource shared across tasks in the [`HostServer`]. +/// Resource shared across tasks in the `AsyncHostServer`. #[derive(Debug)] struct AsyncQosHostState { enclave_client: AsyncClient, @@ -54,7 +54,7 @@ pub struct AsyncHostServer { } impl AsyncHostServer { - /// Create a new [`HostServer`]. See [`Self::serve`] for starting the + /// Create a new `HostServer`. See `Self::serve` for starting the /// server. #[must_use] pub fn new( @@ -240,7 +240,6 @@ impl AsyncHostServer { } /// Message route handler. - #[allow(clippy::unused_async)] async fn message( State(state): State>, encoded_request: Bytes, From 2ffaf6cdf0695efe336db91a72a2834bab2ad787 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 19 Jun 2025 16:14:33 -0700 Subject: [PATCH 07/20] add async integration tests implement async_qos_host for that purpose --- src/Makefile | 4 + src/init/Cargo.lock | 2 +- src/init/init.rs | 3 +- src/integration/src/bin/pivot_ok4.rs | 5 + src/integration/src/bin/pivot_ok5.rs | 5 + src/integration/src/lib.rs | 8 + src/integration/tests/async_boot.rs | 462 +++++++++++++++++++++++++ src/integration/tests/boot.rs | 4 +- src/integration/tests/dev_boot.rs | 92 ++++- src/qos_core/src/async_client.rs | 4 + src/qos_core/src/async_server.rs | 3 + src/qos_core/src/bin/async_qos_core.rs | 11 +- src/qos_core/src/cli.rs | 5 +- src/qos_core/src/io/async_pool.rs | 7 +- src/qos_core/src/io/async_stream.rs | 2 +- src/qos_core/src/io/stream.rs | 6 +- src/qos_core/src/protocol/msg.rs | 83 +++++ src/qos_core/src/reaper.rs | 26 +- src/qos_host/Cargo.toml | 5 + src/qos_host/src/async_host.rs | 26 +- src/qos_host/src/bin/async_qos_host.rs | 11 + 21 files changed, 742 insertions(+), 32 deletions(-) create mode 100644 src/integration/src/bin/pivot_ok4.rs create mode 100644 src/integration/src/bin/pivot_ok5.rs create mode 100644 src/integration/tests/async_boot.rs create mode 100644 src/qos_host/src/bin/async_qos_host.rs diff --git a/src/Makefile b/src/Makefile index 
7ee92a72..50cce34c 100644 --- a/src/Makefile +++ b/src/Makefile @@ -159,6 +159,10 @@ test: @# The integration tests rely on binaries from other crates being built, so @# we build all the workspace targets. cargo build --all + @# We also need the async version of qos_core + cargo build --bin async_qos_core --features async,mock + @# We also need the async version of qos_host + cargo build --bin async_qos_host --features async @# We also need the async version of qos_net cargo build --bin async_qos_net --features async_proxy @# Run tests diff --git a/src/init/Cargo.lock b/src/init/Cargo.lock index 244c48ee..65a4b2b7 100644 --- a/src/init/Cargo.lock +++ b/src/init/Cargo.lock @@ -1226,7 +1226,7 @@ dependencies = [ "borsh", "futures", "libc", - "nix 0.26.4", + "nix 0.29.0", "qos_crypto", "qos_hex", "qos_nsm", diff --git a/src/init/init.rs b/src/init/init.rs index 5e1d7653..4c8f72f1 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -116,7 +116,8 @@ async fn main() { TimeVal::seconds(5), ); - Reaper::async_execute(&handles, Box::new(Nsm), core_pool, app_pool, None); + Reaper::async_execute(&handles, Box::new(Nsm), core_pool, app_pool, None) + .await; reboot(); } diff --git a/src/integration/src/bin/pivot_ok4.rs b/src/integration/src/bin/pivot_ok4.rs new file mode 100644 index 00000000..a235e192 --- /dev/null +++ b/src/integration/src/bin/pivot_ok4.rs @@ -0,0 +1,5 @@ +use integration::PIVOT_OK4_SUCCESS_FILE; + +fn main() { + integration::Cli::execute(PIVOT_OK4_SUCCESS_FILE); +} diff --git a/src/integration/src/bin/pivot_ok5.rs b/src/integration/src/bin/pivot_ok5.rs new file mode 100644 index 00000000..5bf20e98 --- /dev/null +++ b/src/integration/src/bin/pivot_ok5.rs @@ -0,0 +1,5 @@ +use integration::PIVOT_OK5_SUCCESS_FILE; + +fn main() { + integration::Cli::execute(PIVOT_OK5_SUCCESS_FILE); +} diff --git a/src/integration/src/lib.rs b/src/integration/src/lib.rs index 6b727c11..c08510d6 100644 --- a/src/integration/src/lib.rs +++ b/src/integration/src/lib.rs @@ -13,12 
+13,20 @@ pub const PIVOT_OK_SUCCESS_FILE: &str = "./pivot_ok_works"; pub const PIVOT_OK2_SUCCESS_FILE: &str = "./pivot_ok2_works"; /// Path to the file `pivot_ok3` writes on success for tests. pub const PIVOT_OK3_SUCCESS_FILE: &str = "./pivot_ok3_works"; +/// Path to the file `pivot_ok4` writes on success for tests. +pub const PIVOT_OK4_SUCCESS_FILE: &str = "./pivot_ok4_works"; +/// Path to the file `pivot_ok5` writes on success for tests. +pub const PIVOT_OK5_SUCCESS_FILE: &str = "./pivot_ok5_works"; /// Path to pivot_ok bin for tests. pub const PIVOT_OK_PATH: &str = "../target/debug/pivot_ok"; /// Path to pivot_ok2 bin for tests. pub const PIVOT_OK2_PATH: &str = "../target/debug/pivot_ok2"; /// Path to pivot_ok3 bin for tests. pub const PIVOT_OK3_PATH: &str = "../target/debug/pivot_ok3"; +/// Path to pivot_ok4 bin for tests. +pub const PIVOT_OK4_PATH: &str = "../target/debug/pivot_ok4"; +/// Path to pivot_ok5 bin for tests. +pub const PIVOT_OK5_PATH: &str = "../target/debug/pivot_ok5"; /// Path to pivot loop bin for tests. pub const PIVOT_LOOP_PATH: &str = "../target/debug/pivot_loop"; /// Path to pivot_abort bin for tests. 
diff --git a/src/integration/tests/async_boot.rs b/src/integration/tests/async_boot.rs new file mode 100644 index 00000000..aac2aa37 --- /dev/null +++ b/src/integration/tests/async_boot.rs @@ -0,0 +1,462 @@ +use std::{ + fs, + io::{BufRead, BufReader, Write}, + path::Path, + process::{Command, Stdio}, +}; + +use borsh::de::BorshDeserialize; +use integration::{ + LOCAL_HOST, PCR3_PRE_IMAGE_PATH, PIVOT_OK5_PATH, PIVOT_OK5_SUCCESS_FILE, + QOS_DIST_DIR, +}; +use qos_core::protocol::{ + services::{ + boot::{ + Approval, Manifest, ManifestSet, Namespace, PivotConfig, + RestartPolicy, ShareSet, + }, + genesis::{GenesisMemberOutput, GenesisOutput}, + }, + ProtocolPhase, QosHash, +}; +use qos_crypto::sha_256; +use qos_host::EnclaveInfo; +use qos_p256::P256Pair; +use qos_test_primitives::{ChildWrapper, PathWrapper}; + +#[tokio::test] +async fn async_standard_boot_e2e() { + const PIVOT_HASH_PATH: &str = "/tmp/async_standard_boot_e2e-pivot-hash.txt"; + + let host_port = qos_test_primitives::find_free_port().unwrap(); + let tmp: PathWrapper = "/tmp/boot-e2e".into(); + let _: PathWrapper = PIVOT_OK5_SUCCESS_FILE.into(); + let _: PathWrapper = PIVOT_HASH_PATH.into(); + fs::create_dir_all(&*tmp).unwrap(); + + let usock: PathWrapper = "/tmp/boot-e2e/boot_e2e.sock".into(); + let secret_path: PathWrapper = "/tmp/boot-e2e/boot_e2e.secret".into(); + let pivot_path: PathWrapper = "/tmp/boot-e2e/boot_e2e.pivot".into(); + let manifest_path: PathWrapper = "/tmp/boot-e2e/boot_e2e.manifest".into(); + let eph_path: PathWrapper = "/tmp/boot-e2e/ephemeral_key.secret".into(); + + let boot_dir: PathWrapper = "/tmp/boot-e2e/boot-dir".into(); + fs::create_dir_all(&*boot_dir).unwrap(); + let attestation_dir: PathWrapper = "/tmp/boot-e2e/attestation-dir".into(); + fs::create_dir_all(&*attestation_dir).unwrap(); + let attestation_doc_path = format!("{}/attestation_doc", &*attestation_dir); + + let all_personal_dir = "./mock/boot-e2e/all-personal-dir"; + + let namespace = "quit-coding-to-vape"; + + let 
personal_dir = |user: &str| format!("{all_personal_dir}/{user}-dir"); + + let user1 = "user1"; + let user2 = "user2"; + let user3 = "user3"; + + // -- Create pivot-build-fingerprints.txt + let pivot = fs::read(PIVOT_OK5_PATH).unwrap(); + let mock_pivot_hash = sha_256(&pivot); + let pivot_hash = qos_hex::encode_to_vec(&mock_pivot_hash); + std::fs::write(PIVOT_HASH_PATH, pivot_hash).unwrap(); + + // -- CLIENT create manifest. + let msg = "testing420"; + let pivot_args = format!("[--msg,{msg}]"); + let cli_manifest_path = format!("{}/manifest", &*boot_dir); + + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest", + "--nonce", + "2", + "--namespace", + namespace, + "--restart-policy", + "never", + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-path", + &cli_manifest_path, + "--pivot-args", + &pivot_args, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // Check the manifest written to file + let manifest = + Manifest::try_from_slice(&fs::read(&cli_manifest_path).unwrap()) + .unwrap(); + + let genesis_output = { + let contents = + fs::read("./mock/boot-e2e/genesis-dir/genesis_output").unwrap(); + GenesisOutput::try_from_slice(&contents).unwrap() + }; + // For simplicity sake, we use the same keys for the share set and manifest + // set. + let mut members: Vec<_> = genesis_output + .member_outputs + .iter() + .cloned() + .map(|GenesisMemberOutput { share_set_member, .. 
}| share_set_member) + .collect(); + members.sort(); + + let namespace_field = Namespace { + name: namespace.to_string(), + nonce: 2, + quorum_key: genesis_output.quorum_key, + }; + assert_eq!(manifest.namespace, namespace_field); + let pivot = PivotConfig { + hash: mock_pivot_hash, + restart: RestartPolicy::Never, + args: vec!["--msg".to_string(), msg.to_string()], + }; + assert_eq!(manifest.pivot, pivot); + let manifest_set = ManifestSet { threshold: 2, members: members.clone() }; + assert_eq!(manifest.manifest_set, manifest_set); + let share_set = ShareSet { threshold: 2, members }; + assert_eq!(manifest.share_set, share_set); + + // -- CLIENT make sure each user can run `approve-manifest` + for alias in [user1, user2, user3] { + let approval_path = format!( + "{}/{}-{}-{}.approval", + &*boot_dir, alias, namespace, manifest.namespace.nonce, + ); + + let secret_path = format!("{}/{}.secret", &personal_dir(alias), alias); + + let mut child = Command::new("../target/debug/qos_client") + .args([ + "approve-manifest", + "--secret-path", + &*secret_path, + "--manifest-path", + &cli_manifest_path, + "--manifest-approvals-dir", + &*boot_dir, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub", + "--alias", + alias, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? 
(y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? (y/n)" + ); + // On purpose, try to input a bad value, neither yes or no + stdin + .write_all("maybe\n".as_bytes()) + .expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Please answer with either \"yes\" (y) or \"no\" (n)" + ); + // Try the longer option ("yes" rather than "y") + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct pivot restart policy: RestartPolicy::Never? (y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Are these the correct pivot args:" + ); + assert_eq!( + &stdout.next().unwrap().unwrap(), + "[\"--msg\", \"testing420\"]?" + ); + assert_eq!(&stdout.next().unwrap().unwrap(), "(y/n)"); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + // Wait for the command to write the approval and exit + assert!(child.wait().unwrap().success()); + + // Read in the generated approval to check it was created correctly + let approval = + Approval::try_from_slice(&fs::read(approval_path).unwrap()) + .unwrap(); + let personal_pair = P256Pair::from_hex_file(format!( + "{}/{}.secret", + personal_dir(alias), + alias, + )) + .unwrap(); + + let signature = personal_pair.sign(&manifest.qos_hash()).unwrap(); + assert_eq!(approval.signature, signature); + + assert_eq!(approval.member.alias, alias); + assert_eq!( + approval.member.pub_key, + personal_pair.public_key().to_bytes(), + ); + } + + // -- ENCLAVE start enclave + let mut _enclave_child_process: ChildWrapper = + Command::new("../target/debug/async_qos_core") + .args([ + "--usock", + &*usock, + "--quorum-file", + &*secret_path, + "--pivot-file", + &*pivot_path, + "--ephemeral-file", + &*eph_path, 
+ "--mock", + "--manifest-file", + &*manifest_path, + ]) + .spawn() + .unwrap() + .into(); + + // -- HOST start host + let mut _host_child_process: ChildWrapper = + Command::new("../target/debug/async_qos_host") + .args([ + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--usock", + &*usock, + ]) + .spawn() + .unwrap() + .into(); + + // -- Make sure the enclave and host have time to boot + qos_test_primitives::wait_until_port_is_bound(host_port); + + // -- CLIENT generate the manifest envelope + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest-envelope", + "--manifest-approvals-dir", + &*boot_dir, + "--manifest-path", + &cli_manifest_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // -- CLIENT broadcast boot standard instruction + let manifest_envelope_path = format!("{}/manifest_envelope", &*boot_dir,); + assert!(Command::new("../target/debug/qos_client") + .args([ + "boot-standard", + "--manifest-envelope-path", + &manifest_envelope_path, + "--pivot-path", + PIVOT_OK5_PATH, + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--pcr3-preimage-path", + "./mock/pcr3-preimage.txt", + "--unsafe-skip-attestation", + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // For each user, post a share, + // and sanity check the pivot has not yet executed. 
+ assert!(!Path::new(PIVOT_OK5_SUCCESS_FILE).exists()); + for user in [&user1, &user2] { + // Get attestation doc and manifest + assert!(Command::new("../target/debug/qos_client") + .args([ + "get-attestation-doc", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--attestation-doc-path", + &*attestation_doc_path, + "--manifest-envelope-path", + "/tmp/dont_care" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + let share_path = format!("{}/{}.share", &personal_dir(user), user); + let secret_path = format!("{}/{}.secret", &personal_dir(user), user); + let eph_wrapped_share_path: PathWrapper = + format!("{}/{}.eph_wrapped.share", &*tmp, user).into(); + let approval_path: PathWrapper = + format!("{}/{}.attestation.approval", &*tmp, user).into(); + // Encrypt share to ephemeral key + let mut child = Command::new("../target/debug/qos_client") + .args([ + "proxy-re-encrypt-share", + "--share-path", + &share_path, + "--secret-path", + &secret_path, + "--attestation-doc-path", + &*attestation_doc_path, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + "--manifest-envelope-path", + &manifest_envelope_path, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--alias", + user, + "--unsafe-skip-attestation", + "--unsafe-eph-path-override", + &*eph_path, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + // Skip over a log message + stdout.next(); + + // Answer prompts with yes + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? 
(y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Does this AWS IAM role belong to the intended organization: arn:aws:iam::123456789012:role/Webserver? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "The following manifest set members approved:" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + // Check that it finished successfully + assert!(child.wait().unwrap().success()); + + // Post the encrypted share + assert!(Command::new("../target/debug/qos_client") + .args([ + "post-share", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + } + + // Give the enclave time to start the pivot + std::thread::sleep(std::time::Duration::from_secs(2)); + + // Check that the pivot executed + let contents = std::fs::read(PIVOT_OK5_SUCCESS_FILE).unwrap(); + assert_eq!(std::str::from_utf8(&contents).unwrap(), msg); + + let enclave_info_url = + format!("http://{LOCAL_HOST}:{}/qos/enclave-info", host_port); + let enclave_info: EnclaveInfo = + ureq::get(&enclave_info_url).call().unwrap().into_json().unwrap(); + assert_eq!(enclave_info.phase, ProtocolPhase::QuorumKeyProvisioned); + + fs::remove_file(PIVOT_OK5_SUCCESS_FILE).unwrap(); +} diff --git a/src/integration/tests/boot.rs b/src/integration/tests/boot.rs index 5b993832..bb32cef2 100644 --- a/src/integration/tests/boot.rs +++ b/src/integration/tests/boot.rs @@ -25,10 +25,10 @@ use qos_host::EnclaveInfo; use qos_p256::P256Pair; use 
qos_test_primitives::{ChildWrapper, PathWrapper}; -const PIVOT_HASH_PATH: &str = "/tmp/standard_boot_e2e-pivot-hash.txt"; - #[tokio::test] async fn standard_boot_e2e() { + const PIVOT_HASH_PATH: &str = "/tmp/standard_boot_e2e-pivot-hash.txt"; + let host_port = qos_test_primitives::find_free_port().unwrap(); let tmp: PathWrapper = "/tmp/boot-e2e".into(); let _: PathWrapper = PIVOT_OK2_SUCCESS_FILE.into(); diff --git a/src/integration/tests/dev_boot.rs b/src/integration/tests/dev_boot.rs index 2aed01b4..1383df2d 100644 --- a/src/integration/tests/dev_boot.rs +++ b/src/integration/tests/dev_boot.rs @@ -1,6 +1,9 @@ use std::{fs, path::Path, process::Command}; -use integration::{LOCAL_HOST, PIVOT_OK3_PATH, PIVOT_OK3_SUCCESS_FILE}; +use integration::{ + LOCAL_HOST, PIVOT_OK3_PATH, PIVOT_OK3_SUCCESS_FILE, PIVOT_OK4_PATH, + PIVOT_OK4_SUCCESS_FILE, +}; use qos_test_primitives::{ChildWrapper, PathWrapper}; #[tokio::test] @@ -87,3 +90,90 @@ async fn dev_boot_e2e() { assert_eq!(std::str::from_utf8(&contents).unwrap(), "vapers-only"); fs::remove_file(PIVOT_OK3_SUCCESS_FILE).unwrap(); } + +#[tokio::test] +async fn async_dev_boot_e2e() { + let tmp: PathWrapper = "/tmp/dev-async-boot-e2e-tmp".into(); + drop(fs::create_dir_all(&*tmp)); + let _: PathWrapper = PIVOT_OK4_SUCCESS_FILE.into(); + let usock: PathWrapper = "/tmp/dev-async-boot-e2e-tmp/sock.sock".into(); + let secret_path: PathWrapper = + "/tmp/dev-async-boot-e2e-tmp/quorum.secret".into(); + let pivot_path: PathWrapper = + "/tmp/dev-async-boot-e2e-tmp/pivot.pivot".into(); + let manifest_path: PathWrapper = + "/tmp/dev-async-boot-e2e-tmp/manifest.manifest".into(); + let eph_path: PathWrapper = "/tmp/dev-async-boot-e2e-tmp/eph.secret".into(); + + let host_port = qos_test_primitives::find_free_port().unwrap(); + + // Start Enclave + let mut _enclave_child_process: ChildWrapper = + Command::new("../target/debug/async_qos_core") + .args([ + "--usock", + &*usock, + "--quorum-file", + &*secret_path, + "--pivot-file", + 
&*pivot_path, + "--ephemeral-file", + &*eph_path, + "--mock", + "--manifest-file", + &*manifest_path, + ]) + .spawn() + .unwrap() + .into(); + + // Start Host + let mut _host_child_process: ChildWrapper = + Command::new("../target/debug/async_qos_host") + .args([ + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--usock", + &*usock, + ]) + .spawn() + .unwrap() + .into(); + + qos_test_primitives::wait_until_port_is_bound(host_port); + + // Run `dangerous-dev-boot` + let res = Command::new("../target/debug/qos_client") + .args([ + "dangerous-dev-boot", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--pivot-path", + PIVOT_OK4_PATH, + "--restart-policy", + "never", + "--pivot-args", + "[--msg,vapers-only]", + "--unsafe-eph-path-override", + &*eph_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap(); + + // Give the coordinator time to pivot + std::thread::sleep(std::time::Duration::from_secs(2)); + + // Make sure pivot ran + assert!(Path::new(PIVOT_OK4_SUCCESS_FILE).exists()); + assert!(res.success()); + + let contents = fs::read(PIVOT_OK4_SUCCESS_FILE).unwrap(); + assert_eq!(std::str::from_utf8(&contents).unwrap(), "vapers-only"); + fs::remove_file(PIVOT_OK4_SUCCESS_FILE).unwrap(); +} diff --git a/src/qos_core/src/async_client.rs b/src/qos_core/src/async_client.rs index 04c46682..ff875a30 100644 --- a/src/qos_core/src/async_client.rs +++ b/src/qos_core/src/async_client.rs @@ -19,8 +19,12 @@ impl AsyncClient { /// Send raw bytes and wait for a response until the clients configured /// timeout. 
pub async fn call(&self, request: &[u8]) -> Result, ClientError> { + // TODO: ales - remove later, debug reasons + eprintln!("AsyncClient::call - Attempting to claim pool read lock"); let pool = self.pool.read().await; + eprintln!("AsyncClient::call - Attempting to claim pool stream"); let mut stream = pool.get().await; + eprintln!("AsyncClient::call - Stream aquired"); let resp = stream.call(request).await?; Ok(resp) diff --git a/src/qos_core/src/async_server.rs b/src/qos_core/src/async_server.rs index 426375bc..e1410ea0 100644 --- a/src/qos_core/src/async_server.rs +++ b/src/qos_core/src/async_server.rs @@ -72,6 +72,9 @@ where Err(err) => match err { IOError::StdIoError(err) => { if err.kind() == std::io::ErrorKind::UnexpectedEof { + eprintln!( + "AsyncServer: unexpected eof, re-accepting" + ); break; // just re-accept } } diff --git a/src/qos_core/src/bin/async_qos_core.rs b/src/qos_core/src/bin/async_qos_core.rs index 59183526..7e87d43f 100644 --- a/src/qos_core/src/bin/async_qos_core.rs +++ b/src/qos_core/src/bin/async_qos_core.rs @@ -1,11 +1,6 @@ use qos_core::cli::CLI; -fn main() { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("tokio main to run") - .block_on(async { - CLI::async_execute(); - }); +#[tokio::main] +async fn main() { + CLI::async_execute().await; } diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index 5d0ba782..fb005563 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -203,7 +203,7 @@ impl CLI { /// Execute the enclave server CLI with the environment args using tokio/async #[cfg(feature = "async")] - pub fn async_execute() { + pub async fn async_execute() { let mut args: Vec = env::args().collect(); let opts = EnclaveOpts::new(&mut args); @@ -223,7 +223,8 @@ impl CLI { opts.async_pool(false), opts.async_pool(true), None, - ); + ) + .await; } } } diff --git a/src/qos_core/src/io/async_pool.rs b/src/qos_core/src/io/async_pool.rs index e096caa4..69456bf0 100644 --- 
a/src/qos_core/src/io/async_pool.rs +++ b/src/qos_core/src/io/async_pool.rs @@ -93,9 +93,10 @@ impl AsyncPool { /// Will wait (async) if all connections are locked until one becomes available async fn get(&self) -> MutexGuard { // TODO: make this into an error - if self.handles.is_empty() { - panic!("empty handles in AsyncPool. Bad init?"); - } + assert!( + !self.handles.is_empty(), + "empty handles in AsyncPool. Bad init?" + ); let iter = self.handles.iter().map(|h| { let l = h.lock(); diff --git a/src/qos_core/src/io/async_stream.rs b/src/qos_core/src/io/async_stream.rs index a5f796d3..593d5ee9 100644 --- a/src/qos_core/src/io/async_stream.rs +++ b/src/qos_core/src/io/async_stream.rs @@ -332,7 +332,7 @@ async fn retry_unix_connect( return Ok(stream); } Err(err) => { - eprintln!("Error connecting to USOCK: {}", err); + eprintln!("Error connecting to USOCK: {err}"); if SystemTime::now() > eot { return Err(err); } diff --git a/src/qos_core/src/io/stream.rs b/src/qos_core/src/io/stream.rs index 69e66ea8..e6620fab 100644 --- a/src/qos_core/src/io/stream.rs +++ b/src/qos_core/src/io/stream.rs @@ -112,6 +112,7 @@ impl SocketAddress { } /// Shows socket debug info + #[must_use] pub fn debug_info(&self) -> String { match self { #[cfg(feature = "vm")] @@ -120,10 +121,13 @@ impl SocketAddress { } Self::Unix(usock) => { format!( - "usock path: {:?}", + "usock path: {}", usock .path() .unwrap_or(&std::path::PathBuf::from("unknown/error")) + .as_os_str() + .to_str() + .unwrap_or("unable to procure") ) } } diff --git a/src/qos_core/src/protocol/msg.rs b/src/qos_core/src/protocol/msg.rs index 9b1a3e98..142d44aa 100644 --- a/src/qos_core/src/protocol/msg.rs +++ b/src/qos_core/src/protocol/msg.rs @@ -140,6 +140,89 @@ pub enum ProtocolMsg { }, } +impl std::fmt::Display for ProtocolMsg { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::ProtocolErrorResponse(_) => { + write!(f, "ProtocolErrorResponse") + } + Self::StatusRequest => 
write!(f, "StatusRequest"), + Self::StatusResponse(_) => { + write!(f, "StatusResponse") + } + Self::BootStandardRequest { .. } => { + write!(f, "BootStandardRequest") + } + Self::BootStandardResponse { .. } => { + write!(f, "BootStandardResponse") + } + Self::BootGenesisRequest { .. } => { + write!(f, "BootGenesisRequest") + } + Self::BootGenesisResponse { .. } => { + write!(f, "BootGenesisResponse") + } + Self::ProvisionRequest { .. } => { + write!(f, "ProvisionRequest") + } + Self::ProvisionResponse { reconstructed } => { + write!( + f, + "ProvisionResponse{{ reconstructed: {reconstructed} }}" + ) + } + Self::ProxyRequest { .. } => { + write!(f, "ProxyRequest") + } + Self::ProxyResponse { .. } => { + write!(f, "ProxyResponse") + } + Self::LiveAttestationDocRequest { .. } => { + write!(f, "LiveAttestationDocRequest") + } + Self::LiveAttestationDocResponse { .. } => { + write!(f, "LiveAttestationDocResponse") + } + Self::BootKeyForwardRequest { .. } => { + write!(f, "BootKeyForwardRequest") + } + Self::BootKeyForwardResponse { nsm_response } => match nsm_response + { + NsmResponse::Attestation { .. } => write!( + f, + "BootKeyForwardResponse {{ nsm_response: Attestation }}" + ), + NsmResponse::Error(ecode) => write!( + f, + "BootKeyForwardResponse {{ nsm_response: Error({ecode:?}) }}" + ), + _ => write!( + f, + "BootKeyForwardResponse {{ nsm_response: Other }}" // this shouldn't really show up + ), + }, + Self::ExportKeyRequest { .. } => { + write!(f, "ExportKeyRequest") + } + Self::ExportKeyResponse { .. } => { + write!(f, "ExportKeyResponse") + } + Self::InjectKeyRequest { .. } => { + write!(f, "InjectKeyRequest") + } + Self::InjectKeyResponse { .. } => { + write!(f, "InjectKeyResponse") + } + Self::ManifestEnvelopeRequest { .. } => { + write!(f, "ManifestEnvelopeRequest") + } + Self::ManifestEnvelopeResponse { .. 
} => { + write!(f, "ManifestEnvelopeResponse") + } + } + } +} + #[cfg(test)] mod test { use borsh::BorshDeserialize; diff --git a/src/qos_core/src/reaper.rs b/src/qos_core/src/reaper.rs index 083590bd..ca37e6ca 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -112,6 +112,9 @@ impl Reaper { #[cfg(feature = "async")] mod inner { + use std::sync::Arc; + use tokio::sync::RwLock; + #[allow(clippy::wildcard_imports)] use super::*; use crate::{ @@ -128,7 +131,7 @@ mod inner { /// - If spawning the pivot errors. /// - If waiting for the pivot errors. #[allow(dead_code)] - pub fn async_execute( + pub async fn async_execute( handles: &Handles, nsm: Box, pool: AsyncStreamPool, @@ -136,6 +139,9 @@ mod inner { test_only_init_phase_override: Option, ) { let handles2 = handles.clone(); + let quit = Arc::new(RwLock::new(false)); + let inner_quit = quit.clone(); + tokio::spawn(async move { // create the state let protocol_state = ProtocolState::new( @@ -158,12 +164,18 @@ mod inner { for task in tasks { task.abort(); } + *inner_quit.write().await = true; } Err(err) => panic!("{err}"), } }); loop { + if *quit.read().await { + eprintln!("quit called by ctrl+c"); + std::process::exit(1); + } + if handles.quorum_key_exists() && handles.pivot_exists() && handles.manifest_envelope_exists() @@ -173,7 +185,7 @@ mod inner { break; } - std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; } println!("Reaper::execute about to spawn pivot"); @@ -198,9 +210,10 @@ mod inner { // pause to ensure OS has enough time to clean up resources // before restarting - std::thread::sleep(std::time::Duration::from_secs( + tokio::time::sleep(std::time::Duration::from_secs( REAPER_RESTART_DELAY_IN_SECONDS, - )); + )) + .await; println!("Restarting pivot ..."); }, @@ -214,9 +227,10 @@ mod inner { } } - std::thread::sleep(std::time::Duration::from_secs( + tokio::time::sleep(std::time::Duration::from_secs( 
REAPER_EXIT_DELAY_IN_SECONDS, - )); + )) + .await; println!("Reaper exiting ... "); } } diff --git a/src/qos_host/Cargo.toml b/src/qos_host/Cargo.toml index 3cdb5710..e0e3f6f0 100644 --- a/src/qos_host/Cargo.toml +++ b/src/qos_host/Cargo.toml @@ -18,3 +18,8 @@ serde = { version = "1", features = ["derive"], default-features = false } [features] async = ["qos_core/async"] vm = ["qos_core/vm"] + +[[bin]] +name = "async_qos_host" +path = "src/bin/async_qos_host.rs" +required-features = ["async"] diff --git a/src/qos_host/src/async_host.rs b/src/qos_host/src/async_host.rs index f7f7034c..1640504c 100644 --- a/src/qos_host/src/async_host.rs +++ b/src/qos_host/src/async_host.rs @@ -110,7 +110,6 @@ impl AsyncHostServer { } /// Health route handler. - #[allow(clippy::unused_async)] async fn enclave_health( State(state): State>, ) -> impl IntoResponse { @@ -162,7 +161,6 @@ impl AsyncHostServer { } } - #[allow(clippy::unused_async)] async fn enclave_info( State(state): State>, ) -> Result, Error> { @@ -254,12 +252,28 @@ impl AsyncHostServer { ); } + // DEBUG: remove later + match ProtocolMsg::try_from_slice(&encoded_request) { + Ok(r) => eprintln!("Received message: {}", r), + Err(e) => eprintln!("Unable to decode request: {}", e), + } + match state.enclave_client.call(&encoded_request).await { - Ok(encoded_response) => (StatusCode::OK, encoded_response), + Ok(encoded_response) => { + // DEBUG: remove later + match ProtocolMsg::try_from_slice(&encoded_response) { + Ok(r) => { + eprintln!("Enclave responded with: {}", r); + } + Err(e) => { + eprintln!("Error deserializing response from enclave, make sure qos_host version match qos_core: {e}"); + } + }; + + (StatusCode::OK, encoded_response) + } Err(e) => { - let msg = - format!("Error while trying to send request over socket to enclave: {e:?}"); - eprint!("{msg}"); + eprintln!("Error while trying to send request over socket to enclave: {e:?}"); ( StatusCode::INTERNAL_SERVER_ERROR, diff --git 
a/src/qos_host/src/bin/async_qos_host.rs b/src/qos_host/src/bin/async_qos_host.rs new file mode 100644 index 00000000..6204970d --- /dev/null +++ b/src/qos_host/src/bin/async_qos_host.rs @@ -0,0 +1,11 @@ +#[tokio::main] +async fn main() { + // Development quick start + // ``` + // `cargo run --bin qos_host -- \ + // --usock tk.sock \ + // --host-port 3000 \ + // --host-ip 0.0.0.0 \ + // ``` + qos_host::cli::CLI::execute().await; +} From f304e3d56548b45a32aea37ea7a8df43bb6f1056 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Tue, 1 Jul 2025 11:18:31 -0700 Subject: [PATCH 08/20] add DifferentPCR0 logging this should be removed later --- src/qos_core/src/protocol/services/key.rs | 2 +- src/qos_enclave/src/main.rs | 4 +--- src/qos_nsm/src/nitro/error.rs | 2 +- src/qos_nsm/src/nitro/mod.rs | 22 ++++++++++++---------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/qos_core/src/protocol/services/key.rs b/src/qos_core/src/protocol/services/key.rs index caf80c39..75e9a59b 100644 --- a/src/qos_core/src/protocol/services/key.rs +++ b/src/qos_core/src/protocol/services/key.rs @@ -873,7 +873,7 @@ mod test { &manifest_envelope, &att_doc ), - Err(ProtocolError::QosAttestError("DifferentPcr0".to_string())) + Err(ProtocolError::QosAttestError("DifferentPcr0(\"8080808080808080808080808080808080808080808080808080808080808080\", \"0404040404040404040404040404040404040404040404040404040404040404\")".to_string())) ); } diff --git a/src/qos_enclave/src/main.rs b/src/qos_enclave/src/main.rs index cede643e..400b145f 100644 --- a/src/qos_enclave/src/main.rs +++ b/src/qos_enclave/src/main.rs @@ -73,8 +73,6 @@ fn boot() -> String { let memory_mib = std::env::var("MEMORY_MIB").unwrap_or("1024".to_string()); let cpu_count = std::env::var("CPU_COUNT").unwrap_or("2".to_string()); let debug_mode = std::env::var("DEBUG").unwrap_or("false".to_string()); - let attach_console = - std::env::var("ATTACH_CONSOLE").unwrap_or("false".to_string()); let enclave_name = 
std::env::var("ENCLAVE_NAME").unwrap_or("nitro".to_string()); let run_args = RunEnclavesArgs { @@ -83,7 +81,7 @@ fn boot() -> String { memory_mib: memory_mib.parse::().unwrap(), cpu_ids: None, debug_mode: debug_mode.parse::().unwrap(), - attach_console: attach_console.parse::().unwrap(), // TODO: I think we don't want this variable, remove once debug is done + attach_console: false, cpu_count: Some(cpu_count.parse::().unwrap()), enclave_name: Some(enclave_name.clone()), }; diff --git a/src/qos_nsm/src/nitro/error.rs b/src/qos_nsm/src/nitro/error.rs index 88639782..6ada2a55 100644 --- a/src/qos_nsm/src/nitro/error.rs +++ b/src/qos_nsm/src/nitro/error.rs @@ -55,7 +55,7 @@ pub enum AttestError { /// The attestation doc does not contain a pcr0. MissingPcr0, /// The pcr3 in the attestation doc does not match. - DifferentPcr0, + DifferentPcr0(String, String), // TODO: DEBUG: ales - remove later /// The attestation doc does not have a pcr1. MissingPcr1, /// The attestation doc has a different pcr1. diff --git a/src/qos_nsm/src/nitro/mod.rs b/src/qos_nsm/src/nitro/mod.rs index e5153f23..d44d57d7 100644 --- a/src/qos_nsm/src/nitro/mod.rs +++ b/src/qos_nsm/src/nitro/mod.rs @@ -87,15 +87,17 @@ pub fn verify_attestation_doc_against_user_input( return Err(AttestError::UnexpectedAttestationDocNonce); } - if pcr0 - != attestation_doc - .pcrs - .get(&0) - .ok_or(AttestError::MissingPcr0)? - .clone() - .into_vec() - { - return Err(AttestError::DifferentPcr0); + let doc_pcr0 = attestation_doc + .pcrs + .get(&0) + .ok_or(AttestError::MissingPcr0)? 
+ .clone() + .into_vec(); + if pcr0 != doc_pcr0 { + return Err(AttestError::DifferentPcr0( + qos_hex::encode(pcr0), + qos_hex::encode(&doc_pcr0), + )); } // pcr1 matches @@ -707,7 +709,7 @@ mod test { .unwrap_err(); match err { - AttestError::DifferentPcr0 => (), + AttestError::DifferentPcr0(_, _) => (), _ => panic!(), } } From 87cb1599d29ce6755eb1e5267b267a391e105888 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Wed, 2 Jul 2025 13:32:25 -0700 Subject: [PATCH 09/20] use thread with Reaper::async_execute fixes a loop problem with async also adds integration test to test out qos_host and qos_core (aka Reaper) --- src/init/init.rs | 3 +- src/integration/tests/async_client.rs | 84 +++++++++++++++++++++++++ src/integration/tests/async_qos_host.rs | 66 +++++++++++++++++++ src/integration/tests/qos_host.rs | 59 +++++++++++++++++ src/qos_core/src/bin/async_qos_core.rs | 5 +- src/qos_core/src/cli.rs | 5 +- src/qos_core/src/io/async_stream.rs | 65 +++++++++++-------- src/qos_core/src/lib.rs | 3 +- src/qos_core/src/reaper.rs | 82 +++++++++++++----------- src/qos_host/src/async_host.rs | 2 +- src/qos_host/src/cli.rs | 17 ++++- 11 files changed, 317 insertions(+), 74 deletions(-) create mode 100644 src/integration/tests/async_client.rs create mode 100644 src/integration/tests/async_qos_host.rs create mode 100644 src/integration/tests/qos_host.rs diff --git a/src/init/init.rs b/src/init/init.rs index 4c8f72f1..5e1d7653 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -116,8 +116,7 @@ async fn main() { TimeVal::seconds(5), ); - Reaper::async_execute(&handles, Box::new(Nsm), core_pool, app_pool, None) - .await; + Reaper::async_execute(&handles, Box::new(Nsm), core_pool, app_pool, None); reboot(); } diff --git a/src/integration/tests/async_client.rs b/src/integration/tests/async_client.rs new file mode 100644 index 00000000..da9723b2 --- /dev/null +++ b/src/integration/tests/async_client.rs @@ -0,0 +1,84 @@ +use qos_core::{ + async_client::AsyncClient, + 
async_server::{AsyncRequestProcessor, AsyncSocketServer}, + io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + server::SocketServerError, +}; +use tokio::task::JoinHandle; + +#[derive(Clone)] +struct EchoProcessor; + +impl AsyncRequestProcessor for EchoProcessor { + async fn process(&self, request: Vec) -> Vec { + request + } +} + +async fn run_echo_server( + socket_path: &str, +) -> Result>>, SocketServerError> { + let timeout = TimeVal::milliseconds(50); + let pool = AsyncStreamPool::new( + std::iter::once(SocketAddress::new_unix(socket_path)), + timeout, + ); + let tasks = AsyncSocketServer::listen_all(pool, &EchoProcessor)?; + + Ok(tasks) +} + +#[tokio::test] +async fn direct_connect_works() { + let socket_path = "/tmp/async_client_test_direct_connect_works.sock"; + let sockets = std::iter::once(SocketAddress::new_unix(socket_path)); + let timeout = TimeVal::milliseconds(50); + let pool = AsyncStreamPool::new(sockets, timeout).shared(); + + let client = AsyncClient::new(pool); + + let server_tasks = run_echo_server(socket_path).await.unwrap(); + + let r = client.call(&[0]).await; + assert!(r.is_ok()); + + for task in server_tasks { + task.abort(); + } +} + +#[tokio::test] +async fn times_out_properly() { + let socket_path = "/tmp/async_client_test_times_out_properly.sock"; + let sockets = std::iter::once(SocketAddress::new_unix(socket_path)); + let timeout = TimeVal::milliseconds(50); + let pool = AsyncStreamPool::new(sockets, timeout).shared(); + let client = AsyncClient::new(pool); + + let r = client.call(&[0]).await; + assert!(r.is_err()); +} + +#[tokio::test] +async fn repeat_connect_works() { + let socket_path = "/tmp/async_client_test_repeat_connect_works.sock"; + let sockets = std::iter::once(SocketAddress::new_unix(socket_path)); + let timeout = TimeVal::milliseconds(50); + let pool = AsyncStreamPool::new(sockets, timeout).shared(); + let client = AsyncClient::new(pool); + + // server not running yet, expect a connection error + let r = 
client.call(&[0]).await; + assert!(r.is_err()); + + // start server + let server_tasks = run_echo_server(socket_path).await.unwrap(); + + // server running, expect success + let r = client.call(&[0]).await; + assert!(r.is_ok()); + + for task in server_tasks { + task.abort(); + } +} diff --git a/src/integration/tests/async_qos_host.rs b/src/integration/tests/async_qos_host.rs new file mode 100644 index 00000000..e9a72a60 --- /dev/null +++ b/src/integration/tests/async_qos_host.rs @@ -0,0 +1,66 @@ +use std::{process::Command, time::Duration}; + +use integration::PIVOT_OK_PATH; +use qos_test_primitives::{ChildWrapper, PathWrapper}; + +const TEST_ENCLAVE_SOCKET: &str = "/tmp/async_qos_host_test.enclave.sock"; +const POOL_SIZE: &str = "1"; + +#[tokio::test] +async fn connects_and_gets_info() { + let _qos_host: ChildWrapper = + Command::new("../target/debug/async_qos_host") + .arg("--usock") + .arg(TEST_ENCLAVE_SOCKET) + .arg("--pool-size") + .arg(POOL_SIZE) + .arg("--host-ip") + .arg("127.0.0.1") + .arg("--host-port") + .arg("3323") + .arg("--socket-timeout") + .arg("50") // ms + .spawn() + .unwrap() + .into(); + + tokio::time::sleep(Duration::from_millis(100)).await; // let the qos_host start + + let r = ureq::get("http://127.0.0.1:3323/qos/enclave-info").call(); + assert!(r.is_err()); // expect 500 here + + let enclave_socket = format!("{TEST_ENCLAVE_SOCKET}_0"); // manually pick the 1st one + let secret_path: PathWrapper = "./async_qos_host_test.secret".into(); + // let eph_path = "reaper_works.eph.key"; + let manifest_path: PathWrapper = "async_qos_host_test..manifest".into(); + + // For our sanity, ensure the secret does not yet exist + drop(std::fs::remove_file(&*secret_path)); + // Remove the socket file if it exists as well, in case of bad crashes + drop(std::fs::remove_file(&enclave_socket)); + + let mut _enclave_child_process: ChildWrapper = + Command::new("../target/debug/async_qos_core") + .args([ + "--usock", + TEST_ENCLAVE_SOCKET, + "--quorum-file", + 
&*secret_path, + "--pivot-file", + PIVOT_OK_PATH, + "--ephemeral-file", + "eph_path", + "--mock", + "--manifest-file", + &*manifest_path, + ]) + .spawn() + .unwrap() + .into(); + + // Give the enclave server time to bind to the socket + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + + let r = ureq::get("http://127.0.0.1:3323/qos/enclave-info").call(); + assert!(r.is_ok()); // expect 200 here +} diff --git a/src/integration/tests/qos_host.rs b/src/integration/tests/qos_host.rs new file mode 100644 index 00000000..e97a4fee --- /dev/null +++ b/src/integration/tests/qos_host.rs @@ -0,0 +1,59 @@ +use std::{process::Command, time::Duration}; + +use integration::PIVOT_OK_PATH; +use qos_test_primitives::{ChildWrapper, PathWrapper}; + +const TEST_ENCLAVE_SOCKET: &str = "/tmp/qos_host_test.enclave.sock"; + +#[test] +fn connects_and_gets_info() { + let _qos_host: ChildWrapper = Command::new("../target/debug/qos_host") + .arg("--usock") + .arg(TEST_ENCLAVE_SOCKET) + .arg("--host-ip") + .arg("127.0.0.1") + .arg("--host-port") + .arg("3323") + .arg("--socket-timeout") + .arg("50") // ms + .spawn() + .unwrap() + .into(); + + std::thread::sleep(Duration::from_millis(100)); // let the qos_host start + + let r = ureq::get("http://127.0.0.1:3323/qos/enclave-info").call(); + assert!(r.is_err()); // expect 500 here + + let secret_path: PathWrapper = "./reaper_works.secret".into(); + // let eph_path = "reaper_works.eph.key"; + let manifest_path: PathWrapper = "reaper_works.manifest".into(); + + // For our sanity, ensure the secret does not yet exist + drop(std::fs::remove_file(&*secret_path)); + + let mut _enclave_child_process: ChildWrapper = + Command::new("../target/debug/qos_core") + .args([ + "--usock", + TEST_ENCLAVE_SOCKET, + "--quorum-file", + &*secret_path, + "--pivot-file", + PIVOT_OK_PATH, + "--ephemeral-file", + "eph_path", + "--mock", + "--manifest-file", + &*manifest_path, + ]) + .spawn() + .unwrap() + .into(); + + // Give the enclave server time to 
bind to the socket + std::thread::sleep(std::time::Duration::from_millis(200)); + + let r = ureq::get("http://127.0.0.1:3323/qos/enclave-info").call(); + assert!(r.is_ok()); // expect 200 here +} diff --git a/src/qos_core/src/bin/async_qos_core.rs b/src/qos_core/src/bin/async_qos_core.rs index 7e87d43f..4fec305b 100644 --- a/src/qos_core/src/bin/async_qos_core.rs +++ b/src/qos_core/src/bin/async_qos_core.rs @@ -1,6 +1,5 @@ use qos_core::cli::CLI; -#[tokio::main] -async fn main() { - CLI::async_execute().await; +fn main() { + CLI::async_execute(); } diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index fb005563..5d0ba782 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -203,7 +203,7 @@ impl CLI { /// Execute the enclave server CLI with the environment args using tokio/async #[cfg(feature = "async")] - pub async fn async_execute() { + pub fn async_execute() { let mut args: Vec = env::args().collect(); let opts = EnclaveOpts::new(&mut args); @@ -223,8 +223,7 @@ impl CLI { opts.async_pool(false), opts.async_pool(true), None, - ) - .await; + ); } } } diff --git a/src/qos_core/src/io/async_stream.rs b/src/qos_core/src/io/async_stream.rs index 593d5ee9..4b79e85f 100644 --- a/src/qos_core/src/io/async_stream.rs +++ b/src/qos_core/src/io/async_stream.rs @@ -136,7 +136,6 @@ impl AsyncStream { if self.inner.is_none() { self.connect().await?; } - self.send(req_buf).await?; self.recv().await } @@ -256,7 +255,7 @@ pub struct AsyncListener { impl AsyncListener { /// Bind and listen on the given address. 
pub(crate) fn listen(addr: &SocketAddress) -> Result { - let listener = match addr { + let listener = match *addr { SocketAddress::Unix(uaddr) => { let path = uaddr.path().ok_or(IOError::ConnectAddressInvalid)?; @@ -265,8 +264,6 @@ impl AsyncListener { } #[cfg(feature = "vm")] SocketAddress::Vsock(vaddr) => { - let vaddr = - tokio_vsock::VsockAddr::new(vaddr.cid(), vaddr.port()); let inner = InnerListener::Vsock(VsockListener::bind(vaddr)?); Self { inner } } @@ -326,17 +323,26 @@ async fn retry_unix_connect( let socket = UnixSocket::new_stream()?; eprintln!("Attempting USOCK connect to: {:?}", addr.path()); - match tokio::time::timeout(timeout, socket.connect(path)).await? { - Ok(stream) => { - eprintln!("Connected to USOCK at: {:?}", addr.path()); - return Ok(stream); - } - Err(err) => { - eprintln!("Error connecting to USOCK: {err}"); - if SystemTime::now() > eot { - return Err(err); + let tr = tokio::time::timeout(timeout, socket.connect(path)).await; + match tr { + Ok(r) => match r { + Ok(stream) => { + eprintln!("Connected to USOCK at: {:?}", addr.path()); + return Ok(stream); } - tokio::time::sleep(sleep_time).await; + Err(err) => { + eprintln!("Error connecting to USOCK: {err}"); + if SystemTime::now() > eot { + return Err(err); + } + tokio::time::sleep(sleep_time).await; + } + }, + Err(err) => { + eprintln!( + "Connecting to USOCK failed with timeout error: {err}" + ); + return Err(err.into()); } } } @@ -354,18 +360,27 @@ async fn retry_vsock_connect( loop { eprintln!("Attempting VSOCK connect to: {:?}", addr); - match tokio::time::timeout(timeout, VsockStream::connect(*addr)).await? 
- { - Ok(stream) => { - eprintln!("Connected to VSOCK at: {:?}", addr); - return Ok(stream); - } - Err(err) => { - eprintln!("Error connecting to VSOCK: {}", err); - if SystemTime::now() > eot { - return Err(err); + let tr = + tokio::time::timeout(timeout, VsockStream::connect(*addr)).await; + match tr { + Ok(r) => match r { + Ok(stream) => { + eprintln!("Connected to VSOCK at: {:?}", addr); + return Ok(stream); + } + Err(err) => { + eprintln!("Error connecting to VSOCK: {}", err); + if SystemTime::now() > eot { + return Err(err); + } + tokio::time::sleep(sleep_time).await; } - tokio::time::sleep(sleep_time).await; + }, + Err(err) => { + eprintln!( + "Connecting to VSOCK failed with timeout error: {err}" + ); + return Err(err.into()); } } } diff --git a/src/qos_core/src/lib.rs b/src/qos_core/src/lib.rs index 76c08438..fc9e7cc0 100644 --- a/src/qos_core/src/lib.rs +++ b/src/qos_core/src/lib.rs @@ -65,7 +65,8 @@ pub const SEC_APP_SOCK: &str = "./local-enclave/sec_app.sock"; /// Default socket for enclave <-> secure app communication. #[cfg(feature = "vm")] pub const SEC_APP_SOCK: &str = "/sec_app.sock"; - +/// Default socket connect timeout in milliseconds +pub const DEFAULT_SOCKET_TIMEOUT: &str = "5000"; /// Default socket pool size is 20 #[cfg(feature = "async")] pub const DEFAULT_POOL_SIZE: &str = "1"; // DEBUG: ales - set to something real after debugging diff --git a/src/qos_core/src/reaper.rs b/src/qos_core/src/reaper.rs index ca37e6ca..39226184 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -112,8 +112,7 @@ impl Reaper { #[cfg(feature = "async")] mod inner { - use std::sync::Arc; - use tokio::sync::RwLock; + use std::sync::{Arc, RwLock}; #[allow(clippy::wildcard_imports)] use super::*; @@ -124,14 +123,14 @@ mod inner { }; impl Reaper { - /// Run the Reaper in an async way using Tokio runtime. + /// Run the Reaper using Tokio inside a thread for server processing. /// /// # Panics /// /// - If spawning the pivot errors. 
/// - If waiting for the pivot errors. #[allow(dead_code)] - pub async fn async_execute( + pub fn async_execute( handles: &Handles, nsm: Box, pool: AsyncStreamPool, @@ -142,36 +141,45 @@ mod inner { let quit = Arc::new(RwLock::new(false)); let inner_quit = quit.clone(); - tokio::spawn(async move { - // create the state - let protocol_state = ProtocolState::new( - nsm, - handles2, - test_only_init_phase_override, - ); - // send a shared version of state and the async pool to each processor - let processor = AsyncProcessor::new( - protocol_state.shared(), - app_pool.shared(), - ); - // listen_all will multiplex the processor accross all sockets - let tasks = AsyncSocketServer::listen_all(pool, &processor) - .expect("unable to get listen task list"); - - match tokio::signal::ctrl_c().await { - Ok(()) => { - eprintln!("handling ctrl+c the tokio way"); - for task in tasks { - task.abort(); + std::thread::spawn(move || { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap() + .block_on(async move { + // run the state processor inside a tokio runtime in this thread + // create the state + let protocol_state = ProtocolState::new( + nsm, + handles2, + test_only_init_phase_override, + ); + // send a shared version of state and the async pool to each processor + let processor = AsyncProcessor::new( + protocol_state.shared(), + app_pool.shared(), + ); + // listen_all will multiplex the processor accross all sockets + let tasks = + AsyncSocketServer::listen_all(pool, &processor) + .expect("unable to get listen task list"); + + match tokio::signal::ctrl_c().await { + Ok(()) => { + eprintln!("handling ctrl+c the tokio way"); + for task in tasks { + task.abort(); + } + *inner_quit.write().unwrap() = true; + } + Err(err) => panic!("{err}"), } - *inner_quit.write().await = true; - } - Err(err) => panic!("{err}"), - } + }); }); loop { - if *quit.read().await { + // helper for integration tests and manual runs aka qos_core binary + if 
*quit.read().unwrap() { eprintln!("quit called by ctrl+c"); std::process::exit(1); } @@ -185,7 +193,9 @@ mod inner { break; } - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + eprintln!("Reaper looping"); + std::thread::sleep(std::time::Duration::from_secs(1)); + eprintln!("Reaper done looping"); } println!("Reaper::execute about to spawn pivot"); @@ -210,10 +220,9 @@ mod inner { // pause to ensure OS has enough time to clean up resources // before restarting - tokio::time::sleep(std::time::Duration::from_secs( + std::thread::sleep(std::time::Duration::from_secs( REAPER_RESTART_DELAY_IN_SECONDS, - )) - .await; + )); println!("Restarting pivot ..."); }, @@ -227,10 +236,9 @@ mod inner { } } - tokio::time::sleep(std::time::Duration::from_secs( + std::thread::sleep(std::time::Duration::from_secs( REAPER_EXIT_DELAY_IN_SECONDS, - )) - .await; + )); println!("Reaper exiting ... "); } } diff --git a/src/qos_host/src/async_host.rs b/src/qos_host/src/async_host.rs index 1640504c..ee16c4a3 100644 --- a/src/qos_host/src/async_host.rs +++ b/src/qos_host/src/async_host.rs @@ -92,7 +92,7 @@ impl AsyncHostServer { .layer(DefaultBodyLimit::disable()) .with_state(state); - println!("HostServer listening on {}", self.addr); + println!("AsyncHostServer listening on {}", self.addr); axum::Server::bind(&self.addr) .serve(app.into_make_service()) diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index 527b7140..b4ddce61 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -20,6 +20,7 @@ const HOST_PORT: &str = "host-port"; const ENDPOINT_BASE_PATH: &str = "endpoint-base-path"; const VSOCK_TO_HOST: &str = "vsock-to-host"; const POOL_SIZE: &str = "pool-size"; +const SOCKET_TIMEOUT: &str = "socket-timeout"; struct HostParser; impl GetParserForOptions for HostParser { @@ -61,6 +62,11 @@ impl GetParserForOptions for HostParser { .takes_value(true) .default_value(qos_core::DEFAULT_POOL_SIZE) ) + .token( + Token::new(SOCKET_TIMEOUT, "maximum time in 
ms a connect to the USOCK/VSOCK will take") + .takes_value(true) + .default_value(qos_core::DEFAULT_SOCKET_TIMEOUT) + ) .token( Token::new(VSOCK_TO_HOST, "whether to add the to-host svm flag to the enclave vsock connection. Valid options are `true` or `false`") .takes_value(true) @@ -118,6 +124,13 @@ impl HostOpts { #[cfg(feature = "async")] pub(crate) fn enclave_pool(&self) -> AsyncStreamPool { use qos_core::io::{TimeVal, TimeValLike}; + + let default_timeout = &qos_core::DEFAULT_SOCKET_TIMEOUT.to_owned(); + let timeout_str = + self.parsed.single(SOCKET_TIMEOUT).unwrap_or(&default_timeout); + let timeout = TimeVal::milliseconds( + timeout_str.parse().expect("invalid timeout value"), + ); let pool_size: u32 = self .parsed .single(POOL_SIZE) @@ -138,7 +151,7 @@ impl HostOpts { SocketAddress::new_vsock(c, p, self.to_host_flag()) }); - AsyncStreamPool::new(addresses, TimeVal::seconds(5)) + AsyncStreamPool::new(addresses, timeout) } (None, None, Some(u)) => { let addresses = (0..pool_size).map(|i| { @@ -146,7 +159,7 @@ impl HostOpts { SocketAddress::new_unix(&u) }); - AsyncStreamPool::new(addresses, TimeVal::seconds(5)) + AsyncStreamPool::new(addresses, timeout) } _ => panic!("Invalid socket opts"), } From 1949ced0f19e1536e0e3fa27dd98361eaee3aef7 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 3 Jul 2025 14:40:32 -0700 Subject: [PATCH 10/20] add boot_enclave integration example allows easier local testing --- src/integration/examples/boot_enclave.rs | 454 +++++++++++++++++++++++ src/qos_core/src/async_client.rs | 2 - 2 files changed, 454 insertions(+), 2 deletions(-) create mode 100644 src/integration/examples/boot_enclave.rs diff --git a/src/integration/examples/boot_enclave.rs b/src/integration/examples/boot_enclave.rs new file mode 100644 index 00000000..9cc892d4 --- /dev/null +++ b/src/integration/examples/boot_enclave.rs @@ -0,0 +1,454 @@ +use std::{ + fs, + io::{BufRead, BufReader, Write}, + process::{Command, Stdio}, +}; + +use borsh::de::BorshDeserialize; 
+use integration::{LOCAL_HOST, PCR3_PRE_IMAGE_PATH, QOS_DIST_DIR}; +use qos_core::protocol::{ + services::{ + boot::{Approval, Manifest, ManifestSet, Namespace, ShareSet}, + genesis::{GenesisMemberOutput, GenesisOutput}, + }, + ProtocolPhase, QosHash, +}; +use qos_crypto::sha_256; +use qos_host::EnclaveInfo; +use qos_p256::P256Pair; +use qos_test_primitives::{ChildWrapper, PathWrapper}; + +#[tokio::main] +async fn main() { + let pivot_file_path = + std::env::args().nth(1).expect("No pivot file path provided"); + + const PIVOT_HASH_PATH: &str = "/tmp/enclave-example-pivot-hash.txt"; + + let host_port = 3001; + let tmp: PathWrapper = "/tmp/enclave-example".into(); + let _: PathWrapper = PIVOT_HASH_PATH.into(); + fs::create_dir_all(&*tmp).unwrap(); + + let usock: PathWrapper = "/tmp/enclave-example/example.sock".into(); + let app_usock: PathWrapper = "/tmp/enclave-example/example-app.sock".into(); + let secret_path: PathWrapper = "/tmp/enclave-example/example.secret".into(); + let pivot_path: PathWrapper = "/tmp/enclave-example/example.pivot".into(); + let manifest_path: PathWrapper = + "/tmp/enclave-example/example.manifest".into(); + let eph_path: PathWrapper = + "/tmp/enclave-example/ephemeral_key.secret".into(); + + let boot_dir: PathWrapper = "/tmp/enclave-example/boot-dir".into(); + fs::create_dir_all(&*boot_dir).unwrap(); + let attestation_dir: PathWrapper = + "/tmp/enclave-example/attestation-dir".into(); + fs::create_dir_all(&*attestation_dir).unwrap(); + let attestation_doc_path = format!("{}/attestation_doc", &*attestation_dir); + + let all_personal_dir = "./mock/boot-e2e/all-personal-dir"; + + let namespace = "quit-coding-to-vape"; + + let personal_dir = |user: &str| format!("{all_personal_dir}/{user}-dir"); + + let user1 = "user1"; + let user2 = "user2"; + let user3 = "user3"; + + // -- Create pivot-build-fingerprints.txt + let pivot = fs::read(&pivot_file_path).unwrap(); + let mock_pivot_hash = sha_256(&pivot); + let pivot_hash = 
qos_hex::encode_to_vec(&mock_pivot_hash); + std::fs::write(PIVOT_HASH_PATH, pivot_hash).unwrap(); + + // -- CLIENT create manifest. + let pivot_args = std::env::args().nth(2).expect("No pivot args provided"); + let cli_manifest_path = format!("{}/manifest", &*boot_dir); + + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest", + "--nonce", + "2", + "--namespace", + namespace, + "--restart-policy", + "never", + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-path", + &cli_manifest_path, + "--pivot-args", + &pivot_args, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // Check the manifest written to file + let manifest = + Manifest::try_from_slice(&fs::read(&cli_manifest_path).unwrap()) + .unwrap(); + + let genesis_output = { + let contents = + fs::read("./mock/boot-e2e/genesis-dir/genesis_output").unwrap(); + GenesisOutput::try_from_slice(&contents).unwrap() + }; + // For simplicity sake, we use the same keys for the share set and manifest + // set. + let mut members: Vec<_> = genesis_output + .member_outputs + .iter() + .cloned() + .map(|GenesisMemberOutput { share_set_member, .. 
}| share_set_member) + .collect(); + members.sort(); + + let namespace_field = Namespace { + name: namespace.to_string(), + nonce: 2, + quorum_key: genesis_output.quorum_key, + }; + assert_eq!(manifest.namespace, namespace_field); + let manifest_set = ManifestSet { threshold: 2, members: members.clone() }; + assert_eq!(manifest.manifest_set, manifest_set); + let share_set = ShareSet { threshold: 2, members }; + assert_eq!(manifest.share_set, share_set); + + // -- CLIENT make sure each user can run `approve-manifest` + for alias in [user1, user2, user3] { + let approval_path = format!( + "{}/{}-{}-{}.approval", + &*boot_dir, alias, namespace, manifest.namespace.nonce, + ); + + let secret_path = format!("{}/{}.secret", &personal_dir(alias), alias); + + let mut child = Command::new("../target/debug/qos_client") + .args([ + "approve-manifest", + "--secret-path", + &*secret_path, + "--manifest-path", + &cli_manifest_path, + "--manifest-approvals-dir", + &*boot_dir, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub", + "--alias", + alias, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? (y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? 
(y/n)" + ); + // On purpose, try to input a bad value, neither yes or no + stdin + .write_all("maybe\n".as_bytes()) + .expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Please answer with either \"yes\" (y) or \"no\" (n)" + ); + // Try the longer option ("yes" rather than "y") + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct pivot restart policy: RestartPolicy::Never? (y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Are these the correct pivot args:" + ); + stdout.next().unwrap().unwrap(); // pivot args confirm msg + assert_eq!(&stdout.next().unwrap().unwrap(), "(y/n)"); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + // Wait for the command to write the approval and exit + assert!(child.wait().unwrap().success()); + + // Read in the generated approval to check it was created correctly + let approval = + Approval::try_from_slice(&fs::read(approval_path).unwrap()) + .unwrap(); + let personal_pair = P256Pair::from_hex_file(format!( + "{}/{}.secret", + personal_dir(alias), + alias, + )) + .unwrap(); + + let signature = personal_pair.sign(&manifest.qos_hash()).unwrap(); + assert_eq!(approval.signature, signature); + + assert_eq!(approval.member.alias, alias); + assert_eq!( + approval.member.pub_key, + personal_pair.public_key().to_bytes(), + ); + } + + // -- ENCLAVE start enclave + let mut _enclave_child_process: ChildWrapper = + Command::new("../target/debug/async_qos_core") + .args([ + "--usock", + &*usock, + "--app-usock", + &*app_usock, + "--quorum-file", + &*secret_path, + "--pivot-file", + &*pivot_path, + "--ephemeral-file", + &*eph_path, + "--mock", + "--manifest-file", + &*manifest_path, + ]) + .spawn() + .unwrap() + .into(); + + // -- HOST start host + let mut _host_child_process: ChildWrapper = + 
Command::new("../target/debug/async_qos_host") + .args([ + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--usock", + &*usock, + ]) + .spawn() + .unwrap() + .into(); + + // -- Make sure the enclave and host have time to boot + qos_test_primitives::wait_until_port_is_bound(host_port); + + // -- CLIENT generate the manifest envelope + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest-envelope", + "--manifest-approvals-dir", + &*boot_dir, + "--manifest-path", + &cli_manifest_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // -- CLIENT broadcast boot standard instruction + let manifest_envelope_path = format!("{}/manifest_envelope", &*boot_dir,); + assert!(Command::new("../target/debug/qos_client") + .args([ + "boot-standard", + "--manifest-envelope-path", + &manifest_envelope_path, + "--pivot-path", + &pivot_file_path, + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--pcr3-preimage-path", + "./mock/pcr3-preimage.txt", + "--unsafe-skip-attestation", + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + for user in [&user1, &user2] { + // Get attestation doc and manifest + assert!(Command::new("../target/debug/qos_client") + .args([ + "get-attestation-doc", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--attestation-doc-path", + &*attestation_doc_path, + "--manifest-envelope-path", + "/tmp/dont_care" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + let share_path = format!("{}/{}.share", &personal_dir(user), user); + let secret_path = format!("{}/{}.secret", &personal_dir(user), user); + let eph_wrapped_share_path: PathWrapper = + format!("{}/{}.eph_wrapped.share", &*tmp, user).into(); + let approval_path: PathWrapper = + format!("{}/{}.attestation.approval", &*tmp, user).into(); + // Encrypt share to ephemeral key + let mut child = Command::new("../target/debug/qos_client") + .args([ + 
"proxy-re-encrypt-share", + "--share-path", + &share_path, + "--secret-path", + &secret_path, + "--attestation-doc-path", + &*attestation_doc_path, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + "--manifest-envelope-path", + &manifest_envelope_path, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--alias", + user, + "--unsafe-skip-attestation", + "--unsafe-eph-path-override", + &*eph_path, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + // Skip over a log message + stdout.next(); + + // Answer prompts with yes + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Does this AWS IAM role belong to the intended organization: arn:aws:iam::123456789012:role/Webserver? 
(y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "The following manifest set members approved:" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + // Check that it finished successfully + assert!(child.wait().unwrap().success()); + + // Post the encrypted share + assert!(Command::new("../target/debug/qos_client") + .args([ + "post-share", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + } + + // Give the enclave time to start the pivot + std::thread::sleep(std::time::Duration::from_secs(2)); + + let enclave_info_url = + format!("http://{LOCAL_HOST}:{}/qos/enclave-info", host_port); + let enclave_info: EnclaveInfo = + ureq::get(&enclave_info_url).call().unwrap().into_json().unwrap(); + assert_eq!(enclave_info.phase, ProtocolPhase::QuorumKeyProvisioned); + + eprintln!("=========ENCLAVE READY WITH PIVOT RUNNING!!=========="); + eprintln!("press ctrl+c to quit"); + + // drop(_host_child_process); + + match tokio::signal::ctrl_c().await { + Ok(()) => {} + Err(err) => panic!("{err}"), + } +} diff --git a/src/qos_core/src/async_client.rs b/src/qos_core/src/async_client.rs index ff875a30..a23e9946 100644 --- a/src/qos_core/src/async_client.rs +++ b/src/qos_core/src/async_client.rs @@ -20,9 +20,7 @@ impl AsyncClient { /// timeout. 
pub async fn call(&self, request: &[u8]) -> Result, ClientError> { // TODO: ales - remove later, debug reasons - eprintln!("AsyncClient::call - Attempting to claim pool read lock"); let pool = self.pool.read().await; - eprintln!("AsyncClient::call - Attempting to claim pool stream"); let mut stream = pool.get().await; eprintln!("AsyncClient::call - Stream aquired"); From 58f575b68b249f188bf349e6481cfdb5e65fcc29 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 3 Jul 2025 15:14:59 -0700 Subject: [PATCH 11/20] hardcode init to 2 ports for now --- src/init/init.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/init/init.rs b/src/init/init.rs index 5e1d7653..a32b2ea2 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -99,9 +99,7 @@ async fn main() { ); let start_port = 3; - let default_pool_size = qos_core::DEFAULT_POOL_SIZE - .parse() - .expect("unable to parse default pool size"); + let default_pool_size = 2; // 1 for qos-host, 1 for -host, more are added as needed let core_pool = AsyncStreamPool::new( (start_port..start_port + default_pool_size) .into_iter() From c4a7911578f3030e2217d34a7d627fecb9f22086 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 3 Jul 2025 15:53:12 -0700 Subject: [PATCH 12/20] expand default sockets setup to 20 + 1 for qos-host to test the parallelism with tls-fetcher --- src/init/init.rs | 2 +- src/qos_core/src/lib.rs | 4 ++-- src/qos_host/src/cli.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/init/init.rs b/src/init/init.rs index a32b2ea2..6b15b35d 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -99,7 +99,7 @@ async fn main() { ); let start_port = 3; - let default_pool_size = 2; // 1 for qos-host, 1 for -host, more are added as needed + let default_pool_size = 21; // 1 for qos-host, 20 for -host, more are added as needed TODO: make the add logic dynamic let core_pool = AsyncStreamPool::new( (start_port..start_port + default_pool_size) .into_iter() diff --git 
a/src/qos_core/src/lib.rs b/src/qos_core/src/lib.rs index fc9e7cc0..aa192a9b 100644 --- a/src/qos_core/src/lib.rs +++ b/src/qos_core/src/lib.rs @@ -69,7 +69,7 @@ pub const SEC_APP_SOCK: &str = "/sec_app.sock"; pub const DEFAULT_SOCKET_TIMEOUT: &str = "5000"; /// Default socket pool size is 20 #[cfg(feature = "async")] -pub const DEFAULT_POOL_SIZE: &str = "1"; // DEBUG: ales - set to something real after debugging +pub const DEFAULT_POOL_SIZE: &str = "20"; // DEBUG: ales - set to something real after debugging /// Default socket pool size is 0 for sync (unused) #[cfg(not(feature = "async"))] -pub const DEFAULT_POOL_SIZE: &str = "0"; +pub const DEFAULT_POOL_SIZE: &str = "1"; diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index b4ddce61..3d8ac10b 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -60,7 +60,7 @@ impl GetParserForOptions for HostParser { .token( Token::new(POOL_SIZE, "pool size for USOCK/VSOCK sockets") .takes_value(true) - .default_value(qos_core::DEFAULT_POOL_SIZE) + .default_value("1") // qos-host should default to 1 only ) .token( Token::new(SOCKET_TIMEOUT, "maximum time in ms a connect to the USOCK/VSOCK will take") From 2857b005c23ccf79548f99d9c5b2ad7baf15dbb7 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Wed, 9 Jul 2025 10:36:05 -0700 Subject: [PATCH 13/20] implement dynamic resizing async pool and server --- src/init/init.rs | 19 +- .../src/bin/pivot_async_remote_tls.rs | 16 +- src/integration/src/lib.rs | 15 +- src/integration/tests/async_boot.rs | 30 +- src/integration/tests/async_boot_hybrid.rs | 468 ++++++++++++++++++ src/integration/tests/async_client.rs | 45 +- src/integration/tests/async_qos_host.rs | 10 +- src/integration/tests/async_remote_tls.rs | 14 +- src/qos_core/src/async_server.rs | 54 +- src/qos_core/src/cli.rs | 71 +-- src/qos_core/src/io/async_pool.rs | 178 ++++++- src/qos_core/src/io/stream.rs | 72 ++- src/qos_core/src/lib.rs | 6 - src/qos_core/src/protocol/async_processor.rs | 62 +-- 
src/qos_core/src/protocol/async_state.rs | 1 - src/qos_core/src/protocol/error.rs | 2 + src/qos_core/src/reaper.rs | 166 ++++++- src/qos_core/src/server.rs | 2 +- src/qos_host/src/async_host.rs | 6 +- src/qos_host/src/cli.rs | 47 +- src/qos_net/src/async_cli.rs | 12 +- src/qos_net/src/async_proxy.rs | 13 +- src/qos_net/src/cli.rs | 26 +- 23 files changed, 1070 insertions(+), 265 deletions(-) create mode 100644 src/integration/tests/async_boot_hybrid.rs delete mode 100644 src/qos_core/src/protocol/async_state.rs diff --git a/src/init/init.rs b/src/init/init.rs index 6b15b35d..9d5f44b4 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -98,21 +98,20 @@ async fn main() { PIVOT_FILE.to_string(), ); - let start_port = 3; - let default_pool_size = 21; // 1 for qos-host, 20 for -host, more are added as needed TODO: make the add logic dynamic + let start_port = 3; // used for qos-host only! others follow 4+ for the -host let core_pool = AsyncStreamPool::new( - (start_port..start_port + default_pool_size) - .into_iter() - .map(|p| SocketAddress::new_vsock(cid, p, VMADDR_NO_FLAGS)), + SocketAddress::new_vsock(cid, start_port, VMADDR_NO_FLAGS), TimeVal::seconds(0), - ); + 1, // start at pool size 1, grow based on manifest/args as necessary (see Reaper) + ) + .expect("unable to create core pool"); let app_pool = AsyncStreamPool::new( - (0..default_pool_size) - .into_iter() - .map(|p| SocketAddress::new_unix(&format!("{SEC_APP_SOCK}_{p}"))), + SocketAddress::new_unix(SEC_APP_SOCK), TimeVal::seconds(5), - ); + 1, // start at pool size 1, grow based on manifest/args as necessary (see Reaper) + ) + .expect("unable to create app pool"); Reaper::async_execute(&handles, Box::new(Nsm), core_pool, app_pool, None); diff --git a/src/integration/src/bin/pivot_async_remote_tls.rs b/src/integration/src/bin/pivot_async_remote_tls.rs index bee5d9fa..21a0b1b2 100644 --- a/src/integration/src/bin/pivot_async_remote_tls.rs +++ b/src/integration/src/bin/pivot_async_remote_tls.rs @@ -110,26 
+110,28 @@ async fn main() { let proxy_path: &String = &args[2]; let pool = AsyncStreamPool::new( - std::iter::once(SocketAddress::new_unix(socket_path)), + SocketAddress::new_unix(socket_path), TimeVal::seconds(0), // listener, no timeout - ); + 1, + ) + .expect("unable to create async stream pool"); let proxy_pool = AsyncStreamPool::new( - std::iter::once(SocketAddress::new_unix(proxy_path)), + SocketAddress::new_unix(proxy_path), TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), + 1, ) + .expect("unable to create async stream pool") .shared(); - let tasks = + let server = AsyncSocketServer::listen_all(pool, &Processor::new(proxy_pool)) .unwrap(); match tokio::signal::ctrl_c().await { Ok(_) => { eprintln!("pivot handling ctrl+c the tokio way"); - for task in tasks { - task.abort(); - } + server.terminate(); } Err(err) => panic!("{err}"), } diff --git a/src/integration/src/lib.rs b/src/integration/src/lib.rs index c08510d6..2b030151 100644 --- a/src/integration/src/lib.rs +++ b/src/integration/src/lib.rs @@ -57,6 +57,7 @@ pub const QOS_DIST_DIR: &str = "./mock/dist"; pub const PCR3_PRE_IMAGE_PATH: &str = "./mock/namespaces/pcr3-preimage.txt"; const MSG: &str = "msg"; +const POOL_SIZE: &str = "pool-size"; /// Request/Response messages for "socket stress" pivot app. 
#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq, Eq)] @@ -135,9 +136,17 @@ pub struct AdditionProofPayload { struct PivotParser; impl GetParserForOptions for PivotParser { fn parser() -> Parser { - Parser::new().token( - Token::new(MSG, "A msg to write").takes_value(true).required(true), - ) + Parser::new() + .token( + Token::new(MSG, "A msg to write") + .takes_value(true) + .required(true), + ) + .token( + Token::new(POOL_SIZE, "App pool size") + .takes_value(true) + .required(false), + ) } } diff --git a/src/integration/tests/async_boot.rs b/src/integration/tests/async_boot.rs index aac2aa37..319308cb 100644 --- a/src/integration/tests/async_boot.rs +++ b/src/integration/tests/async_boot.rs @@ -30,20 +30,23 @@ async fn async_standard_boot_e2e() { const PIVOT_HASH_PATH: &str = "/tmp/async_standard_boot_e2e-pivot-hash.txt"; let host_port = qos_test_primitives::find_free_port().unwrap(); - let tmp: PathWrapper = "/tmp/boot-e2e".into(); + let tmp: PathWrapper = "/tmp/async-boot-e2e".into(); let _: PathWrapper = PIVOT_OK5_SUCCESS_FILE.into(); let _: PathWrapper = PIVOT_HASH_PATH.into(); fs::create_dir_all(&*tmp).unwrap(); - let usock: PathWrapper = "/tmp/boot-e2e/boot_e2e.sock".into(); - let secret_path: PathWrapper = "/tmp/boot-e2e/boot_e2e.secret".into(); - let pivot_path: PathWrapper = "/tmp/boot-e2e/boot_e2e.pivot".into(); - let manifest_path: PathWrapper = "/tmp/boot-e2e/boot_e2e.manifest".into(); - let eph_path: PathWrapper = "/tmp/boot-e2e/ephemeral_key.secret".into(); + let usock: PathWrapper = "/tmp/async-boot-e2e/boot_e2e.sock".into(); + let secret_path: PathWrapper = "/tmp/async-boot-e2e/boot_e2e.secret".into(); + let pivot_path: PathWrapper = "/tmp/async-boot-e2e/boot_e2e.pivot".into(); + let manifest_path: PathWrapper = + "/tmp/async-boot-e2e/boot_e2e.manifest".into(); + let eph_path: PathWrapper = + "/tmp/async-boot-e2e/ephemeral_key.secret".into(); - let boot_dir: PathWrapper = "/tmp/boot-e2e/boot-dir".into(); + let boot_dir: PathWrapper = 
"/tmp/async-boot-e2e/boot-dir".into(); fs::create_dir_all(&*boot_dir).unwrap(); - let attestation_dir: PathWrapper = "/tmp/boot-e2e/attestation-dir".into(); + let attestation_dir: PathWrapper = + "/tmp/async-boot-e2e/attestation-dir".into(); fs::create_dir_all(&*attestation_dir).unwrap(); let attestation_doc_path = format!("{}/attestation_doc", &*attestation_dir); @@ -65,7 +68,7 @@ async fn async_standard_boot_e2e() { // -- CLIENT create manifest. let msg = "testing420"; - let pivot_args = format!("[--msg,{msg}]"); + let pivot_args = format!("[--msg,{msg},--pool-size,20]"); let cli_manifest_path = format!("{}/manifest", &*boot_dir); assert!(Command::new("../target/debug/qos_client") @@ -131,7 +134,12 @@ async fn async_standard_boot_e2e() { let pivot = PivotConfig { hash: mock_pivot_hash, restart: RestartPolicy::Never, - args: vec!["--msg".to_string(), msg.to_string()], + args: vec![ + "--msg".to_string(), + msg.to_string(), + "--pool-size".to_string(), + "20".to_string(), + ], }; assert_eq!(manifest.pivot, pivot); let manifest_set = ManifestSet { threshold: 2, members: members.clone() }; @@ -221,7 +229,7 @@ async fn async_standard_boot_e2e() { ); assert_eq!( &stdout.next().unwrap().unwrap(), - "[\"--msg\", \"testing420\"]?" + "[\"--msg\", \"testing420\", \"--pool-size\", \"20\"]?" 
); assert_eq!(&stdout.next().unwrap().unwrap(), "(y/n)"); stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); diff --git a/src/integration/tests/async_boot_hybrid.rs b/src/integration/tests/async_boot_hybrid.rs new file mode 100644 index 00000000..b2966bde --- /dev/null +++ b/src/integration/tests/async_boot_hybrid.rs @@ -0,0 +1,468 @@ +use std::{ + fs, + io::{BufRead, BufReader, Write}, + path::Path, + process::{Command, Stdio}, +}; + +use borsh::de::BorshDeserialize; +use integration::{ + LOCAL_HOST, PCR3_PRE_IMAGE_PATH, PIVOT_OK5_PATH, PIVOT_OK5_SUCCESS_FILE, + QOS_DIST_DIR, +}; +use qos_core::protocol::{ + services::{ + boot::{ + Approval, Manifest, ManifestSet, Namespace, PivotConfig, + RestartPolicy, ShareSet, + }, + genesis::{GenesisMemberOutput, GenesisOutput}, + }, + ProtocolPhase, QosHash, +}; +use qos_crypto::sha_256; +use qos_host::EnclaveInfo; +use qos_p256::P256Pair; +use qos_test_primitives::{ChildWrapper, PathWrapper}; + +#[tokio::test] +async fn async_standard_boot_hybrid_e2e() { + const PIVOT_HASH_PATH: &str = + "/tmp/async_standard_boot_hybrid_e2e-pivot-hash.txt"; + + let host_port = qos_test_primitives::find_free_port().unwrap(); + let tmp: PathWrapper = "/tmp/async-boot-hybrid-e2e".into(); + let _: PathWrapper = PIVOT_OK5_SUCCESS_FILE.into(); + let _: PathWrapper = PIVOT_HASH_PATH.into(); + fs::create_dir_all(&*tmp).unwrap(); + + let usock: PathWrapper = "/tmp/async-boo-hybrid-e2e/boot_e2e.sock".into(); + let secret_path: PathWrapper = + "/tmp/async-boo-hybrid-e2e/boot_e2e.secret".into(); + let pivot_path: PathWrapper = + "/tmp/async-boo-hybrid-e2e/boot_e2e.pivot".into(); + let manifest_path: PathWrapper = + "/tmp/async-boo-hybrid-e2e/boot_e2e.manifest".into(); + let eph_path: PathWrapper = + "/tmp/async-boo-hybrid-e2e/ephemeral_key.secret".into(); + + let boot_dir: PathWrapper = "/tmp/async-boo-hybrid-e2e/boot-dir".into(); + fs::create_dir_all(&*boot_dir).unwrap(); + let attestation_dir: PathWrapper = + 
"/tmp/async-boo-hybrid-e2e/attestation-dir".into(); + fs::create_dir_all(&*attestation_dir).unwrap(); + let attestation_doc_path = format!("{}/attestation_doc", &*attestation_dir); + + let all_personal_dir = "./mock/boot-e2e/all-personal-dir"; + + let namespace = "quit-coding-to-vape"; + + let personal_dir = |user: &str| format!("{all_personal_dir}/{user}-dir"); + + let user1 = "user1"; + let user2 = "user2"; + let user3 = "user3"; + + // -- Create pivot-build-fingerprints.txt + let pivot = fs::read(PIVOT_OK5_PATH).unwrap(); + let mock_pivot_hash = sha_256(&pivot); + let pivot_hash = qos_hex::encode_to_vec(&mock_pivot_hash); + std::fs::write(PIVOT_HASH_PATH, pivot_hash).unwrap(); + + // -- CLIENT create manifest. + let msg = "testing420"; + let pivot_args = format!("[--msg,{msg}]"); + let cli_manifest_path = format!("{}/manifest", &*boot_dir); + + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest", + "--nonce", + "2", + "--namespace", + namespace, + "--restart-policy", + "never", + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-path", + &cli_manifest_path, + "--pivot-args", + &pivot_args, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // Check the manifest written to file + let manifest = + Manifest::try_from_slice(&fs::read(&cli_manifest_path).unwrap()) + .unwrap(); + + let genesis_output = { + let contents = + fs::read("./mock/boot-e2e/genesis-dir/genesis_output").unwrap(); + GenesisOutput::try_from_slice(&contents).unwrap() + }; + // For simplicity sake, we use the same keys for the share set and manifest + // set. 
+ let mut members: Vec<_> = genesis_output + .member_outputs + .iter() + .cloned() + .map(|GenesisMemberOutput { share_set_member, .. }| share_set_member) + .collect(); + members.sort(); + + let namespace_field = Namespace { + name: namespace.to_string(), + nonce: 2, + quorum_key: genesis_output.quorum_key, + }; + assert_eq!(manifest.namespace, namespace_field); + let pivot = PivotConfig { + hash: mock_pivot_hash, + restart: RestartPolicy::Never, + args: vec!["--msg".to_string(), msg.to_string()], + }; + assert_eq!(manifest.pivot, pivot); + let manifest_set = ManifestSet { threshold: 2, members: members.clone() }; + assert_eq!(manifest.manifest_set, manifest_set); + let share_set = ShareSet { threshold: 2, members }; + assert_eq!(manifest.share_set, share_set); + + // -- CLIENT make sure each user can run `approve-manifest` + for alias in [user1, user2, user3] { + let approval_path = format!( + "{}/{}-{}-{}.approval", + &*boot_dir, alias, namespace, manifest.namespace.nonce, + ); + + let secret_path = format!("{}/{}.secret", &personal_dir(alias), alias); + + let mut child = Command::new("../target/debug/qos_client") + .args([ + "approve-manifest", + "--secret-path", + &*secret_path, + "--manifest-path", + &cli_manifest_path, + "--manifest-approvals-dir", + &*boot_dir, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub", + "--alias", + alias, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + assert_eq!( + 
&stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? (y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? (y/n)" + ); + // On purpose, try to input a bad value, neither yes or no + stdin + .write_all("maybe\n".as_bytes()) + .expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Please answer with either \"yes\" (y) or \"no\" (n)" + ); + // Try the longer option ("yes" rather than "y") + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct pivot restart policy: RestartPolicy::Never? (y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Are these the correct pivot args:" + ); + assert_eq!( + &stdout.next().unwrap().unwrap(), + "[\"--msg\", \"testing420\"]?" 
+ ); + assert_eq!(&stdout.next().unwrap().unwrap(), "(y/n)"); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + // Wait for the command to write the approval and exit + assert!(child.wait().unwrap().success()); + + // Read in the generated approval to check it was created correctly + let approval = + Approval::try_from_slice(&fs::read(approval_path).unwrap()) + .unwrap(); + let personal_pair = P256Pair::from_hex_file(format!( + "{}/{}.secret", + personal_dir(alias), + alias, + )) + .unwrap(); + + let signature = personal_pair.sign(&manifest.qos_hash()).unwrap(); + assert_eq!(approval.signature, signature); + + assert_eq!(approval.member.alias, alias); + assert_eq!( + approval.member.pub_key, + personal_pair.public_key().to_bytes(), + ); + } + + // -- ENCLAVE start enclave + let mut _enclave_child_process: ChildWrapper = + Command::new("../target/debug/async_qos_core") + .args([ + "--usock", + &*usock, + "--quorum-file", + &*secret_path, + "--pivot-file", + &*pivot_path, + "--ephemeral-file", + &*eph_path, + "--mock", + "--manifest-file", + &*manifest_path, + ]) + .spawn() + .unwrap() + .into(); + + // -- HOST start host + let mut _host_child_process: ChildWrapper = + Command::new("../target/debug/qos_host") + .args([ + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--usock", + &*usock, + ]) + .spawn() + .unwrap() + .into(); + + // -- Make sure the enclave and host have time to boot + qos_test_primitives::wait_until_port_is_bound(host_port); + + // -- CLIENT generate the manifest envelope + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest-envelope", + "--manifest-approvals-dir", + &*boot_dir, + "--manifest-path", + &cli_manifest_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // -- CLIENT broadcast boot standard instruction + let manifest_envelope_path = format!("{}/manifest_envelope", &*boot_dir,); + assert!(Command::new("../target/debug/qos_client") + 
.args([ + "boot-standard", + "--manifest-envelope-path", + &manifest_envelope_path, + "--pivot-path", + PIVOT_OK5_PATH, + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--pcr3-preimage-path", + "./mock/pcr3-preimage.txt", + "--unsafe-skip-attestation", + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // For each user, post a share, + // and sanity check the pivot has not yet executed. + assert!(!Path::new(PIVOT_OK5_SUCCESS_FILE).exists()); + for user in [&user1, &user2] { + // Get attestation doc and manifest + assert!(Command::new("../target/debug/qos_client") + .args([ + "get-attestation-doc", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--attestation-doc-path", + &*attestation_doc_path, + "--manifest-envelope-path", + "/tmp/dont_care" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + let share_path = format!("{}/{}.share", &personal_dir(user), user); + let secret_path = format!("{}/{}.secret", &personal_dir(user), user); + let eph_wrapped_share_path: PathWrapper = + format!("{}/{}.eph_wrapped.share", &*tmp, user).into(); + let approval_path: PathWrapper = + format!("{}/{}.attestation.approval", &*tmp, user).into(); + // Encrypt share to ephemeral key + let mut child = Command::new("../target/debug/qos_client") + .args([ + "proxy-re-encrypt-share", + "--share-path", + &share_path, + "--secret-path", + &secret_path, + "--attestation-doc-path", + &*attestation_doc_path, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + "--manifest-envelope-path", + &manifest_envelope_path, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--alias", + user, + "--unsafe-skip-attestation", + "--unsafe-eph-path-override", + &*eph_path, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + 
let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + // Skip over a log message + stdout.next(); + + // Answer prompts with yes + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Does this AWS IAM role belong to the intended organization: arn:aws:iam::123456789012:role/Webserver? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "The following manifest set members approved:" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + // Check that it finished successfully + assert!(child.wait().unwrap().success()); + + // Post the encrypted share + assert!(Command::new("../target/debug/qos_client") + .args([ + "post-share", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + } + + // Give the enclave time to start the pivot + std::thread::sleep(std::time::Duration::from_secs(2)); + + // Check that the pivot executed + let contents = std::fs::read(PIVOT_OK5_SUCCESS_FILE).unwrap(); + assert_eq!(std::str::from_utf8(&contents).unwrap(), msg); + + let enclave_info_url = + format!("http://{LOCAL_HOST}:{}/qos/enclave-info", host_port); + let enclave_info: EnclaveInfo = + ureq::get(&enclave_info_url).call().unwrap().into_json().unwrap(); + assert_eq!(enclave_info.phase, ProtocolPhase::QuorumKeyProvisioned); + + 
fs::remove_file(PIVOT_OK5_SUCCESS_FILE).unwrap(); +} diff --git a/src/integration/tests/async_client.rs b/src/integration/tests/async_client.rs index da9723b2..0fdb94be 100644 --- a/src/integration/tests/async_client.rs +++ b/src/integration/tests/async_client.rs @@ -4,7 +4,6 @@ use qos_core::{ io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, server::SocketServerError, }; -use tokio::task::JoinHandle; #[derive(Clone)] struct EchoProcessor; @@ -17,42 +16,43 @@ impl AsyncRequestProcessor for EchoProcessor { async fn run_echo_server( socket_path: &str, -) -> Result>>, SocketServerError> { +) -> Result { let timeout = TimeVal::milliseconds(50); - let pool = AsyncStreamPool::new( - std::iter::once(SocketAddress::new_unix(socket_path)), - timeout, - ); - let tasks = AsyncSocketServer::listen_all(pool, &EchoProcessor)?; + let pool = + AsyncStreamPool::new(SocketAddress::new_unix(socket_path), timeout, 1) + .expect("unable to create async pool"); + let server = AsyncSocketServer::listen_all(pool, &EchoProcessor)?; - Ok(tasks) + Ok(server) } #[tokio::test] async fn direct_connect_works() { let socket_path = "/tmp/async_client_test_direct_connect_works.sock"; - let sockets = std::iter::once(SocketAddress::new_unix(socket_path)); + let socket = SocketAddress::new_unix(socket_path); let timeout = TimeVal::milliseconds(50); - let pool = AsyncStreamPool::new(sockets, timeout).shared(); + let pool = AsyncStreamPool::new(socket, timeout, 1) + .expect("unable to create async pool") + .shared(); let client = AsyncClient::new(pool); - let server_tasks = run_echo_server(socket_path).await.unwrap(); + let server = run_echo_server(socket_path).await.unwrap(); let r = client.call(&[0]).await; assert!(r.is_ok()); - for task in server_tasks { - task.abort(); - } + server.terminate(); } #[tokio::test] async fn times_out_properly() { let socket_path = "/tmp/async_client_test_times_out_properly.sock"; - let sockets = std::iter::once(SocketAddress::new_unix(socket_path)); + let 
socket = SocketAddress::new_unix(socket_path); let timeout = TimeVal::milliseconds(50); - let pool = AsyncStreamPool::new(sockets, timeout).shared(); + let pool = AsyncStreamPool::new(socket, timeout, 1) + .expect("unable to create async pool") + .shared(); let client = AsyncClient::new(pool); let r = client.call(&[0]).await; @@ -62,9 +62,11 @@ async fn times_out_properly() { #[tokio::test] async fn repeat_connect_works() { let socket_path = "/tmp/async_client_test_repeat_connect_works.sock"; - let sockets = std::iter::once(SocketAddress::new_unix(socket_path)); + let socket = SocketAddress::new_unix(socket_path); let timeout = TimeVal::milliseconds(50); - let pool = AsyncStreamPool::new(sockets, timeout).shared(); + let pool = AsyncStreamPool::new(socket, timeout, 1) + .expect("unable to create async pool") + .shared(); let client = AsyncClient::new(pool); // server not running yet, expect a connection error @@ -72,13 +74,12 @@ async fn repeat_connect_works() { assert!(r.is_err()); // start server - let server_tasks = run_echo_server(socket_path).await.unwrap(); + let server = run_echo_server(socket_path).await.unwrap(); // server running, expect success let r = client.call(&[0]).await; assert!(r.is_ok()); - for task in server_tasks { - task.abort(); - } + // cleanup + server.terminate(); } diff --git a/src/integration/tests/async_qos_host.rs b/src/integration/tests/async_qos_host.rs index e9a72a60..9d947548 100644 --- a/src/integration/tests/async_qos_host.rs +++ b/src/integration/tests/async_qos_host.rs @@ -3,17 +3,17 @@ use std::{process::Command, time::Duration}; use integration::PIVOT_OK_PATH; use qos_test_primitives::{ChildWrapper, PathWrapper}; -const TEST_ENCLAVE_SOCKET: &str = "/tmp/async_qos_host_test.enclave.sock"; -const POOL_SIZE: &str = "1"; +const TEST_ENCLAVE_SOCKET: &str = "/tmp/async_qos_host_test/enclave.sock"; #[tokio::test] async fn connects_and_gets_info() { + // prep sock pool dir + 
std::fs::create_dir_all("/tmp/async_qos_host_test").unwrap(); + let _qos_host: ChildWrapper = Command::new("../target/debug/async_qos_host") .arg("--usock") .arg(TEST_ENCLAVE_SOCKET) - .arg("--pool-size") - .arg(POOL_SIZE) .arg("--host-ip") .arg("127.0.0.1") .arg("--host-port") @@ -32,7 +32,7 @@ async fn connects_and_gets_info() { let enclave_socket = format!("{TEST_ENCLAVE_SOCKET}_0"); // manually pick the 1st one let secret_path: PathWrapper = "./async_qos_host_test.secret".into(); // let eph_path = "reaper_works.eph.key"; - let manifest_path: PathWrapper = "async_qos_host_test..manifest".into(); + let manifest_path: PathWrapper = "async_qos_host_test.manifest".into(); // For our sanity, ensure the secret does not yet exist drop(std::fs::remove_file(&*secret_path)); diff --git a/src/integration/tests/async_remote_tls.rs b/src/integration/tests/async_remote_tls.rs index b5b39998..0406b369 100644 --- a/src/integration/tests/async_remote_tls.rs +++ b/src/integration/tests/async_remote_tls.rs @@ -29,13 +29,9 @@ async fn fetch_async_remote_tls_content() { .unwrap() .into(); - // because qos_net's listen call uses a pool it will create a "_X" suffix, we just point to the 1st and only - // listening socket in the pool here - let socket_net_path = format!("{REMOTE_TLS_TEST_NET_PROXY_SOCKET}_0"); - let _enclave_app: ChildWrapper = Command::new(PIVOT_ASYNC_REMOTE_TLS_PATH) .arg(REMOTE_TLS_TEST_ENCLAVE_SOCKET) - .arg(&socket_net_path) + .arg(REMOTE_TLS_TEST_NET_PROXY_SOCKET) .spawn() .unwrap() .into(); @@ -46,11 +42,11 @@ async fn fetch_async_remote_tls_content() { } let enclave_pool = AsyncStreamPool::new( - std::iter::once(SocketAddress::new_unix( - REMOTE_TLS_TEST_ENCLAVE_SOCKET, - )), + SocketAddress::new_unix(REMOTE_TLS_TEST_ENCLAVE_SOCKET), TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), - ); + 1, + ) + .expect("unable to create enclave async pool"); let enclave_client = AsyncClient::new(enclave_pool.shared()); diff --git a/src/qos_core/src/async_server.rs 
b/src/qos_core/src/async_server.rs index e1410ea0..4ea24e59 100644 --- a/src/qos_core/src/async_server.rs +++ b/src/qos_core/src/async_server.rs @@ -22,25 +22,39 @@ pub trait AsyncRequestProcessor: Send { } /// A bare bones, socket based server. -pub struct AsyncSocketServer; +pub struct AsyncSocketServer { + /// `AsyncStreamPool` used to serve messages over. + pub pool: AsyncStreamPool, + /// List of tasks that are running on the server. + pub tasks: Vec>>, +} impl AsyncSocketServer { /// Listen and respond to incoming requests on all the pool's addresses with the given `processor`. - /// *NOTE*: the `POOL_SIZE` must match on both sides, since we expect ALL sockets to be connected - /// to right away (e.g. not on first use). The client side connect (above) will always connect them all. /// This method returns a list of tasks that are running as part of this listener. `JoinHandle::abort()` /// should be called on each when the program exists (e.g. on ctrl+c) - pub fn listen_all( + pub fn listen_all

( pool: AsyncStreamPool, - processor: &R, - ) -> Result>>, SocketServerError> + processor: &P, + ) -> Result where - R: AsyncRequestProcessor + 'static + Clone, + P: AsyncRequestProcessor + 'static + Clone, { println!("`AsyncSocketServer` listening on pool size {}", pool.len()); let listeners = pool.listen()?; + let tasks = Self::spawn_tasks_for_listeners(listeners, processor); + + Ok(Self { pool, tasks }) + } + fn spawn_tasks_for_listeners

( + listeners: Vec, + processor: &P, + ) -> Vec>> + where + P: AsyncRequestProcessor + 'static + Clone, + { let mut tasks = Vec::new(); for listener in listeners { let p = processor.clone(); @@ -50,7 +64,31 @@ impl AsyncSocketServer { tasks.push(task); } - Ok(tasks) + tasks + } + + /// Expand the server with listeners up to pool size. This adds new tasks as needed. + pub fn listen_to

( + &mut self, + pool_size: u32, + processor: &P, + ) -> Result<(), IOError> + where + P: AsyncRequestProcessor + 'static + Clone, + { + let listeners = self.pool.listen_to(pool_size)?; + let tasks = Self::spawn_tasks_for_listeners(listeners, processor); + + self.tasks.extend(tasks); + + Ok(()) + } + + /// Consume the socket server and terminate all running tasks. + pub fn terminate(self) { + for task in self.tasks { + task.abort(); + } } } diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index 5d0ba782..8cb94abc 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -9,12 +9,11 @@ use crate::{ io::SocketAddress, parser::{GetParserForOptions, OptionsParser, Parser, Token}, reaper::Reaper, - DEFAULT_POOL_SIZE, EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, - QUORUM_FILE, SEC_APP_SOCK, + EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, }; #[cfg(feature = "async")] -use crate::io::AsyncStreamPool; +use crate::io::{AsyncStreamPool, IOError}; /// "cid" pub const CID: &str = "cid"; @@ -54,15 +53,9 @@ impl EnclaveOpts { /// return the new [`AsyncPool`]. Analogous to [`Self::addr`] and [`Self::app_addr`] depending on the [`app`] parameter. 
#[cfg(feature = "async")] #[allow(unused)] - fn async_pool(&self, app: bool) -> AsyncStreamPool { + fn async_pool(&self, app: bool) -> Result { use nix::sys::time::{TimeVal, TimeValLike}; - let pool_size: u32 = self - .parsed - .single(POOL_SIZE) - .expect("invalid pool options") - .parse() - .expect("invalid pool_size specified"); let usock_param = if app { APP_USOCK } else { USOCK }; match ( @@ -72,23 +65,21 @@ impl EnclaveOpts { ) { #[cfg(feature = "vm")] (Some(c), Some(p), None) => { - let c = c.parse::().unwrap(); - let start_port = p.parse::().unwrap(); - - let addresses = (start_port..start_port + pool_size).map(|p| { - SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS) - }); - - AsyncStreamPool::new(addresses, TimeVal::seconds(5)) - } - (None, None, Some(u)) => { - let addresses = (0..pool_size).map(|i| { - let u = format!("{u}_{i}"); // add _X suffix for pooling - SocketAddress::new_unix(&u) - }); - - AsyncStreamPool::new(addresses, TimeVal::seconds(5)) + let c = + c.parse().map_err(|_| IOError::ConnectAddressInvalid)?; + let p = + p.parse().map_err(|_| IOError::ConnectAddressInvalid)?; + AsyncStreamPool::new( + SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS), + TimeVal::seconds(5), + 1, + ) } + (None, None, Some(u)) => AsyncStreamPool::new( + SocketAddress::new_unix(u), + TimeVal::seconds(5), + 1, + ), _ => panic!("Invalid socket opts"), } } @@ -202,6 +193,9 @@ impl CLI { } /// Execute the enclave server CLI with the environment args using tokio/async + /// + /// # Panics + /// If the socket pools cannot be created #[cfg(feature = "async")] pub fn async_execute() { let mut args: Vec = env::args().collect(); @@ -220,8 +214,10 @@ impl CLI { opts.pivot_file(), ), opts.nsm(), - opts.async_pool(false), - opts.async_pool(true), + opts.async_pool(false) + .expect("Unable to create enclave socket pool"), + opts.async_pool(true) + .expect("Unable to create enclave app pool"), None, ); } @@ -278,11 +274,6 @@ impl GetParserForOptions for 
EnclaveParser { .takes_value(true) .default_value(SEC_APP_SOCK) ) - .token( - Token::new(POOL_SIZE, "the pool size for use with all socket types") - .takes_value(true) - .default_value(DEFAULT_POOL_SIZE) - ) } } @@ -363,20 +354,6 @@ mod test { assert_eq!(opts.addr(), SocketAddress::new_unix("./test.sock")); } - #[test] - #[cfg(feature = "async")] - fn parse_pool_size() { - let mut args: Vec<_> = - vec!["binary", "--usock", "./test.sock", "--pool-size", "7"] - .into_iter() - .map(String::from) - .collect(); - let opts = EnclaveOpts::new(&mut args); - - let pool = opts.async_pool(false); - assert_eq!(pool.len(), 7); - } - #[test] fn parse_manifest_file() { let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock"] diff --git a/src/qos_core/src/io/async_pool.rs b/src/qos_core/src/io/async_pool.rs index 69456bf0..0ea5c886 100644 --- a/src/qos_core/src/io/async_pool.rs +++ b/src/qos_core/src/io/async_pool.rs @@ -1,6 +1,6 @@ -use std::sync::Arc; +use std::{path::Path, sync::Arc}; -use nix::sys::time::TimeVal; +use nix::sys::{socket::UnixAddr, time::TimeVal}; use tokio::sync::{Mutex, MutexGuard, RwLock}; use super::{AsyncListener, AsyncStream, IOError, SocketAddress}; @@ -10,6 +10,8 @@ use super::{AsyncListener, AsyncStream, IOError, SocketAddress}; pub enum PoolError { /// No addresses were provided in the pool constructor NoAddressesSpecified, + /// Invalid source address specified for `next_address` call, usually due to `path` missing in `UnixSock`. + InvalidSourceAddress, } /// Generic Async pool using tokio Mutex @@ -23,6 +25,7 @@ struct AsyncPool { pub struct AsyncStreamPool { addresses: Vec, // local copy used for `listen` only TODO: refactor listeners out of pool pool: AsyncPool, + timeout: TimeVal, } /// Helper type to wrap `AsyncStreamPool` in `Arc` and `RwLock`. 
Used to allow multiple processors to run across IO @@ -30,25 +33,47 @@ pub struct AsyncStreamPool { pub type SharedAsyncStreamPool = Arc>; impl AsyncStreamPool { - /// Create a new `AsyncStreamPool` which will contain all the known addresses but no connections yet. - /// Includes the connect timeout which gets used in case `get` gets called. + /// Create a new `AsyncStreamPool` with given starting `SocketAddress`, timout and number of addresses to populate. pub fn new( + start_address: SocketAddress, + timeout: TimeVal, + mut count: u32, + ) -> Result { + eprintln!( + "AsyncStreamPool start address: {:?}", + start_address.debug_info() + ); + + let mut addresses = Vec::new(); + let mut addr = start_address; + while count > 0 { + addresses.push(addr.clone()); + count -= 1; + + if count == 0 { + break; // early break to prevent needless address creation + } + addr = addr.next_address()?; + } + + Ok(Self::with_addresses(addresses, timeout)) + } + + /// Create a new `AsyncStreamPool` which will contain all the provided addresses but no connections yet. + /// Includes the connect timeout which gets used in case `get` gets called. 
+ #[must_use] + fn with_addresses( addresses: impl IntoIterator, timeout: TimeVal, ) -> Self { let addresses: Vec = addresses.into_iter().collect(); - // TODO: DEBUG remove - for addr in &addresses { - println!("pool address: {:?}", addr.debug_info()); - } - let streams: Vec = addresses.iter().map(|a| AsyncStream::new(a, timeout)).collect(); let pool = AsyncPool::from(streams); - Self { addresses, pool } + Self { addresses, pool, timeout } } /// Helper function to get the Arc and Mutex wrapping @@ -75,17 +100,62 @@ impl AsyncStreamPool { } /// Create a new pool by listening new connection on all the addresses - pub fn listen(self) -> Result, IOError> { + pub fn listen(&self) -> Result, IOError> { let mut listeners = Vec::new(); - for addr in self.addresses { - let listener = AsyncListener::listen(&addr)?; + for addr in &self.addresses { + let listener = AsyncListener::listen(addr)?; listeners.push(listener); } Ok(listeners) } + + /// Expands the pool with new addresses using `SocketAddress::next_address` + pub fn expand_to(&mut self, size: u32) -> Result<(), IOError> { + eprintln!("expanding async pool to {size}"); + let size = size as usize; + + if let Some(last_address) = self.addresses.last().cloned() { + let mut next = last_address; + let count = self.addresses.len(); + for _ in count..size { + next = next.next_address()?; + + self.pool.push(AsyncStream::new(&next, self.timeout)); + self.addresses.push(next.clone()); + } + } + + Ok(()) + } + + /// Listen to new connections on added sockets on top of existing listeners, returning the list of new `AsyncListener` + pub fn listen_to( + &mut self, + size: u32, + ) -> Result, IOError> { + eprintln!("listening async pool to {size}"); + let size = size as usize; + let mut listeners = Vec::new(); + + if let Some(last_address) = self.addresses.last().cloned() { + let mut next = last_address; + let count = self.addresses.len(); + for _ in count..size { + next = next.next_address()?; + eprintln!("adding listener on {}", 
next.debug_info()); + + self.addresses.push(next.clone()); + let listener = AsyncListener::listen(&next)?; + + listeners.push(listener); + } + } + + Ok(listeners) + } } impl AsyncPool { @@ -108,6 +178,10 @@ impl AsyncPool { stream } + + fn push(&mut self, value: T) { + self.handles.push(Mutex::new(value)); + } } impl From> for AsyncPool { @@ -119,8 +193,56 @@ impl From> for AsyncPool { } } +/// Provide the "next" usock path. Given a `"*_X"` where X is a number, this function +/// will return `"*_X+1"`. If there is no `"_X"` suffix a `"_0"` will be appended instead. +fn next_usock_path(path: &Path) -> Result { + let path = + path.as_os_str().to_str().ok_or(IOError::ConnectAddressInvalid)?; + if let Some(underscore_index) = path.rfind('_') { + let num_str = &path[underscore_index + 1..]; + let num = num_str.parse::(); + Ok(match num { + Ok(index) => { + format!("{}_{}", &path[0..underscore_index], index + 1) + } + Err(_) => format!("{path}_0"), // non-numerical _X, just add _0 + }) + } else { + Ok(format!("{path}_0")) + } +} + +impl SocketAddress { + /// Creates and returns the "following" `SocketAddress`. In case of VSOCK we increment the port from the source by 1. + /// In case of USOCK we increment the postfix of the path if present, or add a `"_0"` at the end. + /// + /// This is mostly used by the `AsyncSocketPool`. 
+ pub(crate) fn next_address(&self) -> Result { + match self { + Self::Unix(usock) => match usock.path() { + Some(path) => { + let path: &str = &next_usock_path(path)?; + let addr = UnixAddr::new(path)?; + Ok(Self::Unix(addr)) + } + None => { + Err(IOError::PoolError(PoolError::InvalidSourceAddress)) + } + }, + #[cfg(feature = "vm")] + Self::Vsock(vsock) => Ok(Self::new_vsock( + vsock.cid(), + vsock.port() + 1, + super::stream::vsock_svm_flags(*vsock), + )), + } + } +} + #[cfg(test)] mod test { + use std::path::PathBuf; + use super::*; // constructor for basic i32 with repeating 0 values for testing @@ -150,4 +272,34 @@ mod test { let third = pool.get().await; assert_eq!(*third, 0); } + + #[test] + fn next_usock_path_works() { + assert_eq!( + next_usock_path(&PathBuf::from("basic")).unwrap(), + "basic_0" + ); + assert_eq!(next_usock_path(&PathBuf::from("")).unwrap(), "_0"); + assert_eq!( + next_usock_path(&PathBuf::from("with_underscore_elsewhere")) + .unwrap(), + "with_underscore_elsewhere_0" + ); + assert_eq!( + next_usock_path(&PathBuf::from("with_underscore_at_end_")).unwrap(), + "with_underscore_at_end__0" + ); + assert_eq!( + next_usock_path(&PathBuf::from("good_num_2")).unwrap(), + "good_num_3" + ); + assert_eq!( + next_usock_path(&PathBuf::from("good_num_34")).unwrap(), + "good_num_35" + ); + assert_eq!( + next_usock_path(&PathBuf::from("good_num_999")).unwrap(), + "good_num_1000" + ); + } } diff --git a/src/qos_core/src/io/stream.rs b/src/qos_core/src/io/stream.rs index e6620fab..aa3b7bc9 100644 --- a/src/qos_core/src/io/stream.rs +++ b/src/qos_core/src/io/stream.rs @@ -19,7 +19,7 @@ use super::IOError; // 25(retries) x 10(milliseconds) = 1/4 a second of retrying const MAX_RETRY: usize = 25; const BACKOFF_MILLISECONDS: u64 = 10; -const BACKLOG: i32 = 128; +const BACKLOG: i32 = 127; // due to bug in nix::Backlog check, 128 is disallowed, fixed in https://github.com/nix-rust/nix/commit/a0869f993c0e7639b13b9bb11cb74d54a8018fbd const MEGABYTE: usize = 1024 * 
1024; @@ -58,21 +58,17 @@ impl SocketAddress { /// /// For flags see: [Add flags field in the vsock address](). #[cfg(feature = "vm")] - #[allow(unsafe_code)] pub fn new_vsock(cid: u32, port: u32, flags: u8) -> Self { - #[repr(C)] - struct sockaddr_vm { - svm_family: libc::sa_family_t, - svm_reserved1: libc::c_ushort, - svm_port: libc::c_uint, - svm_cid: libc::c_uint, - // Field added [here](https://github.com/torvalds/linux/commit/3a9c049a81f6bd7c78436d7f85f8a7b97b0821e6) - // but not yet in a version of libc we can use. - svm_flags: u8, - svm_zero: [u8; 3], - } + Self::Vsock(Self::new_vsock_raw(cid, port, flags)) + } - let vsock_addr = sockaddr_vm { + /// Create a new raw VsockAddr. + /// + /// For flags see: [Add flags field in the vsock address](). + #[cfg(feature = "vm")] + #[allow(unsafe_code)] + pub fn new_vsock_raw(cid: u32, port: u32, flags: u8) -> VsockAddr { + let vsock_addr = SockAddrVm { svm_family: AddressFamily::Vsock as libc::sa_family_t, svm_reserved1: 0, svm_cid: cid, @@ -80,15 +76,15 @@ impl SocketAddress { svm_flags: flags, svm_zero: [0; 3], }; - let vsock_addr_len = size_of::() as libc::socklen_t; + let vsock_addr_len = size_of::() as libc::socklen_t; let addr = unsafe { VsockAddr::from_raw( - &vsock_addr as *const sockaddr_vm as *const libc::sockaddr, + &vsock_addr as *const SockAddrVm as *const libc::sockaddr, Some(vsock_addr_len), ) .unwrap() }; - Self::Vsock(addr) + addr } /// Get the `AddressFamily` of the socket. @@ -154,6 +150,29 @@ impl SocketAddress { } } +/// Extract svm_flags field value from existing VSOCK. 
+#[cfg(all(feature = "vm", feature = "async"))] +#[allow(unsafe_code)] +pub fn vsock_svm_flags(vsock: VsockAddr) -> u8 { + unsafe { + let cast: SockAddrVm = std::mem::transmute(vsock); + cast.svm_flags + } +} + +#[cfg(feature = "vm")] +#[repr(C)] +struct SockAddrVm { + svm_family: libc::sa_family_t, + svm_reserved1: libc::c_ushort, + svm_port: libc::c_uint, + svm_cid: libc::c_uint, + // Field added [here](https://github.com/torvalds/linux/commit/3a9c049a81f6bd7c78436d7f85f8a7b97b0821e6) + // but not yet in a version of libc we can use. + svm_flags: u8, + svm_zero: [u8; 3], +} + /// Handle on a stream pub struct Stream { fd: OwnedFd, @@ -347,6 +366,7 @@ impl Listener { let fd = socket_fd(&addr)?; bind(fd.as_raw_fd(), &*addr.addr())?; + listen(&fd.as_fd(), Backlog::new(BACKLOG)?)?; Ok(Self { fd, addr }) @@ -599,4 +619,22 @@ mod test { // N.B: we do not call _handler.join().unwrap() here, because the handler is blocking (indefinitely) on "send" // Once the test exits, Rust/OS checks will pick up the slack and clean up this thread when this test exits. 
} + + #[cfg(feature = "vm")] + #[test] + fn vsock_svm_flags_are_not_lost() { + let vsock = SocketAddress::new_vsock_raw(1, 1, VMADDR_FLAG_TO_HOST); + + assert_eq!(vsock_svm_flags(vsock), VMADDR_FLAG_TO_HOST); + + let first = SocketAddress::new_vsock(3, 3, VMADDR_FLAG_TO_HOST); + let second = first.next_address().unwrap(); + + match second { + SocketAddress::Vsock(second_vsock) => { + assert_eq!(vsock_svm_flags(second_vsock), VMADDR_FLAG_TO_HOST) + } + _ => panic!("not a vsock??"), + } + } } diff --git a/src/qos_core/src/lib.rs b/src/qos_core/src/lib.rs index aa192a9b..a1d1390f 100644 --- a/src/qos_core/src/lib.rs +++ b/src/qos_core/src/lib.rs @@ -67,9 +67,3 @@ pub const SEC_APP_SOCK: &str = "./local-enclave/sec_app.sock"; pub const SEC_APP_SOCK: &str = "/sec_app.sock"; /// Default socket connect timeout in milliseconds pub const DEFAULT_SOCKET_TIMEOUT: &str = "5000"; -/// Default socket pool size is 20 -#[cfg(feature = "async")] -pub const DEFAULT_POOL_SIZE: &str = "20"; // DEBUG: ales - set to something real after debugging -/// Default socket pool size is 0 for sync (unused) -#[cfg(not(feature = "async"))] -pub const DEFAULT_POOL_SIZE: &str = "1"; diff --git a/src/qos_core/src/protocol/async_processor.rs b/src/qos_core/src/protocol/async_processor.rs index 79336bfb..36a90d19 100644 --- a/src/qos_core/src/protocol/async_processor.rs +++ b/src/qos_core/src/protocol/async_processor.rs @@ -7,7 +7,10 @@ use tokio::sync::Mutex; use super::{ error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, ProtocolPhase, }; -use crate::{async_server::AsyncRequestProcessor, io::SharedAsyncStreamPool}; +use crate::{ + async_server::AsyncRequestProcessor, + io::{IOError, SharedAsyncStreamPool}, +}; const MEGABYTE: usize = 1024 * 1024; const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; @@ -43,6 +46,11 @@ impl AsyncProcessor { async fn get_phase(&self) -> ProtocolPhase { self.state.lock().await.get_phase() } + + /// Expands the app pool to given pool size + pub async fn 
expand_to(&mut self, pool_size: u32) -> Result<(), IOError> { + self.app_pool.write().await.expand_to(pool_size) + } } impl AsyncRequestProcessor for AsyncProcessor { @@ -62,39 +70,35 @@ impl AsyncRequestProcessor for AsyncProcessor { }; // handle Proxy outside of the state - match msg_req { - ProtocolMsg::ProxyRequest { data } => { - let phase = self.get_phase().await; + if let ProtocolMsg::ProxyRequest { data } = msg_req { + let phase = self.get_phase().await; - if phase != ProtocolPhase::QuorumKeyProvisioned { - let err = ProtocolError::NoMatchingRoute(phase); - return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( - err, - )) + if phase != ProtocolPhase::QuorumKeyProvisioned { + let err = ProtocolError::NoMatchingRoute(phase); + return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse(err)) .expect("ProtocolMsg can always be serialized. qed."); - } + } - let result = self - .app_pool - .read() - .await - .get() - .await - .call(&data) - .await - .map(|data| ProtocolMsg::ProxyResponse { data }) - .map_err(|_e| { - ProtocolMsg::ProtocolErrorResponse( - ProtocolError::IOError, - ) - }); + let result = self + .app_pool + .read() + .await + .get() + .await + .call(&data) + .await + .map(|data| ProtocolMsg::ProxyResponse { data }) + .map_err(|_e| { + ProtocolMsg::ProtocolErrorResponse(ProtocolError::IOError) + }); - match result { - Ok(msg_resp) | Err(msg_resp) => borsh::to_vec(&msg_resp) - .expect("ProtocolMsg can always be serialized. qed."), - } + match result { + Ok(msg_resp) | Err(msg_resp) => borsh::to_vec(&msg_resp) + .expect("ProtocolMsg can always be serialized. 
qed."), } - _ => self.state.lock().await.handle_msg(&msg_req), + } else { + // handle all the others here + self.state.lock().await.handle_msg(&msg_req) } } } diff --git a/src/qos_core/src/protocol/async_state.rs b/src/qos_core/src/protocol/async_state.rs deleted file mode 100644 index 8b137891..00000000 --- a/src/qos_core/src/protocol/async_state.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/qos_core/src/protocol/error.rs b/src/qos_core/src/protocol/error.rs index 93fe6b61..84f79d4d 100644 --- a/src/qos_core/src/protocol/error.rs +++ b/src/qos_core/src/protocol/error.rs @@ -147,6 +147,8 @@ pub enum ProtocolError { DifferentManifest, /// Error from the qos crypto library. QosCrypto(String), + /// Error during expanding the `AsyncPool`. + PoolExpandError, } impl From for ProtocolError { diff --git a/src/qos_core/src/reaper.rs b/src/qos_core/src/reaper.rs index 39226184..1d69018f 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -112,7 +112,10 @@ impl Reaper { #[cfg(feature = "async")] mod inner { - use std::sync::{Arc, RwLock}; + use std::{ + sync::{Arc, RwLock}, + time::Duration, + }; #[allow(clippy::wildcard_imports)] use super::*; @@ -122,6 +125,17 @@ mod inner { protocol::{async_processor::AsyncProcessor, ProtocolState}, }; + // basic helper for x-thread comms in Reaper + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + enum InterState { + // We're booting, no pivot yet + Booting, + // We've booted and pivot is ready + PivotReady, + // We're quitting (ctrl+c for tests and such) + Quitting, + } + impl Reaper { /// Run the Reaper using Tokio inside a thread for server processing. /// @@ -130,6 +144,7 @@ mod inner { /// - If spawning the pivot errors. /// - If waiting for the pivot errors. 
#[allow(dead_code)] + #[allow(clippy::too_many_lines)] pub fn async_execute( handles: &Handles, nsm: Box, @@ -138,8 +153,8 @@ mod inner { test_only_init_phase_override: Option, ) { let handles2 = handles.clone(); - let quit = Arc::new(RwLock::new(false)); - let inner_quit = quit.clone(); + let inter_state = Arc::new(RwLock::new(InterState::Booting)); + let server_state = inter_state.clone(); std::thread::spawn(move || { tokio::runtime::Builder::new_current_thread() @@ -151,35 +166,66 @@ mod inner { // create the state let protocol_state = ProtocolState::new( nsm, - handles2, + handles2.clone(), test_only_init_phase_override, ); // send a shared version of state and the async pool to each processor - let processor = AsyncProcessor::new( + let mut processor = AsyncProcessor::new( protocol_state.shared(), app_pool.shared(), ); // listen_all will multiplex the processor accross all sockets - let tasks = + let mut server = AsyncSocketServer::listen_all(pool, &processor) .expect("unable to get listen task list"); - match tokio::signal::ctrl_c().await { - Ok(()) => { - eprintln!("handling ctrl+c the tokio way"); - for task in tasks { - task.abort(); - } - *inner_quit.write().unwrap() = true; + loop { + let (manifest_present, pool_size) = + get_pool_size_from_pivot_args(&handles2); + let pool_size = pool_size.unwrap_or(1); + // expand server to pool_size + 1 (due to qos-host extra socket) + server.listen_to(pool_size + 1, &processor).expect( + "unable to listen_to on the running server", + ); + // expand app connections to pool_size + processor.expand_to(pool_size).await.expect( + "unable to expand_to on the processor app pool", + ); + + if manifest_present { + *server_state.write().unwrap() = + InterState::PivotReady; + eprintln!("manifest is present, breaking out of server check loop"); + break; + } + + // sleep up to 1s, checking for ctrl+c, if it happens break out + if let Ok(ctrl_res) = tokio::time::timeout( + Duration::from_secs(1), + tokio::signal::ctrl_c(), + ) + 
.await + { + return ctrl_c_handler( + ctrl_res, + server, + &server_state, + ); } - Err(err) => panic!("{err}"), } + // wait until ctrl+c + ctrl_c_handler( + tokio::signal::ctrl_c().await, + server, + &server_state, + ); }); }); loop { + let server_state = *inter_state.read().unwrap(); // helper for integration tests and manual runs aka qos_core binary - if *quit.read().unwrap() { + if server_state == InterState::Quitting { eprintln!("quit called by ctrl+c"); std::process::exit(1); } @@ -187,6 +233,7 @@ mod inner { if handles.quorum_key_exists() && handles.pivot_exists() && handles.manifest_envelope_exists() + && server_state == InterState::PivotReady { // The state required to pivot exists, so we can break this // holding pattern and start the pivot. @@ -242,6 +289,93 @@ mod inner { println!("Reaper exiting ... "); } } + + fn ctrl_c_handler( + ctrl_c: std::io::Result<()>, + server: AsyncSocketServer, + server_state: &Arc>, + ) { + match ctrl_c { + Ok(()) => { + server.terminate(); + *server_state.write().unwrap() = InterState::Quitting; + } + Err(err) => panic!("{err}"), + } + } + + // return if we have manifest and get pool_size args if present from it + fn get_pool_size_from_pivot_args(handles: &Handles) -> (bool, Option) { + if let Ok(envelope) = handles.get_manifest_envelope() { + (true, extract_pool_size_arg(&envelope.manifest.pivot.args)) + } else { + (false, None) + } + } + + // find the u32 value of --pool-size argument passed to the pivot if present + fn extract_pool_size_arg(args: &[String]) -> Option { + if let Some((i, _)) = + args.iter().enumerate().find(|(_, a)| *a == "--pool-size") + { + if let Some(pool_size_str) = args.get(i + 1) { + match pool_size_str.parse::() { + Ok(pool_size) => Some(pool_size), + Err(_) => None, + } + } else { + None + } + } else { + None + } + } + + #[cfg(test)] + mod test { + use super::*; + + #[test] + fn extract_pool_size_arg_works() { + // no arg + assert_eq!( + extract_pool_size_arg(&vec![ + "unrelated".to_owned(), + 
"--args".to_owned(), + ]), + None + ); + + // should work + assert_eq!( + extract_pool_size_arg(&vec![ + "--pool-size".to_owned(), + "8".to_owned(), + ]), + Some(8) + ); + + // wrong number, expect None + assert_eq!( + extract_pool_size_arg(&vec![ + "--pool-size".to_owned(), + "8a".to_owned(), + ]), + None + ); + + // duplicate arg, use 1st + assert_eq!( + extract_pool_size_arg(&vec![ + "--pool-size".to_owned(), + "8".to_owned(), + "--pool-size".to_owned(), + "9".to_owned(), + ]), + Some(8) + ); + } + } } -// See qos_test/tests/reaper for tests +// See qos_test/tests/async_reaper for more tests diff --git a/src/qos_core/src/server.rs b/src/qos_core/src/server.rs index 238d0dbf..be38f9b4 100644 --- a/src/qos_core/src/server.rs +++ b/src/qos_core/src/server.rs @@ -39,7 +39,7 @@ impl SocketServer { addr: SocketAddress, mut processor: R, ) -> Result<(), SocketServerError> { - println!("`SocketServer` listening on {addr:?}"); + println!("`SocketServer` listening on {}", addr.debug_info()); let listener = Listener::listen(addr)?; diff --git a/src/qos_host/src/async_host.rs b/src/qos_host/src/async_host.rs index ee16c4a3..4642df21 100644 --- a/src/qos_host/src/async_host.rs +++ b/src/qos_host/src/async_host.rs @@ -254,8 +254,8 @@ impl AsyncHostServer { // DEBUG: remove later match ProtocolMsg::try_from_slice(&encoded_request) { - Ok(r) => eprintln!("Received message: {}", r), - Err(e) => eprintln!("Unable to decode request: {}", e), + Ok(r) => eprintln!("Received message: {r}"), + Err(e) => eprintln!("Unable to decode request: {e}"), } match state.enclave_client.call(&encoded_request).await { @@ -263,7 +263,7 @@ impl AsyncHostServer { // DEBUG: remove later match ProtocolMsg::try_from_slice(&encoded_response) { Ok(r) => { - eprintln!("Enclave responded with: {}", r); + eprintln!("Enclave responded with: {r}"); } Err(e) => { eprintln!("Error deserializing response from enclave, make sure qos_host version match qos_core: {e}"); diff --git a/src/qos_host/src/cli.rs 
b/src/qos_host/src/cli.rs index 3d8ac10b..05c96feb 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -19,7 +19,6 @@ const HOST_IP: &str = "host-ip"; const HOST_PORT: &str = "host-port"; const ENDPOINT_BASE_PATH: &str = "endpoint-base-path"; const VSOCK_TO_HOST: &str = "vsock-to-host"; -const POOL_SIZE: &str = "pool-size"; const SOCKET_TIMEOUT: &str = "socket-timeout"; struct HostParser; @@ -57,11 +56,6 @@ impl GetParserForOptions for HostParser { Token::new(ENDPOINT_BASE_PATH, "base path for all endpoints. e.g. /enclave-health") .takes_value(true) ) - .token( - Token::new(POOL_SIZE, "pool size for USOCK/VSOCK sockets") - .takes_value(true) - .default_value("1") // qos-host should default to 1 only - ) .token( Token::new(SOCKET_TIMEOUT, "maximum time in ms a connect to the USOCK/VSOCK will take") .takes_value(true) @@ -122,21 +116,17 @@ impl HostOpts { /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and /// return the new `AsyncPool`. 
#[cfg(feature = "async")] - pub(crate) fn enclave_pool(&self) -> AsyncStreamPool { + pub(crate) fn enclave_pool( + &self, + ) -> Result { use qos_core::io::{TimeVal, TimeValLike}; let default_timeout = &qos_core::DEFAULT_SOCKET_TIMEOUT.to_owned(); let timeout_str = - self.parsed.single(SOCKET_TIMEOUT).unwrap_or(&default_timeout); + self.parsed.single(SOCKET_TIMEOUT).unwrap_or(default_timeout); let timeout = TimeVal::milliseconds( timeout_str.parse().expect("invalid timeout value"), ); - let pool_size: u32 = self - .parsed - .single(POOL_SIZE) - .expect("invalid pool options") - .parse() - .expect("invalid pool_size specified"); match ( self.parsed.single(CID), self.parsed.single(PORT), @@ -144,22 +134,22 @@ impl HostOpts { ) { #[cfg(feature = "vm")] (Some(c), Some(p), None) => { - let c = c.parse::().unwrap(); - let start_port = p.parse::().unwrap(); + let c = c.parse().map_err(|_| { + qos_core::io::IOError::ConnectAddressInvalid + })?; + let p = p.parse().map_err(|_| { + qos_core::io::IOError::ConnectAddressInvalid + })?; - let addresses = (start_port..start_port + pool_size).map(|p| { - SocketAddress::new_vsock(c, p, self.to_host_flag()) - }); + let address = + SocketAddress::new_vsock(c, p, self.to_host_flag()); - AsyncStreamPool::new(addresses, timeout) + AsyncStreamPool::new(address, timeout, 1) // qos_host needs only 1 } (None, None, Some(u)) => { - let addresses = (0..pool_size).map(|i| { - let u = format!("{u}_{i}"); // add _X suffix for pooling - SocketAddress::new_unix(&u) - }); + let address = SocketAddress::new_unix(u); - AsyncStreamPool::new(addresses, timeout) + AsyncStreamPool::new(address, timeout, 1) } _ => panic!("Invalid socket opts"), } @@ -226,6 +216,8 @@ impl HostOpts { pub struct CLI; impl CLI { /// Execute the command line interface. 
+ /// # Panics + /// If pool creation fails pub async fn execute() { let mut args: Vec = env::args().collect(); let options = HostOpts::new(&mut args); @@ -246,7 +238,10 @@ impl CLI { #[cfg(feature = "async")] crate::async_host::AsyncHostServer::new( - options.enclave_pool().shared(), + options + .enclave_pool() + .expect("unable to create enclave pool") + .shared(), options.host_addr(), options.base_path(), ) diff --git a/src/qos_net/src/async_cli.rs b/src/qos_net/src/async_cli.rs index 4351ffa6..af96dfb3 100644 --- a/src/qos_net/src/async_cli.rs +++ b/src/qos_net/src/async_cli.rs @@ -17,16 +17,16 @@ impl CLI { } else if opts.parsed.help() { println!("{}", opts.parsed.info()); } else { - let tasks = AsyncSocketServer::listen_proxy(opts.async_pool()) - .await - .expect("unable to get listen join handles"); + let server = AsyncSocketServer::listen_proxy( + opts.async_pool().expect("unable to create async socket pool"), + ) + .await + .expect("unable to get listen join handles"); match tokio::signal::ctrl_c().await { Ok(_) => { eprintln!("handling ctrl+c the tokio way"); - for task in tasks { - task.abort(); - } + server.terminate(); } Err(err) => panic!("{err}"), } diff --git a/src/qos_net/src/async_proxy.rs b/src/qos_net/src/async_proxy.rs index fb6d4413..ba72fb9d 100644 --- a/src/qos_net/src/async_proxy.rs +++ b/src/qos_net/src/async_proxy.rs @@ -6,7 +6,6 @@ use qos_core::{ io::{AsyncListener, AsyncStream, AsyncStreamPool, IOError}, server::SocketServerError, }; -use tokio::task::JoinHandle; use crate::{ async_proxy_connection::AsyncProxyConnection, error::QosNetError, @@ -148,12 +147,7 @@ impl AsyncProxy { pub trait AsyncProxyServer { fn listen_proxy( pool: AsyncStreamPool, - ) -> impl Future< - Output = Result< - Vec>>, - SocketServerError, - >, - > + Send; + ) -> impl Future, SocketServerError>> + Send; } impl AsyncProxyServer for AsyncSocketServer { @@ -161,8 +155,7 @@ impl AsyncProxyServer for AsyncSocketServer { /// dumb pipe after getting the `connect*` 
calls. async fn listen_proxy( pool: AsyncStreamPool, - ) -> Result>>, SocketServerError> - { + ) -> Result, SocketServerError> { println!( "`AsyncSocketServer` proxy listening on pool size {}", pool.len() @@ -178,7 +171,7 @@ impl AsyncProxyServer for AsyncSocketServer { tasks.push(task); } - Ok(tasks) + Ok(Box::new(Self { pool, tasks })) } } diff --git a/src/qos_net/src/cli.rs b/src/qos_net/src/cli.rs index 779aa248..ccb9fd9f 100644 --- a/src/qos_net/src/cli.rs +++ b/src/qos_net/src/cli.rs @@ -17,8 +17,6 @@ pub const USOCK: &str = "usock"; /// "pool-size" pub const POOL_SIZE: &str = "pool-size"; -const DEFAULT_POOL_SIZE: &str = "20"; - /// CLI options for starting up the proxy. #[derive(Default, Clone, Debug, PartialEq)] pub(crate) struct ProxyOpts { @@ -37,7 +35,9 @@ impl ProxyOpts { /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and /// return the new `AsyncPool`. #[cfg(feature = "async_proxy")] - pub(crate) fn async_pool(&self) -> AsyncStreamPool { + pub(crate) fn async_pool( + &self, + ) -> Result { use qos_core::io::{TimeVal, TimeValLike}; let pool_size: u32 = self @@ -54,21 +54,17 @@ impl ProxyOpts { #[cfg(feature = "vm")] (Some(c), Some(p), None) => { let c = c.parse::().unwrap(); - let start_port = p.parse::().unwrap(); + let p = p.parse::().unwrap(); - let addresses = (start_port..start_port + pool_size).map(|p| { - SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS) - }); + let address = + SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS); - AsyncStreamPool::new(addresses) + AsyncStreamPool::new(address, TimeVal::seconds(5), pool_size) } (None, None, Some(u)) => { - let addresses = (0..pool_size).map(|i| { - let u = format!("{u}_{i}"); // add _X suffix for pooling - SocketAddress::new_unix(&u) - }); + let address = SocketAddress::new_unix(u); - AsyncStreamPool::new(addresses, TimeVal::seconds(0)) + AsyncStreamPool::new(address, TimeVal::seconds(0), pool_size) } _ => panic!("Invalid 
socket opts"), } @@ -153,7 +149,7 @@ impl GetParserForOptions for ProxyParser { ) .takes_value(true) .forbids(vec!["port", "cid"]) - .default_value(DEFAULT_POOL_SIZE), + .default_value("1"), ) } } @@ -194,7 +190,7 @@ mod test { .collect(); let opts = ProxyOpts::new(&mut args); - let pool = opts.async_pool(); + let pool = opts.async_pool().unwrap(); assert_eq!(pool.len(), 7); } From dd57bb9ef716572c665bc6162122e4cd7eb81665 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Tue, 15 Jul 2025 11:21:53 -0700 Subject: [PATCH 14/20] qos_net: remove async_proxy feature and cleanup --- src/Cargo.lock | 1 + src/Makefile | 2 - src/integration/Cargo.toml | 2 +- src/integration/src/bin/pivot_remote_tls.rs | 116 ---- src/integration/src/lib.rs | 2 - src/integration/tests/async_remote_tls.rs | 4 +- src/integration/tests/remote_tls.rs | 111 ---- src/qos_net/Cargo.toml | 11 +- src/qos_net/src/async_cli.rs | 35 -- src/qos_net/src/bin/async_qos_net.rs | 17 - src/qos_net/src/cli.rs | 29 +- src/qos_net/src/error.rs | 4 +- src/qos_net/src/lib.rs | 19 +- src/qos_net/src/main.rs | 7 +- src/qos_net/src/proxy.rs | 563 ------------------ src/qos_net/src/proxy_connection.rs | 209 ------- src/qos_net/src/proxy_stream.rs | 595 -------------------- 17 files changed, 37 insertions(+), 1690 deletions(-) delete mode 100644 src/integration/src/bin/pivot_remote_tls.rs delete mode 100644 src/integration/tests/remote_tls.rs delete mode 100644 src/qos_net/src/async_cli.rs delete mode 100644 src/qos_net/src/bin/async_qos_net.rs delete mode 100644 src/qos_net/src/proxy.rs delete mode 100644 src/qos_net/src/proxy_connection.rs delete mode 100644 src/qos_net/src/proxy_stream.rs diff --git a/src/Cargo.lock b/src/Cargo.lock index 8bc351c1..932c93ca 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2287,6 +2287,7 @@ dependencies = [ "serde", "serde_json", "tokio", + "tokio-rustls", "webpki-roots", ] diff --git a/src/Makefile b/src/Makefile index 50cce34c..abc2369a 100644 --- a/src/Makefile +++ b/src/Makefile 
@@ -163,8 +163,6 @@ test: cargo build --bin async_qos_core --features async,mock @# We also need the async version of qos_host cargo build --bin async_qos_host --features async - @# We also need the async version of qos_net - cargo build --bin async_qos_net --features async_proxy @# Run tests cargo test @# When we build the workspace it resolves with the qos_core mock feature diff --git a/src/integration/Cargo.toml b/src/integration/Cargo.toml index e5eeb3c8..a1f33695 100644 --- a/src/integration/Cargo.toml +++ b/src/integration/Cargo.toml @@ -9,7 +9,7 @@ qos_core = { path = "../qos_core", features = ["mock"], default-features = false qos_nsm = { path = "../qos_nsm", features = ["mock"], default-features = false } qos_host = { path = "../qos_host", default-features = false } qos_client = { path = "../qos_client", default-features = false } -qos_net = { path = "../qos_net", features = ["proxy", "async_proxy"], default-features = false } +qos_net = { path = "../qos_net", features = ["proxy"], default-features = false } qos_crypto = { path = "../qos_crypto" } qos_hex = { path = "../qos_hex" } qos_p256 = { path = "../qos_p256", features = ["mock"] } diff --git a/src/integration/src/bin/pivot_remote_tls.rs b/src/integration/src/bin/pivot_remote_tls.rs deleted file mode 100644 index 2373d53e..00000000 --- a/src/integration/src/bin/pivot_remote_tls.rs +++ /dev/null @@ -1,116 +0,0 @@ -use core::panic; -use std::{ - io::{ErrorKind, Read, Write}, - sync::Arc, -}; - -use borsh::BorshDeserialize; -use integration::PivotRemoteTlsMsg; -use qos_core::{ - io::{SocketAddress, TimeVal}, - server::{RequestProcessor, SocketServer}, -}; -use qos_net::proxy_stream::ProxyStream; -use rustls::RootCertStore; - -struct Processor { - net_proxy: SocketAddress, -} - -impl Processor { - fn new(proxy_address: String) -> Self { - Processor { net_proxy: SocketAddress::new_unix(&proxy_address) } - } -} - -impl RequestProcessor for Processor { - fn process(&mut self, request: Vec) -> Vec { - let msg 
= PivotRemoteTlsMsg::try_from_slice(&request) - .expect("Received invalid message - test is broken!"); - - match msg { - PivotRemoteTlsMsg::RemoteTlsRequest { host, path } => { - let timeout = TimeVal::new(1, 0); - let mut stream = ProxyStream::connect_by_name( - &self.net_proxy, - timeout, - host.clone(), - 443, - vec!["8.8.8.8".to_string()], - 53, - ) - .unwrap(); - - let root_store = RootCertStore { - roots: webpki_roots::TLS_SERVER_ROOTS.into(), - }; - - let server_name: rustls::pki_types::ServerName<'_> = - host.clone().try_into().unwrap(); - let config: rustls::ClientConfig = - rustls::ClientConfig::builder() - .with_root_certificates(root_store) - .with_no_client_auth(); - let mut conn = rustls::ClientConnection::new( - Arc::new(config), - server_name, - ) - .unwrap(); - let mut tls = rustls::Stream::new(&mut conn, &mut stream); - - let http_request = - format!("GET {path} HTTP/1.1\r\nHost: {host}\r\nConnection: close\r\n\r\n"); - - tls.write_all(http_request.as_bytes()).unwrap(); - - let mut response_bytes = Vec::new(); - let read_to_end_result = tls.read_to_end(&mut response_bytes); - match read_to_end_result { - Ok(read_size) => { - assert!(read_size > 0); - - // Assert the connection isn't closed yet, and close it. - assert!(!stream.is_closed()); - stream.close().expect("unable to close stream"); - assert!(stream.is_closed()); - } - Err(e) => { - // Only EOF errors are expected. 
This means the - // connection was closed by the remote server https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof - if e.kind() != ErrorKind::UnexpectedEof { - panic!( - "unexpected error trying to read_to_end: {e:?}" - ); - } - } - } - - let fetched_content = - std::str::from_utf8(&response_bytes).unwrap(); - borsh::to_vec(&PivotRemoteTlsMsg::RemoteTlsResponse(format!( - "Content fetched successfully: {fetched_content}" - ))) - .expect("RemoteTlsResponse is valid borsh") - } - PivotRemoteTlsMsg::RemoteTlsResponse(_) => { - panic!("Unexpected RemoteTlsResponse - test is broken") - } - } - } -} - -fn main() { - // Parse args: - // - first argument is the socket to bind to (normal server server) - // - second argument is the socket to use for remote proxying - let args: Vec = std::env::args().collect(); - - let socket_path: &String = &args[1]; - let proxy_path: &String = &args[2]; - - SocketServer::listen( - SocketAddress::new_unix(socket_path), - Processor::new(proxy_path.to_string()), - ) - .unwrap(); -} diff --git a/src/integration/src/lib.rs b/src/integration/src/lib.rs index 2b030151..a7db3600 100644 --- a/src/integration/src/lib.rs +++ b/src/integration/src/lib.rs @@ -40,8 +40,6 @@ pub const PIVOT_ASYNC_REMOTE_TLS_PATH: &str = "../target/debug/pivot_async_remote_tls"; /// Path to an enclave app that has routes to test remote connection features. pub const QOS_NET_PATH: &str = "../target/debug/qos_net"; -/// Path to an enclave app that has routes to test async remote connection features. -pub const ASYNC_QOS_NET_PATH: &str = "../target/debug/async_qos_net"; /// Path to an enclave app that has routes to stress our socket. 
pub const PIVOT_SOCKET_STRESS_PATH: &str = "../target/debug/pivot_socket_stress"; diff --git a/src/integration/tests/async_remote_tls.rs b/src/integration/tests/async_remote_tls.rs index 0406b369..03d55e95 100644 --- a/src/integration/tests/async_remote_tls.rs +++ b/src/integration/tests/async_remote_tls.rs @@ -2,7 +2,7 @@ use std::{process::Command, time::Duration}; use borsh::BorshDeserialize; use integration::{ - PivotRemoteTlsMsg, ASYNC_QOS_NET_PATH, PIVOT_ASYNC_REMOTE_TLS_PATH, + PivotRemoteTlsMsg, PIVOT_ASYNC_REMOTE_TLS_PATH, QOS_NET_PATH, }; use qos_core::{ async_client::AsyncClient, @@ -20,7 +20,7 @@ const POOL_SIZE: &str = "1"; #[tokio::test] async fn fetch_async_remote_tls_content() { - let _net_proxy: ChildWrapper = Command::new(ASYNC_QOS_NET_PATH) + let _net_proxy: ChildWrapper = Command::new(QOS_NET_PATH) .arg("--usock") .arg(REMOTE_TLS_TEST_NET_PROXY_SOCKET) .arg("--pool-size") diff --git a/src/integration/tests/remote_tls.rs b/src/integration/tests/remote_tls.rs deleted file mode 100644 index 4e57415b..00000000 --- a/src/integration/tests/remote_tls.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::os::unix::net::UnixStream; -use std::time::{Duration, Instant}; -use std::{path::Path, process::Command, str}; - -use borsh::BorshDeserialize; -use integration::{PivotRemoteTlsMsg, PIVOT_REMOTE_TLS_PATH, QOS_NET_PATH}; -use qos_core::{ - client::Client, - io::{SocketAddress, TimeVal, TimeValLike}, - protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, -}; - -use qos_test_primitives::ChildWrapper; - -const REMOTE_TLS_TEST_NET_PROXY_SOCKET: &str = "/tmp/remote_tls_test.net.sock"; -const REMOTE_TLS_TEST_ENCLAVE_SOCKET: &str = - "/tmp/remote_tls_test.enclave.sock"; - -/// Waits for socket at `path` until it becomes ready. -/// If the socket isn't ready after `timeout`, this function panics. 
-fn wait_for_socket_ready>(path: P, timeout: Duration) { - let start = Instant::now(); - while start.elapsed() < timeout { - match UnixStream::connect(&path) { - Ok(_) => return, // socket is ready - Err(e) => { - // Error while connecting. Retry. - println!( - "[retrying] error while connecting at {}: {}", - path.as_ref().display(), - e - ) - } - } - std::thread::sleep(Duration::from_millis(50)); - } - panic!( - "Unable to connect to {}: timing out after retrying for {} seconds.", - path.as_ref().display(), - timeout.as_secs() - ); -} - -#[test] -fn fetch_remote_tls_content() { - let _net_proxy: ChildWrapper = Command::new(QOS_NET_PATH) - .arg("--usock") - .arg(REMOTE_TLS_TEST_NET_PROXY_SOCKET) - .spawn() - .unwrap() - .into(); - - let _enclave_app: ChildWrapper = Command::new(PIVOT_REMOTE_TLS_PATH) - .arg(REMOTE_TLS_TEST_ENCLAVE_SOCKET) - .arg(REMOTE_TLS_TEST_NET_PROXY_SOCKET) - .spawn() - .unwrap() - .into(); - - let enclave_client = Client::new( - SocketAddress::new_unix(REMOTE_TLS_TEST_ENCLAVE_SOCKET), - TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), - ); - - let app_request = borsh::to_vec(&PivotRemoteTlsMsg::RemoteTlsRequest { - host: "api.turnkey.com".to_string(), - path: "/health".to_string(), - }) - .unwrap(); - - wait_for_socket_ready( - REMOTE_TLS_TEST_NET_PROXY_SOCKET, - Duration::from_secs(2), - ); - wait_for_socket_ready( - REMOTE_TLS_TEST_ENCLAVE_SOCKET, - Duration::from_secs(2), - ); - - let response = enclave_client.send(&app_request).unwrap(); - let response_text = - match PivotRemoteTlsMsg::try_from_slice(&response).unwrap() { - PivotRemoteTlsMsg::RemoteTlsResponse(s) => s, - PivotRemoteTlsMsg::RemoteTlsRequest { host: _, path: _ } => { - panic!("unexpected RemoteTlsRequest sent as response") - } - }; - - assert!(response_text.contains("Content fetched successfully")); - assert!(response_text.contains("HTTP/1.1 200 OK")); - assert!(response_text.contains("currentTime")); - - let app_request = 
borsh::to_vec(&PivotRemoteTlsMsg::RemoteTlsRequest { - host: "www.googleapis.com".to_string(), - path: "/oauth2/v3/certs".to_string(), - }) - .unwrap(); - - let response = enclave_client.send(&app_request).unwrap(); - let response_text = - match PivotRemoteTlsMsg::try_from_slice(&response).unwrap() { - PivotRemoteTlsMsg::RemoteTlsResponse(s) => s, - PivotRemoteTlsMsg::RemoteTlsRequest { host: _, path: _ } => { - panic!("unexpected RemoteTlsRequest sent as response") - } - }; - - assert!(response_text.contains("Content fetched successfully")); - assert!(response_text.contains("HTTP/1.1 200 OK")); - assert!(response_text.contains("keys")); -} diff --git a/src/qos_net/Cargo.toml b/src/qos_net/Cargo.toml index b6713e63..eb6f8813 100644 --- a/src/qos_net/Cargo.toml +++ b/src/qos_net/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -qos_core = { path = "../qos_core", default-features = false } +qos_core = { path = "../qos_core", default-features = false, features = ["async"] } borsh = { version = "1.0", features = [ "std", @@ -21,6 +21,7 @@ rand = { version = "0.9.1", features = [ "thread_rng", ], default-features = false, optional = true } tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time"], default-features = false, optional = true } +tokio-rustls = { version = "0.26.2", optional = true } [dev-dependencies] qos_test_primitives = { path = "../qos_test_primitives" } @@ -34,11 +35,5 @@ webpki-roots = { version = "0.26.1" } [features] default = ["proxy"] # keep this as a default feature ensures we lint by default -async_proxy = ["hickory-resolver", "rand", "tokio", "qos_core/async"] -proxy = ["rand", "hickory-resolver", "tokio"] +proxy = ["hickory-resolver", "rand", "tokio", "qos_core/async", "tokio-rustls"] vm = ["qos_core/vm"] - -[[bin]] -name = "async_qos_net" -path = "src/bin/async_qos_net.rs" -required-features = ["async_proxy"] diff --git a/src/qos_net/src/async_cli.rs 
b/src/qos_net/src/async_cli.rs deleted file mode 100644 index af96dfb3..00000000 --- a/src/qos_net/src/async_cli.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! Async extension to the CLI -use crate::{ - async_proxy::AsyncProxyServer, - cli::{ProxyOpts, CLI}, -}; - -impl CLI { - /// Execute the enclave proxy CLI with the environment args in an async way. - pub async fn async_execute() { - use qos_core::async_server::AsyncSocketServer; - - let mut args: Vec = std::env::args().collect(); - let opts = ProxyOpts::new(&mut args); - - if opts.parsed.version() { - println!("version: {}", env!("CARGO_PKG_VERSION")); - } else if opts.parsed.help() { - println!("{}", opts.parsed.info()); - } else { - let server = AsyncSocketServer::listen_proxy( - opts.async_pool().expect("unable to create async socket pool"), - ) - .await - .expect("unable to get listen join handles"); - - match tokio::signal::ctrl_c().await { - Ok(_) => { - eprintln!("handling ctrl+c the tokio way"); - server.terminate(); - } - Err(err) => panic!("{err}"), - } - } - } -} diff --git a/src/qos_net/src/bin/async_qos_net.rs b/src/qos_net/src/bin/async_qos_net.rs deleted file mode 100644 index 9d445232..00000000 --- a/src/qos_net/src/bin/async_qos_net.rs +++ /dev/null @@ -1,17 +0,0 @@ -#[cfg(feature = "async_proxy")] -pub fn main() { - use qos_net::cli::CLI; - - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("tokio main to run") - .block_on(async { - CLI::async_execute().await; - }); -} - -#[cfg(not(feature = "async_proxy"))] -pub fn main() { - panic!("async qos_net invoked without async_proxy feature") -} diff --git a/src/qos_net/src/cli.rs b/src/qos_net/src/cli.rs index ccb9fd9f..996da064 100644 --- a/src/qos_net/src/cli.rs +++ b/src/qos_net/src/cli.rs @@ -5,9 +5,10 @@ use qos_core::{ parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; -#[cfg(feature = "async_proxy")] use qos_core::io::AsyncStreamPool; +use crate::async_proxy::AsyncProxyServer; + /// "cid" pub const CID: 
&str = "cid"; /// "port" @@ -34,7 +35,6 @@ impl ProxyOpts { /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and /// return the new `AsyncPool`. - #[cfg(feature = "async_proxy")] pub(crate) fn async_pool( &self, ) -> Result { @@ -98,13 +98,11 @@ impl ProxyOpts { pub struct CLI; impl CLI { - /// Execute the enclave proxy CLI with the environment args. - pub fn execute() { - use crate::proxy::Proxy; - use qos_core::server::SocketServer; - use std::env; + /// Execute the enclave proxy CLI with the environment args in an async way. + pub async fn execute() { + use qos_core::async_server::AsyncSocketServer; - let mut args: Vec = env::args().collect(); + let mut args: Vec = std::env::args().collect(); let opts = ProxyOpts::new(&mut args); if opts.parsed.version() { @@ -112,7 +110,19 @@ impl CLI { } else if opts.parsed.help() { println!("{}", opts.parsed.info()); } else { - SocketServer::listen(opts.addr(), Proxy::new()).unwrap(); + let server = AsyncSocketServer::listen_proxy( + opts.async_pool().expect("unable to create async socket pool"), + ) + .await + .expect("unable to get listen join handles"); + + match tokio::signal::ctrl_c().await { + Ok(_) => { + eprintln!("handling ctrl+c the tokio way"); + server.terminate(); + } + Err(err) => panic!("{err}"), + } } } } @@ -181,7 +191,6 @@ mod test { } #[test] - #[cfg(feature = "async_proxy")] fn parse_pool_size() { let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock", "--pool-size", "7"] diff --git a/src/qos_net/src/error.rs b/src/qos_net/src/error.rs index b0d7c370..d91d6b88 100644 --- a/src/qos_net/src/error.rs +++ b/src/qos_net/src/error.rs @@ -2,7 +2,7 @@ use std::net::AddrParseError; use borsh::{BorshDeserialize, BorshSerialize}; -#[cfg(any(feature = "proxy", feature = "async_proxy"))] +#[cfg(feature = "proxy")] use hickory_resolver::ResolveError; /// Errors related to creating and using proxy connections @@ -60,7 +60,7 @@ impl From for QosNetError { } } 
-#[cfg(any(feature = "proxy", feature = "async_proxy"))] +#[cfg(feature = "proxy")] impl From for QosNetError { fn from(err: ResolveError) -> Self { let msg = format!("{err:?}"); diff --git a/src/qos_net/src/lib.rs b/src/qos_net/src/lib.rs index 93642876..ea74616b 100644 --- a/src/qos_net/src/lib.rs +++ b/src/qos_net/src/lib.rs @@ -8,21 +8,12 @@ pub mod error; pub mod proxy_msg; -#[cfg(any(feature = "proxy", feature = "async_proxy"))] -pub mod cli; - -#[cfg(feature = "proxy")] -pub mod proxy; #[cfg(feature = "proxy")] -pub mod proxy_connection; -#[cfg(feature = "proxy")] -pub mod proxy_stream; - -#[cfg(feature = "async_proxy")] -pub mod async_cli; -#[cfg(feature = "async_proxy")] pub mod async_proxy; -#[cfg(feature = "async_proxy")] +#[cfg(feature = "proxy")] pub mod async_proxy_connection; -#[cfg(feature = "async_proxy")] +#[cfg(feature = "proxy")] pub mod async_proxy_stream; + +#[cfg(feature = "proxy")] +pub mod cli; diff --git a/src/qos_net/src/main.rs b/src/qos_net/src/main.rs index 2c8da593..f7d97063 100644 --- a/src/qos_net/src/main.rs +++ b/src/qos_net/src/main.rs @@ -1,10 +1,11 @@ #[cfg(feature = "proxy")] -pub fn main() { +#[tokio::main] +pub async fn main() { use qos_net::cli::CLI; - CLI::execute(); + CLI::execute().await; } -#[cfg(not(any(feature = "proxy", feature = "async_proxy")))] +#[cfg(not(any(feature = "proxy")))] pub fn main() { panic!("Cannot run qos_net CLI without proxy feature enabled") } diff --git a/src/qos_net/src/proxy.rs b/src/qos_net/src/proxy.rs deleted file mode 100644 index 9dcf2fce..00000000 --- a/src/qos_net/src/proxy.rs +++ /dev/null @@ -1,563 +0,0 @@ -//! 
Protocol proxy for our remote QOS net proxy -use std::{ - collections::HashMap, - io::{Read, Write}, -}; - -use borsh::BorshDeserialize; -use qos_core::server; - -use crate::{ - error::QosNetError, - proxy_connection::{self, ProxyConnection}, - proxy_msg::ProxyMsg, -}; -use rand::Rng; -use tokio::runtime::Runtime; - -const MEGABYTE: usize = 1024 * 1024; -const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; - -pub const DEFAULT_MAX_CONNECTION_SIZE: usize = 512; - -/// Socket<>TCP proxy to enable remote connections -pub struct Proxy { - connections: HashMap, - max_connections: usize, - tokio_runtime_context: Runtime, -} - -impl Default for Proxy { - fn default() -> Self { - Self::new() - } -} - -impl Proxy { - /// Create a new `Self`. - /// # Panics - /// Panics if Tokio setup fails - #[must_use] - pub fn new() -> Self { - Self { - connections: HashMap::new(), - max_connections: DEFAULT_MAX_CONNECTION_SIZE, - tokio_runtime_context: Runtime::new() - .expect("Failed to create tokio runtime"), - } - } - - #[must_use] - /// # Panics - /// Panics if Tokio setup fails - pub fn new_with_max_connections(max_connections: usize) -> Self { - Self { - connections: HashMap::new(), - max_connections, - tokio_runtime_context: Runtime::new() - .expect("Failed to create tokio runtime"), - } - } - - /// Save the connection in the proxy and assigns a connection ID - fn save_connection( - &mut self, - connection: ProxyConnection, - ) -> Result { - if self.connections.len() >= self.max_connections { - return Err(QosNetError::TooManyConnections(self.max_connections)); - } - let connection_id = self.next_id(); - if self.connections.contains_key(&connection_id) { - // This should never happen because "next_id" auto-increments - // Still, out of an abundance of caution, we error out here. 
- return Err(QosNetError::DuplicateConnectionId(connection_id)); - } - - match self.connections.insert(connection_id, connection) { - // Should never, ever happen because we checked above that the connection id was not present before proceeding. - // We explicitly handle this case here out of paranoia. If this happens, it means saving this connection - // overrode another. This is _very_ concerning. - Some(_) => Err(QosNetError::ConnectionOverridden(connection_id)), - // Normal case: no value was present before - None => Ok(connection_id), - } - } - - // Simple convenience method to get the next connection ID - // We use a simple strategy here: pick a random u128. - fn next_id(&mut self) -> u128 { - rand::rng().random::() - } - - fn remove_connection(&mut self, id: u128) -> Result<(), QosNetError> { - match self.get_connection(id) { - Some(_) => { - self.connections.remove(&id); - Ok(()) - } - None => Err(QosNetError::ConnectionIdNotFound(id)), - } - } - - fn get_connection(&mut self, id: u128) -> Option<&mut ProxyConnection> { - self.connections.get_mut(&id) - } - - /// Close a connection by its ID - pub fn close(&mut self, connection_id: u128) -> ProxyMsg { - match self.shutdown_and_remove_connection(connection_id) { - Ok(_) => ProxyMsg::CloseResponse { connection_id }, - Err(e) => ProxyMsg::ProxyError(e), - } - } - - fn shutdown_and_remove_connection( - &mut self, - id: u128, - ) -> Result<(), QosNetError> { - let conn = self - .get_connection(id) - .ok_or(QosNetError::ConnectionIdNotFound(id))?; - conn.shutdown()?; - self.remove_connection(id) - } - - /// Return the number of open remote connections - pub fn num_connections(&self) -> usize { - self.connections.len() - } - - /// Create a new connection by resolving a name into an IP - /// address. The TCP connection is opened and saved in internal state. 
- pub fn connect_by_name( - &mut self, - hostname: String, - port: u16, - dns_resolvers: Vec, - dns_port: u16, - ) -> ProxyMsg { - match proxy_connection::ProxyConnection::new_from_name( - hostname.clone(), - port, - dns_resolvers.clone(), - dns_port, - &self.tokio_runtime_context, - ) { - Ok(conn) => { - let remote_ip = conn.ip.clone(); - match self.save_connection(conn) { - Ok(connection_id) => { - println!( - "Connection to {hostname} established and saved as ID {connection_id}" - ); - ProxyMsg::ConnectResponse { connection_id, remote_ip } - } - Err(e) => { - println!("error saving connection: {e:?}"); - ProxyMsg::ProxyError(e) - } - } - } - Err(e) => { - println!("error while establishing connection: {e:?}"); - ProxyMsg::ProxyError(e) - } - } - } - - /// Create a new connection, targeting an IP address directly. - /// address. The TCP connection is opened and saved in internal state. - pub fn connect_by_ip(&mut self, ip: String, port: u16) -> ProxyMsg { - match proxy_connection::ProxyConnection::new_from_ip(ip.clone(), port) { - Ok(conn) => { - let remote_ip = conn.ip.clone(); - match self.save_connection(conn) { - Ok(connection_id) => { - println!("Connection to {ip} established and saved as ID {connection_id}"); - ProxyMsg::ConnectResponse { connection_id, remote_ip } - } - Err(e) => { - println!("error saving connection: {e:?}"); - ProxyMsg::ProxyError(e) - } - } - } - Err(e) => { - println!("error while establishing connection: {e:?}"); - ProxyMsg::ProxyError(e) - } - } - } - - /// Performs a Read on a connection - pub fn read(&mut self, connection_id: u128, size: usize) -> ProxyMsg { - if let Some(conn) = self.get_connection(connection_id) { - let mut buf: Vec = vec![0; size]; - match conn.read(&mut buf) { - Ok(0) => { - // A zero-sized read indicates a successful/graceful - // connection close. Close it on our side as well. 
- match self.shutdown_and_remove_connection(connection_id) { - Ok(_) => ProxyMsg::ReadResponse { - connection_id, - data: buf, - size: 0, - }, - Err(e) => ProxyMsg::ProxyError(e) - } - } - Ok(size) => { - ProxyMsg::ReadResponse { connection_id, data: buf, size } - } - Err(e) => match self.shutdown_and_remove_connection(connection_id) { - Ok(_) => ProxyMsg::ProxyError(e.into()), - // If we fail to shutdown / remove the connection we have 2 errors to communicate back up: the read error - // and the close error. We combine them under a single `IOError`, in the message. - Err(close_err) => ProxyMsg::ProxyError( - QosNetError::IOError( - format!( - "unable to read from connection: {}. Warning: unable to cleanly close to underlying connection: {:?}", - e, - close_err, - ) - ) - ), - } - } - } else { - ProxyMsg::ProxyError(QosNetError::ConnectionIdNotFound( - connection_id, - )) - } - } - - /// Performs a Write on an existing connection - pub fn write(&mut self, connection_id: u128, data: Vec) -> ProxyMsg { - if let Some(conn) = self.get_connection(connection_id) { - match conn.write(&data) { - Ok(size) => ProxyMsg::WriteResponse { connection_id, size }, - Err(e) => ProxyMsg::ProxyError(e.into()), - } - } else { - ProxyMsg::ProxyError(QosNetError::ConnectionIdNotFound( - connection_id, - )) - } - } - - /// Performs a Flush on an existing TCP connection - pub fn flush(&mut self, connection_id: u128) -> ProxyMsg { - if let Some(conn) = self.get_connection(connection_id) { - match conn.flush() { - Ok(_) => ProxyMsg::FlushResponse { connection_id }, - Err(e) => ProxyMsg::ProxyError(e.into()), - } - } else { - ProxyMsg::ProxyError(QosNetError::ConnectionIdNotFound( - connection_id, - )) - } - } -} - -impl server::RequestProcessor for Proxy { - fn process(&mut self, req_bytes: Vec) -> Vec { - if req_bytes.len() > MAX_ENCODED_MSG_LEN { - return borsh::to_vec(&ProxyMsg::ProxyError( - QosNetError::OversizedPayload, - )) - .expect("ProtocolMsg can always be serialized. 
qed."); - } - - let resp = match ProxyMsg::try_from_slice(&req_bytes) { - Ok(req) => match req { - ProxyMsg::StatusRequest => { - ProxyMsg::StatusResponse(self.connections.len()) - } - ProxyMsg::ConnectByNameRequest { - hostname, - port, - dns_resolvers, - dns_port, - } => self.connect_by_name( - hostname.clone(), - port, - dns_resolvers, - dns_port, - ), - ProxyMsg::ConnectByIpRequest { ip, port } => { - self.connect_by_ip(ip, port) - } - ProxyMsg::CloseRequest { connection_id } => { - self.close(connection_id) - } - ProxyMsg::ReadRequest { connection_id, size } => { - self.read(connection_id, size) - } - ProxyMsg::WriteRequest { connection_id, data } => { - self.write(connection_id, data) - } - ProxyMsg::FlushRequest { connection_id } => { - self.flush(connection_id) - } - ProxyMsg::ProxyError(_) => { - ProxyMsg::ProxyError(QosNetError::InvalidMsg) - } - ProxyMsg::StatusResponse(_) => { - ProxyMsg::ProxyError(QosNetError::InvalidMsg) - } - ProxyMsg::ConnectResponse { - connection_id: _, - remote_ip: _, - } => ProxyMsg::ProxyError(QosNetError::InvalidMsg), - ProxyMsg::CloseResponse { connection_id: _ } => { - ProxyMsg::ProxyError(QosNetError::InvalidMsg) - } - ProxyMsg::WriteResponse { connection_id: _, size: _ } => { - ProxyMsg::ProxyError(QosNetError::InvalidMsg) - } - ProxyMsg::FlushResponse { connection_id: _ } => { - ProxyMsg::ProxyError(QosNetError::InvalidMsg) - } - ProxyMsg::ReadResponse { - connection_id: _, - size: _, - data: _, - } => ProxyMsg::ProxyError(QosNetError::InvalidMsg), - }, - Err(_) => ProxyMsg::ProxyError(QosNetError::InvalidMsg), - }; - - borsh::to_vec(&resp) - .expect("Protocol message can always be serialized. 
qed!") - } -} - -#[cfg(test)] -mod test { - use std::str::from_utf8; - - use server::RequestProcessor; - - use super::*; - - #[test] - fn simple_status_request() { - let mut proxy = Proxy::new(); - let request = borsh::to_vec(&ProxyMsg::StatusRequest).unwrap(); - let response = proxy.process(request); - let msg = ProxyMsg::try_from_slice(&response).unwrap(); - assert_eq!(msg, ProxyMsg::StatusResponse(0)); - } - - #[test] - fn fetch_plaintext_http_from_api_turnkey_com() { - let mut proxy = Proxy::new(); - assert_eq!(proxy.num_connections(), 0); - - let request = borsh::to_vec(&ProxyMsg::ConnectByNameRequest { - hostname: "api.turnkey.com".to_string(), - port: 443, - dns_resolvers: vec!["8.8.8.8".to_string()], - dns_port: 53, - }) - .unwrap(); - let response = proxy.process(request); - let msg = ProxyMsg::try_from_slice(&response).unwrap(); - let connection_id = match msg { - ProxyMsg::ConnectResponse { connection_id, remote_ip: _ } => { - connection_id - } - _ => { - panic!("test failure: msg is not ConnectResponse") - } - }; - let http_request = - "GET / HTTP/1.1\r\nHost: api.turnkey.com\r\nConnection: close\r\n\r\n".to_string(); - - let request = borsh::to_vec(&ProxyMsg::WriteRequest { - connection_id, - data: http_request.as_bytes().to_vec(), - }) - .unwrap(); - let response = proxy.process(request); - let msg: ProxyMsg = ProxyMsg::try_from_slice(&response).unwrap(); - assert!(matches!( - msg, - ProxyMsg::WriteResponse { connection_id: _, size: _ } - )); - - // Check that we now have an active connection - assert_eq!(proxy.num_connections(), 1); - - let request = - borsh::to_vec(&ProxyMsg::ReadRequest { connection_id, size: 512 }) - .unwrap(); - let response = proxy.process(request); - let msg: ProxyMsg = ProxyMsg::try_from_slice(&response).unwrap(); - let data = match msg { - ProxyMsg::ReadResponse { connection_id: _, size: _, data } => data, - _ => { - panic!("test failure: msg is not ReadResponse") - } - }; - - let response = from_utf8(&data).unwrap(); - 
assert!(response.contains("HTTP/1.1 400 Bad Request")); - assert!(response.contains("plain HTTP request was sent to HTTPS port")); - } - - #[test] - fn error_when_connection_limit_is_reached() { - let mut proxy = Proxy::new_with_max_connections(2); - - let connect1 = proxy.connect_by_ip("8.8.8.8".to_string(), 53); - assert!(matches!( - connect1, - ProxyMsg::ConnectResponse { connection_id: _, remote_ip: _ } - )); - assert_eq!(proxy.num_connections(), 1); - - let connect2 = proxy.connect_by_ip("8.8.8.8".to_string(), 53); - assert!(matches!( - connect2, - ProxyMsg::ConnectResponse { connection_id: _, remote_ip: _ } - )); - assert_eq!(proxy.num_connections(), 2); - - let connect3 = proxy.connect_by_ip("8.8.8.8".to_string(), 53); - assert!(matches!( - connect3, - ProxyMsg::ProxyError(QosNetError::TooManyConnections(2)) - )); - } - - #[test] - fn closes_connections() { - let mut proxy = Proxy::new_with_max_connections(2); - - let connect = proxy.connect_by_ip("1.1.1.1".to_string(), 53); - assert_eq!(proxy.num_connections(), 1); - - match connect { - ProxyMsg::ConnectResponse { connection_id, remote_ip: _ } => { - assert_eq!( - proxy.close(connection_id), - ProxyMsg::CloseResponse { connection_id } - ); - assert_eq!(proxy.num_connections(), 0) - } - _ => panic!( - "test failure: expected ConnectResponse and got: {connect:?}" - ), - } - } - - /// Check how the upstream resolver deals with a known-bad DNSSEC protected domain - /// It does NOT actively test the security behavior of our local DNSSEC verification - #[test] - fn test_lookup_domain_bad_dnssec_record() { - let mut proxy = Proxy::new(); - assert_eq!(proxy.num_connections(), 0); - - let connect = proxy.connect_by_name( - "sigfail.ippacket.stream".to_string(), - 443, - vec!["8.8.8.8".to_string()], - 53, - ); - - assert_eq!(proxy.num_connections(), 0); - match connect { - ProxyMsg::ProxyError(qos_error) => { - // the upstream resolver lets us know with SERVFAIL that a DNSSEC check failed - assert_eq!( - qos_error, - 
QosNetError::DNSResolutionError("ResolveError { kind: Proto(ProtoError { kind: Message(\"\ - could not validate negative response missing SOA\") }) }".to_string()) - ); - } - _ => { - panic!("test failure: the resolution should fail: {connect:?}") - } - } - - // test domain seen in https://bind9.readthedocs.io/en/v9.18.14/dnssec-guide.html - let connect = proxy.connect_by_name( - "www.dnssec-failed.org".to_string(), - 443, - vec!["8.8.8.8".to_string()], - 53, - ); - - assert_eq!(proxy.num_connections(), 0); - match connect { - ProxyMsg::ProxyError(qos_error) => { - // the upstream resolver lets us know with SERVFAIL that a DNSSEC check failed - assert_eq!( - qos_error, - QosNetError::DNSResolutionError("ResolveError { kind: Proto(ProtoError { kind: Message(\"\ - could not validate negative response missing SOA\") }) }".to_string()) - ); - } - _ => { - panic!("test failure: the resolution should fail: {connect:?}") - } - } - } - - #[test] - /// Check how the upstream resolver deals with a known-good DNSSEC protected domain - /// It does NOT actively test the security behavior of our local DNSSEC verification - fn test_lookup_domain_good_dnssec_record() { - let mut proxy = Proxy::new(); - assert_eq!(proxy.num_connections(), 0); - - let connect = proxy.connect_by_name( - "sigok.ippacket.stream".to_string(), - 443, - vec!["8.8.8.8".to_string()], - 53, - ); - - assert_eq!(proxy.num_connections(), 1); - match connect { - ProxyMsg::ConnectResponse { connection_id: _, remote_ip } => { - assert_eq!(remote_ip, "195.201.14.36"); - } - _ => { - panic!( - "test failure: the resolution should succeed: {connect:?}" - ) - } - } - } - - /// Test that resolving a domain without DNSSEC records still works - #[test] - fn test_lookup_domain_no_dnssec_successful() { - let mut proxy = Proxy::new(); - assert_eq!(proxy.num_connections(), 0); - - // as of 6/2025, google.com doesn't have DNSSEC records - let connect = proxy.connect_by_name( - "google.com".to_string(), - 443, - 
vec!["8.8.8.8".to_string()], - 53, - ); - - assert_eq!(proxy.num_connections(), 1); - match connect { - ProxyMsg::ConnectResponse { connection_id: _, remote_ip: _ } => { - // any normal response is OK, we don't expect a specific IP - } - _ => { - panic!( - "test failure: the resolution should succeed: {connect:?}" - ) - } - } - } -} diff --git a/src/qos_net/src/proxy_connection.rs b/src/qos_net/src/proxy_connection.rs deleted file mode 100644 index 86dc8c1d..00000000 --- a/src/qos_net/src/proxy_connection.rs +++ /dev/null @@ -1,209 +0,0 @@ -//! Contains logic for remote connection establishment: DNS resolution and TCP -//! connection. -use crate::error::QosNetError; -use hickory_resolver::name_server::TokioConnectionProvider; -use hickory_resolver::{ - config::{NameServerConfigGroup, ResolverConfig, ResolverOpts}, - Resolver, -}; -use std::{ - io::{Read, Write}, - net::{AddrParseError, IpAddr, SocketAddr, TcpStream}, -}; -use tokio::runtime::Runtime; - -/// Struct representing a TCP connection held on our proxy -pub struct ProxyConnection { - /// IP address of the remote host - pub ip: String, - /// TCP stream object - tcp_stream: TcpStream, -} - -impl ProxyConnection { - /// Create a new `ProxyConnection` from a name. This results in a DNS - /// request + TCP connection - pub fn new_from_name( - hostname: String, - port: u16, - dns_resolvers: Vec, - dns_port: u16, - tokio_runtime_context: &Runtime, - ) -> Result { - let ip = resolve_hostname( - hostname, - dns_resolvers, - dns_port, - tokio_runtime_context, - )?; - let tcp_addr = SocketAddr::new(ip, port); - let tcp_stream = TcpStream::connect(tcp_addr)?; - - Ok(ProxyConnection { ip: ip.to_string(), tcp_stream }) - } - - /// Create a new `ProxyConnection` from an IP address. 
This results in a - /// new TCP connection - pub fn new_from_ip( - ip: String, - port: u16, - ) -> Result { - let ip_addr = ip.parse()?; - let tcp_addr = SocketAddr::new(ip_addr, port); - let tcp_stream = TcpStream::connect(tcp_addr)?; - - Ok(ProxyConnection { ip, tcp_stream }) - } - - /// Closes the underlying TCP connection (`Shutdown::Both`) - pub fn shutdown(&mut self) -> Result<(), QosNetError> { - if let Err(e) = self.tcp_stream.shutdown(std::net::Shutdown::Both) { - if e.kind() == std::io::ErrorKind::NotConnected { - return Ok(()); - } - return Err(QosNetError::from(e)); - } - Ok(()) - } -} - -impl Read for ProxyConnection { - fn read(&mut self, buf: &mut [u8]) -> Result { - self.tcp_stream.read(buf) - } -} - -impl Write for ProxyConnection { - fn write(&mut self, buf: &[u8]) -> Result { - self.tcp_stream.write(buf) - } - fn flush(&mut self) -> std::io::Result<()> { - self.tcp_stream.flush() - } -} - -// Resolve a name into an IP address -fn resolve_hostname( - hostname: String, - resolver_addrs: Vec, - port: u16, - tokio_runtime_context: &Runtime, -) -> Result { - let resolver_parsed_addrs = resolver_addrs - .iter() - .map(|resolver_address| { - let ip_addr: Result = - resolver_address.parse(); - ip_addr - }) - .collect::, AddrParseError>>()?; - - let resolver_config = ResolverConfig::from_parts( - None, - vec![], - NameServerConfigGroup::from_ips_clear( - &resolver_parsed_addrs, - port, - true, - ), - ); - - let mut resolver_builder = Resolver::builder_with_config( - resolver_config, - TokioConnectionProvider::default(), - ); - let mut resolver_options = ResolverOpts::default(); - // this validates DNSSEC in responses if DNSSEC is present - // it still allows responses without DNSSEC to succeed, limiting its effectiveness - // against on-path MITM attackers, but is preferrable to not checking response validity - resolver_options.validate = true; - - // enable case randomization for improved security - // see 
https://developers.google.com/speed/public-dns/docs/security#randomize_case - resolver_options.case_randomization = true; - - // set our improved resolver options - *resolver_builder.options_mut() = resolver_options; - - let resolver = resolver_builder.build(); - - // needed for borrowing in async block - let cloned_hostname = hostname.clone(); - let response = tokio_runtime_context - .block_on(async move { resolver.lookup_ip(cloned_hostname).await }) - .map_err(QosNetError::from)?; - - response.iter().next().ok_or_else(|| { - QosNetError::DNSResolutionError(format!( - "Empty response when querying for host {hostname}" - )) - }) -} - -#[cfg(test)] -mod test { - - use std::{ - io::{ErrorKind, Read, Write}, - sync::Arc, - }; - - use rustls::{RootCertStore, SupportedCipherSuite}; - - use super::*; - - #[test] - fn can_fetch_tls_content_with_proxy_connection() { - let host = "api.turnkey.com"; - let path = "/health"; - - // manually set up a Tokio runtime - let runtime = Runtime::new().expect("Failed to create tokio runtime"); - - let mut remote_connection = ProxyConnection::new_from_name( - host.to_string(), - 443, - vec!["8.8.8.8".to_string()], - 53, - &runtime, - ) - .unwrap(); - - drop(runtime); - - let root_store = - RootCertStore { roots: webpki_roots::TLS_SERVER_ROOTS.into() }; - - let server_name: rustls::pki_types::ServerName<'_> = - host.try_into().unwrap(); - let config: rustls::ClientConfig = rustls::ClientConfig::builder() - .with_root_certificates(root_store) - .with_no_client_auth(); - let mut conn = - rustls::ClientConnection::new(Arc::new(config), server_name) - .unwrap(); - let mut tls = rustls::Stream::new(&mut conn, &mut remote_connection); - - let http_request = format!( - "GET {path} HTTP/1.1\r\nHost: {host}\r\nConnection: close\r\n\r\n" - ); - - tls.write_all(http_request.as_bytes()).unwrap(); - let ciphersuite = tls.conn.negotiated_cipher_suite().unwrap(); - assert!(matches!(ciphersuite, SupportedCipherSuite::Tls13(_))); - - let mut 
response_bytes = Vec::new(); - let read_to_end_result = tls.read_to_end(&mut response_bytes); - - // Ignore eof errors: https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof - assert!( - read_to_end_result.is_ok() - || (read_to_end_result - .is_err_and(|e| e.kind() == ErrorKind::UnexpectedEof)) - ); - - let response_text = std::str::from_utf8(&response_bytes).unwrap(); - assert!(response_text.contains("HTTP/1.1 200 OK")); - assert!(response_text.contains("currentTime")); - } -} diff --git a/src/qos_net/src/proxy_stream.rs b/src/qos_net/src/proxy_stream.rs deleted file mode 100644 index 490206ff..00000000 --- a/src/qos_net/src/proxy_stream.rs +++ /dev/null @@ -1,595 +0,0 @@ -//! Contains an abstraction to implement the standard library's Read/Write -//! traits with `ProxyMsg`s. -use std::io::{ErrorKind, Read, Write}; - -use borsh::BorshDeserialize; -use qos_core::io::{SocketAddress, Stream, TimeVal}; - -use crate::{error::QosNetError, proxy_msg::ProxyMsg}; - -/// Struct representing a remote connection -/// This is going to be used by enclaves, on the other side of a socket -pub struct ProxyStream { - /// socket address to create the underlying `Stream` over which we send - /// `ProxyMsg`s - addr: SocketAddress, - /// timeout to create the underlying `Stream` - timeout: TimeVal, - /// Whether the underlying stream has been closed - is_closed: bool, - /// Once a connection is established (successful `ConnectByName` or - /// `ConnectByIp` request), this connection ID is set to the u32 in - /// `ConnectResponse`. 
- pub connection_id: u128, - /// The remote host this connection points to - pub remote_hostname: Option, - /// The remote IP this connection points to - pub remote_ip: String, -} - -impl ProxyStream { - /// Create a new ProxyStream by targeting a hostname - /// - /// # Arguments - /// - /// * `addr` - the USOCK or VSOCK to connect to (this socket should be bound - /// to a qos_net proxy) `timeout` is the timeout applied to the socket - /// * `timeout` - the timeout to connect with - /// * `hostname` - the hostname to connect to (the remote qos_net proxy will - /// resolve DNS) - /// * `port` - the port the remote qos_net proxy should connect to - /// (typically: 80 or 443 for http/https) - /// * `dns_resolvers` - array of resolvers to use to resolve `hostname` - /// * `dns_port` - DNS port to use while resolving DNS (typically: 53 or - /// 853) - pub fn connect_by_name( - addr: &SocketAddress, - timeout: TimeVal, - hostname: String, - port: u16, - dns_resolvers: Vec, - dns_port: u16, - ) -> Result { - let stream = Stream::connect(addr, timeout)?; - let req = borsh::to_vec(&ProxyMsg::ConnectByNameRequest { - hostname: hostname.clone(), - port, - dns_resolvers, - dns_port, - }) - .expect("ProtocolMsg can always be serialized."); - stream.send(&req)?; - let resp_bytes = stream.recv()?; - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::ConnectResponse { connection_id, remote_ip } => { - Ok(Self { - addr: addr.clone(), - timeout, - connection_id, - remote_ip, - remote_hostname: Some(hostname), - is_closed: false, - }) - } - _ => Err(QosNetError::InvalidMsg), - }, - Err(_) => Err(QosNetError::InvalidMsg), - } - } - - /// Create a new ProxyStream by targeting an IP address directly. 
- /// - /// # Arguments - /// * `addr` - the USOCK or VSOCK to connect to (this socket should be bound - /// to a qos_net proxy) `timeout` is the timeout applied to the socket - /// * `timeout` - the timeout to connect with - /// * `ip` - the IP the remote qos_net proxy should connect to - /// * `port` - the port the remote qos_net proxy should connect to - /// (typically: 80 or 443 for http/https) - pub fn connect_by_ip( - addr: &SocketAddress, - timeout: TimeVal, - ip: String, - port: u16, - ) -> Result { - let stream: Stream = Stream::connect(addr, timeout)?; - let req = borsh::to_vec(&ProxyMsg::ConnectByIpRequest { ip, port }) - .expect("ProtocolMsg can always be serialized."); - stream.send(&req)?; - let resp_bytes = stream.recv()?; - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::ConnectResponse { connection_id, remote_ip } => { - Ok(Self { - addr: addr.clone(), - timeout, - connection_id, - remote_ip, - remote_hostname: None, - is_closed: false, - }) - } - _ => Err(QosNetError::InvalidMsg), - }, - Err(_) => Err(QosNetError::InvalidMsg), - } - } - - /// Close the remote connection - pub fn close(&mut self) -> Result<(), QosNetError> { - if self.is_closed() { - return Ok(()); - } - - let stream: Stream = Stream::connect(&self.addr, self.timeout)?; - let req = borsh::to_vec(&ProxyMsg::CloseRequest { - connection_id: self.connection_id, - }) - .expect("ProtocolMsg can always be serialized."); - stream.send(&req)?; - let resp_bytes = stream.recv()?; - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::CloseResponse { connection_id: _ } => { - self.is_closed = true; - Ok(()) - } - ProxyMsg::ProxyError(e) => Err(e), - _ => Err(QosNetError::InvalidMsg), - }, - Err(_) => Err(QosNetError::InvalidMsg), - } - } - - /// Getter function for the internal `is_closed` boolean. Call `.close()` to close the underlying connection. 
- pub fn is_closed(&self) -> bool { - self.is_closed - } -} - -impl Read for ProxyStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - let stream: Stream = Stream::connect(&self.addr, self.timeout) - .map_err(|e| { - std::io::Error::new( - ErrorKind::NotConnected, - format!("Error while connecting to socket (sending read request): {:?}", e), - ) - })?; - - let req = borsh::to_vec(&ProxyMsg::ReadRequest { - connection_id: self.connection_id, - size: buf.len(), - }) - .expect("ProtocolMsg can always be serialized."); - stream.send(&req).map_err(|e| { - std::io::Error::new( - ErrorKind::Other, - format!("QOS IOError: {:?}", e), - ) - })?; - let resp_bytes = stream.recv().map_err(|e| { - std::io::Error::new( - ErrorKind::Other, - format!("QOS IOError: {:?}", e), - ) - })?; - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::ReadResponse { connection_id: _, size, data } => { - if data.len() > buf.len() { - return Err(std::io::Error::new( - ErrorKind::InvalidData, - format!( - "overflow: cannot read {} bytes into a buffer of {} bytes", - data.len(), - buf.len() - ), - )); - } - - // Copy data into buffer - for (i, b) in data.iter().enumerate() { - buf[i] = *b - } - - // A 0-sized read means that the remote server has closed the connection gracefully - // If this happens we're clear to consider this stream closed. 
- if size == 0 { - self.is_closed = true; - } - - Ok(size) - } - ProxyMsg::ProxyError(e) => Err(std::io::Error::new( - ErrorKind::InvalidData, - format!("Proxy error: {e:?}"), - )), - _ => Err(std::io::Error::new( - ErrorKind::InvalidData, - "unexpected response", - )), - }, - Err(_) => Err(std::io::Error::new( - ErrorKind::InvalidData, - "cannot deserialize message", - )), - } - } -} - -impl Write for ProxyStream { - fn write(&mut self, buf: &[u8]) -> Result { - let stream: Stream = Stream::connect(&self.addr, self.timeout) - .map_err(|e| { - std::io::Error::new( - ErrorKind::NotConnected, - format!("Error while connecting to socket (sending read request): {:?}", e), - ) - })?; - - let req = borsh::to_vec(&ProxyMsg::WriteRequest { - connection_id: self.connection_id, - data: buf.to_vec(), - }) - .expect("ProtocolMsg can always be serialized."); - stream.send(&req).map_err(|e| { - std::io::Error::new( - ErrorKind::Other, - format!("QOS IOError sending WriteRequest: {:?}", e), - ) - })?; - - let resp_bytes = stream.recv().map_err(|e| { - std::io::Error::new( - ErrorKind::Other, - format!("QOS IOError receiving bytes from stream after WriteRequest: {:?}", e), - ) - })?; - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::WriteResponse { connection_id: _, size } => { - if size == 0 { - return Err(std::io::Error::new( - ErrorKind::Interrupted, - "Write failed: 0 bytes written", - )); - } - Ok(size) - } - _ => Err(std::io::Error::new( - ErrorKind::InvalidData, - "unexpected response", - )), - }, - Err(_) => Err(std::io::Error::new( - ErrorKind::InvalidData, - "cannot deserialize message", - )), - } - } - - fn flush(&mut self) -> Result<(), std::io::Error> { - let stream: Stream = Stream::connect(&self.addr, self.timeout) - .map_err(|e| { - std::io::Error::new( - ErrorKind::NotConnected, - format!("Error while connecting to socket (sending read request): {:?}", e), - ) - })?; - - let req = borsh::to_vec(&ProxyMsg::FlushRequest { - 
connection_id: self.connection_id, - }) - .expect("ProtocolMsg can always be serialized."); - - stream.send(&req).map_err(|e| { - std::io::Error::new( - ErrorKind::Other, - format!("QOS IOError sending FlushRequest: {:?}", e), - ) - })?; - - let resp_bytes = stream.recv().map_err(|e| { - std::io::Error::new( - ErrorKind::Other, - format!("QOS IOError receiving bytes from stream after FlushRequest: {:?}", e), - ) - })?; - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::FlushResponse { connection_id: _ } => Ok(()), - _ => Err(std::io::Error::new( - ErrorKind::InvalidData, - "unexpected response", - )), - }, - Err(_) => Err(std::io::Error::new( - ErrorKind::InvalidData, - "cannot deserialize message", - )), - } - } -} - -/// Implements drop. Clients are expected to call `close()` manually if error handling is needed. -/// Otherwise this implementation will catch non-closed connections and forcefully close them on drop. -impl Drop for ProxyStream { - fn drop(&mut self) { - if !self.is_closed() { - self.close().expect("unable to close the connection cleanly") - } - } -} - -#[cfg(test)] -mod test { - - use std::{io::ErrorKind, sync::Arc}; - - use chunked_transfer::Decoder; - use httparse::Response; - use qos_core::server::RequestProcessor; - use rustls::{RootCertStore, SupportedCipherSuite}; - use serde_json::Value; - - use super::*; - use crate::proxy::Proxy; - - #[test] - fn can_fetch_and_parse_chunked_json_over_tls_with_local_stream() { - let host = "www.googleapis.com"; - let path = "/oauth2/v3/certs"; - - let mut stream = LocalStream::new_by_name( - host.to_string(), - 443, - vec!["8.8.8.8".to_string()], - 53, - ) - .unwrap(); - assert_eq!(stream.num_connections(), 1); - - assert_eq!( - stream.remote_hostname, - Some("www.googleapis.com".to_string()) - ); - - let root_store = - RootCertStore { roots: webpki_roots::TLS_SERVER_ROOTS.into() }; - - let server_name: rustls::pki_types::ServerName<'_> = - host.try_into().unwrap(); - 
let config: rustls::ClientConfig = rustls::ClientConfig::builder() - .with_root_certificates(root_store) - .with_no_client_auth(); - let mut conn = - rustls::ClientConnection::new(Arc::new(config), server_name) - .unwrap(); - let mut tls = rustls::Stream::new(&mut conn, &mut stream); - - let http_request = format!( - "GET {path} HTTP/1.1\r\nHost: {host}\r\nConnection: close\r\n\r\n" - ); - - tls.write_all(http_request.as_bytes()).unwrap(); - let ciphersuite = tls.conn.negotiated_cipher_suite().unwrap(); - assert!(matches!(ciphersuite, SupportedCipherSuite::Tls13(_))); - - let mut response_bytes = Vec::new(); - let read_to_end_result = tls.read_to_end(&mut response_bytes); - - match read_to_end_result { - Ok(read_size) => { - assert!(read_size > 0); - // Close the connection - let closed = stream.close(); - assert!(closed.is_ok()); - } - Err(e) => { - // Only EOF errors are expected. This means the connection was - // closed by the remote server https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof - assert_eq!(e.kind(), ErrorKind::UnexpectedEof) - } - } - // We should be at 0 connections in our proxy: either the remote - // auto-closed (UnexpectedEof), or we did. 
- assert_eq!(stream.num_connections(), 0); - - // Parse headers with httparse - let mut headers = [httparse::EMPTY_HEADER; 16]; - let mut response = Response::new(&mut headers); - let res = httparse::ParserConfig::default() - .parse_response(&mut response, &response_bytes); - assert!(matches!(res, Ok(httparse::Status::Complete(..)))); - assert_eq!(response.code, Some(200)); - let header_byte_size = res.unwrap().unwrap(); - - // Assert that the response is chunk-encoded - let transfer_encoding_header = - response.headers.iter().find(|h| h.name == "Transfer-Encoding"); - assert!(transfer_encoding_header.is_some()); - assert_eq!( - transfer_encoding_header.unwrap().value, - "chunked".as_bytes() - ); - - // Decode the chunked content - let mut decoded = String::new(); - let mut decoder = Decoder::new(&response_bytes[header_byte_size..]); - let res = decoder.read_to_string(&mut decoded); - assert!(res.is_ok()); - - // Parse the JSON response body and make sure there is a proper "keys" - // array in it - let json_content: Value = serde_json::from_str(&decoded).unwrap(); - assert!(json_content["keys"].is_array()); - } - - /// Struct representing a stream, with direct access to the proxy. - /// Useful in tests! 
:) - struct LocalStream { - proxy: Box, - pub connection_id: u128, - pub remote_hostname: Option, - } - - impl LocalStream { - pub fn new_by_name( - hostname: String, - port: u16, - dns_resolvers: Vec, - dns_port: u16, - ) -> Result { - let req = borsh::to_vec(&ProxyMsg::ConnectByNameRequest { - hostname: hostname.clone(), - port, - dns_resolvers, - dns_port, - }) - .expect("ProtocolMsg can always be serialized."); - let mut proxy = Box::new(Proxy::new()); - let resp_bytes = proxy.process(req); - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::ConnectResponse { - connection_id, - remote_ip: _, - } => Ok(Self { - proxy, - connection_id, - remote_hostname: Some(hostname), - }), - _ => Err(QosNetError::InvalidMsg), - }, - Err(_) => Err(QosNetError::InvalidMsg), - } - } - - pub fn close(&mut self) -> Result<(), QosNetError> { - match self.proxy.close(self.connection_id) { - ProxyMsg::CloseResponse { connection_id: _ } => Ok(()), - _ => Err(QosNetError::InvalidMsg), - } - } - - pub fn num_connections(&self) -> usize { - self.proxy.num_connections() - } - } - - impl Read for LocalStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - let req = borsh::to_vec(&ProxyMsg::ReadRequest { - connection_id: self.connection_id, - size: buf.len(), - }) - .expect("ProtocolMsg can always be serialized."); - let resp_bytes = self.proxy.process(req); - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::ReadResponse { connection_id: _, size, data } => { - if data.is_empty() { - return Err(std::io::Error::new( - ErrorKind::Interrupted, - "empty Read", - )); - } - if data.len() > buf.len() { - return Err(std::io::Error::new( - ErrorKind::InvalidData, - format!( - "overflow: cannot read {} bytes into a buffer of {} bytes", - data.len(), - buf.len() - ), - )); - } - - // Copy data into buffer - for (i, b) in data.iter().enumerate() { - buf[i] = *b - } - Ok(size) - } - ProxyMsg::ProxyError(e) => 
Err(std::io::Error::new( - ErrorKind::InvalidData, - format!("Proxy error: {e:?}"), - )), - _ => Err(std::io::Error::new( - ErrorKind::InvalidData, - "unexpected response", - )), - }, - Err(_) => Err(std::io::Error::new( - ErrorKind::InvalidData, - "cannot deserialize message", - )), - } - } - } - - impl Write for LocalStream { - fn write(&mut self, buf: &[u8]) -> Result { - let req = borsh::to_vec(&ProxyMsg::WriteRequest { - connection_id: self.connection_id, - data: buf.to_vec(), - }) - .expect("ProtocolMsg can always be serialized."); - let resp_bytes = self.proxy.process(req); - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::WriteResponse { connection_id: _, size } => { - if size == 0 { - return Err(std::io::Error::new( - ErrorKind::Interrupted, - "failed Write", - )); - } - Ok(size) - } - _ => Err(std::io::Error::new( - ErrorKind::InvalidData, - "unexpected response", - )), - }, - Err(_) => Err(std::io::Error::new( - ErrorKind::InvalidData, - "cannot deserialize message", - )), - } - } - - fn flush(&mut self) -> Result<(), std::io::Error> { - let req = borsh::to_vec(&ProxyMsg::FlushRequest { - connection_id: self.connection_id, - }) - .expect("ProtocolMsg can always be serialized."); - let resp_bytes = self.proxy.process(req); - - match ProxyMsg::try_from_slice(&resp_bytes) { - Ok(resp) => match resp { - ProxyMsg::FlushResponse { connection_id: _ } => Ok(()), - _ => Err(std::io::Error::new( - ErrorKind::InvalidData, - "unexpected response", - )), - }, - Err(_) => Err(std::io::Error::new( - ErrorKind::InvalidData, - "cannot deserialize message", - )), - } - } - } -} From e11bd6d71cfff998701d3de8d2cbd9104b333d4d Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Tue, 15 Jul 2025 11:23:29 -0700 Subject: [PATCH 15/20] init: remove "async" feature, cleanup --- src/init/Cargo.toml | 7 ++----- src/init/init.rs | 27 --------------------------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/src/init/Cargo.toml 
b/src/init/Cargo.toml index dc09104d..eced8cd1 100644 --- a/src/init/Cargo.toml +++ b/src/init/Cargo.toml @@ -8,13 +8,10 @@ publish = false libc = "0.2.172" qos_aws = { path = "../qos_aws"} qos_system = { path = "../qos_system"} -qos_core = { path = "../qos_core", features = ["vm"], default-features = false } +qos_core = { path = "../qos_core", features = ["vm", "async"], default-features = false } qos_nsm = { path = "../qos_nsm", default-features = false } -tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time", "signal"], default-features = false, optional = true} +tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time", "signal"], default-features = false } [[bin]] name = "init" path = "init.rs" - -[features] -async = ["qos_core/async", "tokio"] diff --git a/src/init/init.rs b/src/init/init.rs index 9d5f44b4..99bae988 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -54,33 +54,6 @@ fn boot() { init_platform(); } -#[cfg(not(feature = "async"))] -fn main() { - boot(); - dmesg("QuorumOS Booted".to_string()); - - let cid = get_local_cid().unwrap(); - dmesg(format!("CID is {}", cid)); - - let handles = Handles::new( - EPHEMERAL_KEY_FILE.to_string(), - QUORUM_FILE.to_string(), - MANIFEST_FILE.to_string(), - PIVOT_FILE.to_string(), - ); - - Reaper::execute( - &handles, - Box::new(Nsm), - SocketAddress::new_vsock(cid, 3, VMADDR_NO_FLAGS), - SocketAddress::new_unix(SEC_APP_SOCK), - None, - ); - - reboot(); -} - -#[cfg(feature = "async")] #[tokio::main] async fn main() { use qos_core::io::{AsyncStreamPool, TimeVal, TimeValLike}; From 37464535414eb75f1809dc9322753580ae63ff4f Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Tue, 15 Jul 2025 11:45:43 -0700 Subject: [PATCH 16/20] qos_host: remove "async" feature and cleanup --- src/Makefile | 2 - src/integration/tests/async_qos_host.rs | 25 ++- src/integration/tests/boot.rs | 2 +- src/integration/tests/dev_boot.rs | 94 +-------- 
src/integration/tests/genesis.rs | 2 +- src/integration/tests/key.rs | 4 +- src/integration/tests/qos_host.rs | 2 +- src/qos_host/Cargo.toml | 8 +- src/qos_host/src/bin/async_qos_host.rs | 11 -- src/qos_host/src/cli.rs | 16 +- src/qos_host/src/lib.rs | 253 +----------------------- 11 files changed, 25 insertions(+), 394 deletions(-) delete mode 100644 src/qos_host/src/bin/async_qos_host.rs diff --git a/src/Makefile b/src/Makefile index abc2369a..02306b47 100644 --- a/src/Makefile +++ b/src/Makefile @@ -161,8 +161,6 @@ test: cargo build --all @# We also need the async version of qos_core cargo build --bin async_qos_core --features async,mock - @# We also need the async version of qos_host - cargo build --bin async_qos_host --features async @# Run tests cargo test @# When we build the workspace it resolves with the qos_core mock feature diff --git a/src/integration/tests/async_qos_host.rs b/src/integration/tests/async_qos_host.rs index 9d947548..e8434e43 100644 --- a/src/integration/tests/async_qos_host.rs +++ b/src/integration/tests/async_qos_host.rs @@ -10,19 +10,18 @@ async fn connects_and_gets_info() { // prep sock pool dir std::fs::create_dir_all("/tmp/async_qos_host_test").unwrap(); - let _qos_host: ChildWrapper = - Command::new("../target/debug/async_qos_host") - .arg("--usock") - .arg(TEST_ENCLAVE_SOCKET) - .arg("--host-ip") - .arg("127.0.0.1") - .arg("--host-port") - .arg("3323") - .arg("--socket-timeout") - .arg("50") // ms - .spawn() - .unwrap() - .into(); + let _qos_host: ChildWrapper = Command::new("../target/debug/qos_host") + .arg("--usock") + .arg(TEST_ENCLAVE_SOCKET) + .arg("--host-ip") + .arg("127.0.0.1") + .arg("--host-port") + .arg("3323") + .arg("--socket-timeout") + .arg("50") // ms + .spawn() + .unwrap() + .into(); tokio::time::sleep(Duration::from_millis(100)).await; // let the qos_host start diff --git a/src/integration/tests/boot.rs b/src/integration/tests/boot.rs index bb32cef2..09e272f7 100644 --- a/src/integration/tests/boot.rs +++ 
b/src/integration/tests/boot.rs @@ -252,7 +252,7 @@ async fn standard_boot_e2e() { // -- ENCLAVE start enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/qos_core") + Command::new("../target/debug/async_qos_core") .args([ "--usock", &*usock, diff --git a/src/integration/tests/dev_boot.rs b/src/integration/tests/dev_boot.rs index 1383df2d..18360d53 100644 --- a/src/integration/tests/dev_boot.rs +++ b/src/integration/tests/dev_boot.rs @@ -1,9 +1,6 @@ use std::{fs, path::Path, process::Command}; -use integration::{ - LOCAL_HOST, PIVOT_OK3_PATH, PIVOT_OK3_SUCCESS_FILE, PIVOT_OK4_PATH, - PIVOT_OK4_SUCCESS_FILE, -}; +use integration::{LOCAL_HOST, PIVOT_OK3_PATH, PIVOT_OK3_SUCCESS_FILE}; use qos_test_primitives::{ChildWrapper, PathWrapper}; #[tokio::test] @@ -22,7 +19,7 @@ async fn dev_boot_e2e() { // Start Enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/qos_core") + Command::new("../target/debug/async_qos_core") .args([ "--usock", &*usock, @@ -90,90 +87,3 @@ async fn dev_boot_e2e() { assert_eq!(std::str::from_utf8(&contents).unwrap(), "vapers-only"); fs::remove_file(PIVOT_OK3_SUCCESS_FILE).unwrap(); } - -#[tokio::test] -async fn async_dev_boot_e2e() { - let tmp: PathWrapper = "/tmp/dev-async-boot-e2e-tmp".into(); - drop(fs::create_dir_all(&*tmp)); - let _: PathWrapper = PIVOT_OK4_SUCCESS_FILE.into(); - let usock: PathWrapper = "/tmp/dev-async-boot-e2e-tmp/sock.sock".into(); - let secret_path: PathWrapper = - "/tmp/dev-async-boot-e2e-tmp/quorum.secret".into(); - let pivot_path: PathWrapper = - "/tmp/dev-async-boot-e2e-tmp/pivot.pivot".into(); - let manifest_path: PathWrapper = - "/tmp/dev-async-boot-e2e-tmp/manifest.manifest".into(); - let eph_path: PathWrapper = "/tmp/dev-async-boot-e2e-tmp/eph.secret".into(); - - let host_port = qos_test_primitives::find_free_port().unwrap(); - - // Start Enclave - let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") - 
.args([ - "--usock", - &*usock, - "--quorum-file", - &*secret_path, - "--pivot-file", - &*pivot_path, - "--ephemeral-file", - &*eph_path, - "--mock", - "--manifest-file", - &*manifest_path, - ]) - .spawn() - .unwrap() - .into(); - - // Start Host - let mut _host_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_host") - .args([ - "--host-port", - &host_port.to_string(), - "--host-ip", - LOCAL_HOST, - "--usock", - &*usock, - ]) - .spawn() - .unwrap() - .into(); - - qos_test_primitives::wait_until_port_is_bound(host_port); - - // Run `dangerous-dev-boot` - let res = Command::new("../target/debug/qos_client") - .args([ - "dangerous-dev-boot", - "--host-port", - &host_port.to_string(), - "--host-ip", - LOCAL_HOST, - "--pivot-path", - PIVOT_OK4_PATH, - "--restart-policy", - "never", - "--pivot-args", - "[--msg,vapers-only]", - "--unsafe-eph-path-override", - &*eph_path, - ]) - .spawn() - .unwrap() - .wait() - .unwrap(); - - // Give the coordinator time to pivot - std::thread::sleep(std::time::Duration::from_secs(2)); - - // Make sure pivot ran - assert!(Path::new(PIVOT_OK4_SUCCESS_FILE).exists()); - assert!(res.success()); - - let contents = fs::read(PIVOT_OK4_SUCCESS_FILE).unwrap(); - assert_eq!(std::str::from_utf8(&contents).unwrap(), "vapers-only"); - fs::remove_file(PIVOT_OK4_SUCCESS_FILE).unwrap(); -} diff --git a/src/integration/tests/genesis.rs b/src/integration/tests/genesis.rs index 42fccfee..96a74203 100644 --- a/src/integration/tests/genesis.rs +++ b/src/integration/tests/genesis.rs @@ -103,7 +103,7 @@ async fn genesis_e2e() { // -- ENCLAVE start enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/qos_core") + Command::new("../target/debug/async_qos_core") .args([ "--usock", &*usock, diff --git a/src/integration/tests/key.rs b/src/integration/tests/key.rs index ff1af9be..3493b7ce 100644 --- a/src/integration/tests/key.rs +++ b/src/integration/tests/key.rs @@ -71,7 +71,7 @@ async fn key_fwd_e2e() { // -- 
ENCLAVE start new enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/qos_core") + Command::new("../target/debug/async_qos_core") .args([ "--usock", new_usock, @@ -257,7 +257,7 @@ fn boot_old_enclave(old_host_port: u16) -> (ChildWrapper, ChildWrapper) { // -- ENCLAVE start old enclave let enclave_child_process: ChildWrapper = - Command::new("../target/debug/qos_core") + Command::new("../target/debug/async_qos_core") .args([ "--usock", old_usock, diff --git a/src/integration/tests/qos_host.rs b/src/integration/tests/qos_host.rs index e97a4fee..0905687f 100644 --- a/src/integration/tests/qos_host.rs +++ b/src/integration/tests/qos_host.rs @@ -33,7 +33,7 @@ fn connects_and_gets_info() { drop(std::fs::remove_file(&*secret_path)); let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/qos_core") + Command::new("../target/debug/async_qos_core") .args([ "--usock", TEST_ENCLAVE_SOCKET, diff --git a/src/qos_host/Cargo.toml b/src/qos_host/Cargo.toml index e0e3f6f0..8ea1ef96 100644 --- a/src/qos_host/Cargo.toml +++ b/src/qos_host/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -qos_core = { path = "../qos_core", default-features = false } +qos_core = { path = "../qos_core", default-features = false, features = ["async"] } qos_hex = { path = "../qos_hex", features = ["serde"], default-features = false } # Third party @@ -16,10 +16,4 @@ serde_json = { version = "1" } serde = { version = "1", features = ["derive"], default-features = false } [features] -async = ["qos_core/async"] vm = ["qos_core/vm"] - -[[bin]] -name = "async_qos_host" -path = "src/bin/async_qos_host.rs" -required-features = ["async"] diff --git a/src/qos_host/src/bin/async_qos_host.rs b/src/qos_host/src/bin/async_qos_host.rs deleted file mode 100644 index 6204970d..00000000 --- a/src/qos_host/src/bin/async_qos_host.rs +++ /dev/null @@ -1,11 +0,0 @@ -#[tokio::main] -async fn main() { - // Development quick start - // ``` - // 
`cargo run --bin qos_host -- \ - // --usock tk.sock \ - // --host-port 3000 \ - // --host-ip 0.0.0.0 \ - // ``` - qos_host::cli::CLI::execute().await; -} diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index 05c96feb..5cf2d276 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -8,13 +8,10 @@ use std::{ use qos_core::{ cli::{CID, PORT, USOCK}, - io::SocketAddress, + io::{AsyncStreamPool, SocketAddress}, parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; -#[cfg(feature = "async")] -use qos_core::io::AsyncStreamPool; - const HOST_IP: &str = "host-ip"; const HOST_PORT: &str = "host-port"; const ENDPOINT_BASE_PATH: &str = "endpoint-base-path"; @@ -115,7 +112,6 @@ impl HostOpts { /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and /// return the new `AsyncPool`. - #[cfg(feature = "async")] pub(crate) fn enclave_pool( &self, ) -> Result { @@ -227,16 +223,6 @@ impl CLI { } else if options.parsed.help() { println!("{}", options.parsed.info()); } else { - #[cfg(not(feature = "async"))] - crate::HostServer::new( - options.enclave_addr(), - options.host_addr(), - options.base_path(), - ) - .serve() - .await; - - #[cfg(feature = "async")] crate::async_host::AsyncHostServer::new( options .enclave_pool() diff --git a/src/qos_host/src/lib.rs b/src/qos_host/src/lib.rs index 264474ed..9d77102e 100644 --- a/src/qos_host/src/lib.rs +++ b/src/qos_host/src/lib.rs @@ -17,34 +17,20 @@ #![warn(missing_docs, clippy::pedantic)] #![allow(clippy::missing_errors_doc)] -use std::{net::SocketAddr, sync::Arc}; - use axum::{ - body::Bytes, - extract::{DefaultBodyLimit, State}, http::StatusCode, - response::{Html, IntoResponse, Response}, - routing::{get, post}, - Json, Router, + response::{IntoResponse, Response}, + Json, }; -use borsh::BorshDeserialize; -use qos_core::{ - client::Client, - io::{SocketAddress, TimeVal, TimeValLike}, - protocol::{ - msg::ProtocolMsg, 
services::boot::ManifestEnvelope, Hash256, - ProtocolError, ProtocolPhase, ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, - }, +use qos_core::protocol::{ + services::boot::ManifestEnvelope, Hash256, ProtocolPhase, }; -#[cfg(feature = "async")] pub mod async_host; pub mod cli; const MEGABYTE: usize = 1024 * 1024; const MAX_ENCODED_MSG_LEN: usize = 256 * MEGABYTE; -const QOS_SOCKET_CLIENT_TIMEOUT_SECS: i64 = - ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS + 2; /// Simple error that implements [`IntoResponse`] so it can /// be returned from handlers as an http response (and not get silently @@ -63,19 +49,6 @@ impl IntoResponse for Error { } } -/// Resource shared across tasks in the [`HostServer`]. -#[derive(Debug)] -struct QosHostState { - enclave_client: Client, -} - -/// HTTP server for the host of the enclave; proxies requests to the enclave. -pub struct HostServer { - enclave_addr: SocketAddress, - addr: SocketAddr, - base_path: Option, -} - const HOST_HEALTH: &str = "/host-health"; const ENCLAVE_HEALTH: &str = "/enclave-health"; const MESSAGE: &str = "/message"; @@ -112,221 +85,3 @@ pub struct JsonError { /// Error message. pub error: String, } - -impl HostServer { - /// Create a new [`HostServer`]. See [`Self::serve`] for starting the - /// server. - #[must_use] - pub fn new( - enclave_addr: SocketAddress, - addr: SocketAddr, - base_path: Option, - ) -> Self { - Self { enclave_addr, addr, base_path } - } - - fn path(&self, endpoint: &str) -> String { - if let Some(path) = self.base_path.as_ref() { - format!("/{path}{endpoint}") - } else { - format!("/qos{endpoint}") - } - } - - /// Start the server, running indefinitely. - /// - /// # Panics - /// - /// Panics if there is an issue starting the server. 
- // pub async fn serve(&self) -> Result<(), String> { - pub async fn serve(&self) { - let state = Arc::new(QosHostState { - enclave_client: Client::new( - self.enclave_addr.clone(), - TimeVal::seconds(QOS_SOCKET_CLIENT_TIMEOUT_SECS), - ), - }); - - let app = Router::new() - .route(&self.path(HOST_HEALTH), get(Self::host_health)) - .route(&self.path(ENCLAVE_HEALTH), get(Self::enclave_health)) - .route(&self.path(MESSAGE), post(Self::message)) - .route(&self.path(ENCLAVE_INFO), get(Self::enclave_info)) - .layer(DefaultBodyLimit::disable()) - .with_state(state); - - println!("HostServer listening on {}", self.addr); - - axum::Server::bind(&self.addr) - .serve(app.into_make_service()) - .await - .unwrap(); - } - - /// Health route handler. - #[allow(clippy::unused_async)] - async fn host_health(_: State>) -> impl IntoResponse { - println!("Host health..."); - Html("Ok!") - } - - /// Health route handler. - #[allow(clippy::unused_async)] - async fn enclave_health( - State(state): State>, - ) -> impl IntoResponse { - println!("Enclave health..."); - - let encoded_request = borsh::to_vec(&ProtocolMsg::StatusRequest) - .expect("ProtocolMsg can always serialize. 
qed."); - let encoded_response = match state.enclave_client.send(&encoded_request) - { - Ok(encoded_response) => encoded_response, - Err(e) => { - let msg = format!("Error while trying to send socket request to enclave: {e:?}"); - eprintln!("{msg}"); - return (StatusCode::INTERNAL_SERVER_ERROR, Html(msg)); - } - }; - - let response = match ProtocolMsg::try_from_slice(&encoded_response) { - Ok(r) => r, - Err(e) => { - let msg = format!("Error deserializing response from enclave, make sure qos_host version match qos_core: {e}"); - eprintln!("{msg}"); - return (StatusCode::INTERNAL_SERVER_ERROR, Html(msg)); - } - }; - - match response { - ProtocolMsg::StatusResponse(phase) => { - let inner = format!("{phase:?}"); - let status = match phase { - ProtocolPhase::UnrecoverableError - | ProtocolPhase::WaitingForBootInstruction - | ProtocolPhase::WaitingForQuorumShards - | ProtocolPhase::WaitingForForwardedKey => StatusCode::SERVICE_UNAVAILABLE, - ProtocolPhase::QuorumKeyProvisioned - | ProtocolPhase::GenesisBooted => StatusCode::OK, - }; - - (status, Html(inner)) - } - other => { - let msg = format!("Unexpected response: Expected a ProtocolMsg::StatusResponse, but got: {other:?}"); - eprintln!("{msg}"); - (StatusCode::INTERNAL_SERVER_ERROR, Html(msg)) - } - } - } - - #[allow(clippy::unused_async)] - async fn enclave_info( - State(state): State>, - ) -> Result, Error> { - println!("Enclave info..."); - - let enc_status_req = borsh::to_vec(&ProtocolMsg::StatusRequest) - .expect("ProtocolMsg can always serialize. 
qed."); - let enc_status_resp = - state.enclave_client.send(&enc_status_req).map_err(|e| { - Error(format!("error sending status request to enclave: {e:?}")) - })?; - - let status_resp = match ProtocolMsg::try_from_slice(&enc_status_resp) { - Ok(status_resp) => status_resp, - Err(e) => { - return Err(Error(format!("error deserializing status response from enclave, make sure qos_host version match qos_core: {e:?}"))); - } - }; - let phase = match status_resp { - ProtocolMsg::StatusResponse(phase) => phase, - other => { - return Err(Error(format!("unexpected response: expected a ProtocolMsg::StatusResponse, but got: {other:?}"))); - } - }; - - let enc_manifest_envelope_req = - borsh::to_vec(&ProtocolMsg::ManifestEnvelopeRequest) - .expect("ProtocolMsg can always serialize. qed."); - let enc_manifest_envelope_resp = state - .enclave_client - .send(&enc_manifest_envelope_req) - .map_err(|e| { - Error(format!( - "error while trying to send manifest envelope socket request to enclave: {e:?}" - )) - })?; - - let manifest_envelope_resp = ProtocolMsg::try_from_slice( - &enc_manifest_envelope_resp, - ) - .map_err(|e| - Error(format!("error deserializing manifest envelope response from enclave, make sure qos_host version match qos_core: {e}")) - )?; - - let manifest_envelope = match manifest_envelope_resp { - ProtocolMsg::ManifestEnvelopeResponse { manifest_envelope } => { - *manifest_envelope - } - other => { - return Err( - Error(format!("unexpected response: expected a ProtocolMsg::ManifestEnvelopeResponse, but got: {other:?}")) - ); - } - }; - - let vitals_log = if let Some(m) = manifest_envelope.as_ref() { - serde_json::to_string(&EnclaveVitalStats { - phase, - namespace: m.manifest.namespace.name.clone(), - nonce: m.manifest.namespace.nonce, - pivot_hash: m.manifest.pivot.hash, - pcr0: m.manifest.enclave.pcr0.clone(), - pivot_args: m.manifest.pivot.args.clone(), - }) - .expect("always valid json. 
qed.") - } else { - serde_json::to_string(&phase).expect("always valid json. qed.") - }; - println!("{vitals_log}"); - - let info = EnclaveInfo { phase, manifest_envelope }; - - Ok(Json(info)) - } - - /// Message route handler. - #[allow(clippy::unused_async)] - async fn message( - State(state): State>, - encoded_request: Bytes, - ) -> impl IntoResponse { - if encoded_request.len() > MAX_ENCODED_MSG_LEN { - return ( - StatusCode::BAD_REQUEST, - borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( - ProtocolError::OversizeMsg, - )) - .expect("ProtocolMsg can always serialize. qed."), - ); - } - - match state.enclave_client.send(&encoded_request) { - Ok(encoded_response) => (StatusCode::OK, encoded_response), - Err(e) => { - let msg = - format!("Error while trying to send request over socket to enclave: {e:?}"); - eprint!("{msg}"); - - ( - StatusCode::INTERNAL_SERVER_ERROR, - borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( - ProtocolError::EnclaveClient, - )) - .expect("ProtocolMsg can always serialize. qed."), - ) - } - } - } -} From 873340adc91177f9000495a37274d6ee8bcd83a9 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Wed, 16 Jul 2025 13:44:19 -0700 Subject: [PATCH 17/20] qos_core: remove "async" feature and cleanup also fixes integration tests to use new code, timeouts etc. 
--- src/Cargo.lock | 60 +-- src/Makefile | 2 - src/init/Cargo.toml | 2 +- src/init/init.rs | 8 +- src/integration/examples/boot_enclave.rs | 2 +- .../src/bin/pivot_async_remote_tls.rs | 34 +- src/integration/src/bin/pivot_proof.rs | 25 +- .../src/bin/pivot_socket_stress.rs | 40 +- src/integration/src/lib.rs | 32 +- src/integration/tests/async_boot.rs | 4 +- src/integration/tests/async_boot_hybrid.rs | 468 ----------------- src/integration/tests/async_client.rs | 26 +- src/integration/tests/async_qos_host.rs | 2 +- src/integration/tests/async_remote_tls.rs | 6 +- src/integration/tests/boot.rs | 2 +- src/integration/tests/dev_boot.rs | 2 +- .../tests/enclave_app_client_socket_stress.rs | 46 +- src/integration/tests/genesis.rs | 2 +- src/integration/tests/key.rs | 4 +- src/integration/tests/proofs.rs | 22 +- src/integration/tests/qos_host.rs | 9 +- src/integration/tests/reaper.rs | 54 +- src/integration/tests/simple_socket_stress.rs | 46 +- src/qos_core/Cargo.toml | 10 +- src/qos_core/src/async_client.rs | 89 +++- src/qos_core/src/async_server.rs | 44 +- src/qos_core/src/bin/async_qos_core.rs | 5 - src/qos_core/src/cli.rs | 76 +-- src/qos_core/src/client.rs | 48 -- src/qos_core/src/io/async_pool.rs | 14 +- src/qos_core/src/io/async_stream.rs | 153 ++---- src/qos_core/src/io/mod.rs | 20 +- src/qos_core/src/io/stream.rs | 491 +----------------- src/qos_core/src/lib.rs | 4 - src/qos_core/src/main.rs | 5 +- src/qos_core/src/protocol/async_processor.rs | 32 +- src/qos_core/src/protocol/error.rs | 6 +- src/qos_core/src/protocol/mod.rs | 3 - src/qos_core/src/protocol/processor.rs | 85 --- src/qos_core/src/reaper.rs | 438 ++++++---------- src/qos_core/src/server.rs | 58 --- src/qos_host/Cargo.toml | 2 +- src/qos_host/src/async_host.rs | 11 +- src/qos_host/src/cli.rs | 24 +- src/qos_net/Cargo.toml | 4 +- src/qos_net/src/async_proxy.rs | 3 +- src/qos_net/src/cli.rs | 6 +- src/qos_test_primitives/src/lib.rs | 2 +- 48 files changed, 680 insertions(+), 1851 deletions(-) delete 
mode 100644 src/integration/tests/async_boot_hybrid.rs delete mode 100644 src/qos_core/src/bin/async_qos_core.rs delete mode 100644 src/qos_core/src/client.rs delete mode 100644 src/qos_core/src/protocol/processor.rs delete mode 100644 src/qos_core/src/server.rs diff --git a/src/Cargo.lock b/src/Cargo.lock index 932c93ca..ec9335dd 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -435,7 +435,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -1571,7 +1571,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", - "windows-targets 0.52.5", + "windows-targets 0.48.5", ] [[package]] @@ -1986,7 +1986,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3559,7 +3559,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3657,7 +3657,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3677,18 +3677,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 
0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -3708,9 +3708,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -3720,9 +3720,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -3732,15 +3732,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -3750,9 +3750,9 @@ 
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -3762,9 +3762,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -3774,9 +3774,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -3786,9 +3786,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" diff --git a/src/Makefile b/src/Makefile index 02306b47..3f6d365a 100644 --- a/src/Makefile +++ b/src/Makefile @@ -159,8 +159,6 @@ test: @# The integration tests rely on binaries from other crates being built, so @# we build all the workspace targets. 
cargo build --all - @# We also need the async version of qos_core - cargo build --bin async_qos_core --features async,mock @# Run tests cargo test @# When we build the workspace it resolves with the qos_core mock feature diff --git a/src/init/Cargo.toml b/src/init/Cargo.toml index eced8cd1..17ae5748 100644 --- a/src/init/Cargo.toml +++ b/src/init/Cargo.toml @@ -8,7 +8,7 @@ publish = false libc = "0.2.172" qos_aws = { path = "../qos_aws"} qos_system = { path = "../qos_system"} -qos_core = { path = "../qos_core", features = ["vm", "async"], default-features = false } +qos_core = { path = "../qos_core", features = ["vm"], default-features = false } qos_nsm = { path = "../qos_nsm", default-features = false } tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time", "signal"], default-features = false } diff --git a/src/init/init.rs b/src/init/init.rs index 99bae988..4834ec74 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -1,6 +1,6 @@ use qos_core::{ handles::Handles, - io::{SocketAddress, VMADDR_NO_FLAGS}, + io::{AsyncStreamPool, SocketAddress, VMADDR_NO_FLAGS}, reaper::Reaper, EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, }; @@ -56,8 +56,6 @@ fn boot() { #[tokio::main] async fn main() { - use qos_core::io::{AsyncStreamPool, TimeVal, TimeValLike}; - boot(); dmesg("QuorumOS Booted in Async mode".to_string()); @@ -74,19 +72,17 @@ async fn main() { let start_port = 3; // used for qos-host only! 
others follow 4+ for the -host let core_pool = AsyncStreamPool::new( SocketAddress::new_vsock(cid, start_port, VMADDR_NO_FLAGS), - TimeVal::seconds(0), 1, // start at pool size 1, grow based on manifest/args as necessary (see Reaper) ) .expect("unable to create core pool"); let app_pool = AsyncStreamPool::new( SocketAddress::new_unix(SEC_APP_SOCK), - TimeVal::seconds(5), 1, // start at pool size 1, grow based on manifest/args as necessary (see Reaper) ) .expect("unable to create app pool"); - Reaper::async_execute(&handles, Box::new(Nsm), core_pool, app_pool, None); + Reaper::execute(&handles, Box::new(Nsm), core_pool, app_pool, None); reboot(); } diff --git a/src/integration/examples/boot_enclave.rs b/src/integration/examples/boot_enclave.rs index 9cc892d4..84147e30 100644 --- a/src/integration/examples/boot_enclave.rs +++ b/src/integration/examples/boot_enclave.rs @@ -241,7 +241,7 @@ async fn main() { // -- ENCLAVE start enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", &*usock, diff --git a/src/integration/src/bin/pivot_async_remote_tls.rs b/src/integration/src/bin/pivot_async_remote_tls.rs index 21a0b1b2..f22c1973 100644 --- a/src/integration/src/bin/pivot_async_remote_tls.rs +++ b/src/integration/src/bin/pivot_async_remote_tls.rs @@ -5,11 +5,7 @@ use borsh::BorshDeserialize; use integration::PivotRemoteTlsMsg; use qos_core::{ async_server::{AsyncRequestProcessor, AsyncSocketServer}, - io::{ - AsyncStreamPool, SharedAsyncStreamPool, SocketAddress, TimeVal, - TimeValLike, - }, - protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, + io::{AsyncStreamPool, SharedAsyncStreamPool, SocketAddress}, }; use qos_net::async_proxy_stream::AsyncProxyStream; use rustls::RootCertStore; @@ -109,24 +105,20 @@ async fn main() { let socket_path: &String = &args[1]; let proxy_path: &String = &args[2]; - let pool = AsyncStreamPool::new( - 
SocketAddress::new_unix(socket_path), - TimeVal::seconds(0), // listener, no timeout - 1, - ) - .expect("unable to create async stream pool"); + let enclave_pool = + AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) + .expect("unable to create async stream pool"); - let proxy_pool = AsyncStreamPool::new( - SocketAddress::new_unix(proxy_path), - TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), - 1, - ) - .expect("unable to create async stream pool") - .shared(); + let proxy_pool = + AsyncStreamPool::new(SocketAddress::new_unix(proxy_path), 1) + .expect("unable to create async stream pool") + .shared(); - let server = - AsyncSocketServer::listen_all(pool, &Processor::new(proxy_pool)) - .unwrap(); + let server = AsyncSocketServer::listen_all( + enclave_pool, + &Processor::new(proxy_pool), + ) + .unwrap(); match tokio::signal::ctrl_c().await { Ok(_) => { diff --git a/src/integration/src/bin/pivot_proof.rs b/src/integration/src/bin/pivot_proof.rs index f4de0fdb..7d6e679f 100644 --- a/src/integration/src/bin/pivot_proof.rs +++ b/src/integration/src/bin/pivot_proof.rs @@ -3,17 +3,18 @@ use core::panic; use borsh::BorshDeserialize; use integration::{AdditionProof, AdditionProofPayload, PivotProofMsg}; use qos_core::{ + async_server::{AsyncRequestProcessor, AsyncSocketServer}, handles::EphemeralKeyHandle, - io::SocketAddress, - server::{RequestProcessor, SocketServer}, + io::{AsyncStreamPool, SocketAddress}, }; +#[derive(Clone)] struct Processor { ephemeral_key_handle: EphemeralKeyHandle, } -impl RequestProcessor for Processor { - fn process(&mut self, request: Vec) -> Vec { +impl AsyncRequestProcessor for Processor { + async fn process(&self, request: Vec) -> Vec { let msg = PivotProofMsg::try_from_slice(&request) .expect("Received invalid message - test is broken!"); @@ -48,17 +49,25 @@ impl RequestProcessor for Processor { } } -fn main() { +#[tokio::main] +async fn main() { let args: Vec = std::env::args().collect(); let socket_path: &String = 
&args[1]; - SocketServer::listen( - SocketAddress::new_unix(socket_path), - Processor { + let app_pool = + AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) + .expect("unable to create app pool"); + + let server = AsyncSocketServer::listen_all( + app_pool, + &Processor { ephemeral_key_handle: EphemeralKeyHandle::new( "./mock/ephemeral_seed.secret.keep".to_string(), ), }, ) .unwrap(); + + tokio::signal::ctrl_c().await.unwrap(); + server.terminate(); } diff --git a/src/integration/src/bin/pivot_socket_stress.rs b/src/integration/src/bin/pivot_socket_stress.rs index 5b5780ea..c8e4b3e9 100644 --- a/src/integration/src/bin/pivot_socket_stress.rs +++ b/src/integration/src/bin/pivot_socket_stress.rs @@ -3,15 +3,15 @@ use core::panic; use borsh::BorshDeserialize; use integration::PivotSocketStressMsg; use qos_core::{ - io::SocketAddress, - protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, - server::{RequestProcessor, SocketServer}, + async_server::{AsyncRequestProcessor, AsyncSocketServer}, + io::{AsyncStreamPool, SocketAddress}, }; +#[derive(Clone)] struct Processor; -impl RequestProcessor for Processor { - fn process(&mut self, request: Vec) -> Vec { +impl AsyncRequestProcessor for Processor { + async fn process(&self, request: Vec) -> Vec { // Simulate just some baseline lag for all requests std::thread::sleep(std::time::Duration::from_secs(1)); @@ -24,14 +24,16 @@ impl RequestProcessor for Processor { .expect("OkResponse is valid borsh") } PivotSocketStressMsg::PanicRequest => { - panic!( - "\"socket stress\" pivot app has received a PanicRequest" - ) + eprintln!("PIVOT: panic request received, panicing"); + // panic is not enough in tokio, we need process exit + std::process::exit(1) } - PivotSocketStressMsg::SlowRequest => { - std::thread::sleep(std::time::Duration::from_secs( - ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS as u64 + 1, - )); + PivotSocketStressMsg::SlowRequest(delay) => { + eprintln!( + "PIVOT: slow request received, sleeping for {delay}ms" + ); 
+ tokio::time::sleep(std::time::Duration::from_millis(delay)) + .await; borsh::to_vec(&PivotSocketStressMsg::SlowResponse) .expect("OkResponse is valid borsh") } @@ -45,9 +47,17 @@ impl RequestProcessor for Processor { } } -fn main() { +#[tokio::main] +async fn main() { let args: Vec = std::env::args().collect(); let socket_path = &args[1]; - SocketServer::listen(SocketAddress::new_unix(socket_path), Processor) - .unwrap(); + + let app_pool = + AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) + .expect("unable to create app pool"); + + let server = AsyncSocketServer::listen_all(app_pool, &Processor).unwrap(); + + tokio::signal::ctrl_c().await.unwrap(); + server.terminate(); } diff --git a/src/integration/src/lib.rs b/src/integration/src/lib.rs index a7db3600..b8a03142 100644 --- a/src/integration/src/lib.rs +++ b/src/integration/src/lib.rs @@ -4,8 +4,14 @@ #![deny(clippy::all)] #![warn(missing_docs)] +use std::time::Duration; + use borsh::{BorshDeserialize, BorshSerialize}; -use qos_core::parser::{GetParserForOptions, OptionsParser, Parser, Token}; +use qos_core::{ + async_client::AsyncClient, + io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + parser::{GetParserForOptions, OptionsParser, Parser, Token}, +}; /// Path to the file `pivot_ok` writes on success for tests. pub const PIVOT_OK_SUCCESS_FILE: &str = "./pivot_ok_works"; @@ -66,9 +72,8 @@ pub enum PivotSocketStressMsg { OkResponse, /// Request the app to panic. Does not have a response. PanicRequest, - /// Request a response that will be slower then - /// `ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS`. - SlowRequest, + /// Request a response that will be slower than the provided `u64` value in milliseconds + SlowRequest(u64), // milliseconds /// Response to [`Self::SlowRequest`]. SlowResponse, } @@ -131,6 +136,25 @@ pub struct AdditionProofPayload { pub result: usize, } +/// Wait for a given usock file to exist and be connectible with a timeout of 5s. 
+/// +/// # Panics +/// Panics if fs::exists errors. +pub async fn wait_for_usock(path: &str) { + let addr = SocketAddress::new_unix(path); + let pool = AsyncStreamPool::new(addr, 1).unwrap().shared(); + let client = AsyncClient::new(pool, TimeVal::milliseconds(50)); + + for _ in 0..50 { + if std::fs::exists(path).unwrap() && client.try_connect().await.is_ok() + { + break; + } + + tokio::time::sleep(Duration::from_millis(100)).await; + } +} + struct PivotParser; impl GetParserForOptions for PivotParser { fn parser() -> Parser { diff --git a/src/integration/tests/async_boot.rs b/src/integration/tests/async_boot.rs index 319308cb..03be96e4 100644 --- a/src/integration/tests/async_boot.rs +++ b/src/integration/tests/async_boot.rs @@ -260,7 +260,7 @@ async fn async_standard_boot_e2e() { // -- ENCLAVE start enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", &*usock, @@ -280,7 +280,7 @@ async fn async_standard_boot_e2e() { // -- HOST start host let mut _host_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_host") + Command::new("../target/debug/qos_host") .args([ "--host-port", &host_port.to_string(), diff --git a/src/integration/tests/async_boot_hybrid.rs b/src/integration/tests/async_boot_hybrid.rs deleted file mode 100644 index b2966bde..00000000 --- a/src/integration/tests/async_boot_hybrid.rs +++ /dev/null @@ -1,468 +0,0 @@ -use std::{ - fs, - io::{BufRead, BufReader, Write}, - path::Path, - process::{Command, Stdio}, -}; - -use borsh::de::BorshDeserialize; -use integration::{ - LOCAL_HOST, PCR3_PRE_IMAGE_PATH, PIVOT_OK5_PATH, PIVOT_OK5_SUCCESS_FILE, - QOS_DIST_DIR, -}; -use qos_core::protocol::{ - services::{ - boot::{ - Approval, Manifest, ManifestSet, Namespace, PivotConfig, - RestartPolicy, ShareSet, - }, - genesis::{GenesisMemberOutput, GenesisOutput}, - }, - ProtocolPhase, QosHash, -}; -use qos_crypto::sha_256; -use 
qos_host::EnclaveInfo; -use qos_p256::P256Pair; -use qos_test_primitives::{ChildWrapper, PathWrapper}; - -#[tokio::test] -async fn async_standard_boot_hybrid_e2e() { - const PIVOT_HASH_PATH: &str = - "/tmp/async_standard_boot_hybrid_e2e-pivot-hash.txt"; - - let host_port = qos_test_primitives::find_free_port().unwrap(); - let tmp: PathWrapper = "/tmp/async-boot-hybrid-e2e".into(); - let _: PathWrapper = PIVOT_OK5_SUCCESS_FILE.into(); - let _: PathWrapper = PIVOT_HASH_PATH.into(); - fs::create_dir_all(&*tmp).unwrap(); - - let usock: PathWrapper = "/tmp/async-boo-hybrid-e2e/boot_e2e.sock".into(); - let secret_path: PathWrapper = - "/tmp/async-boo-hybrid-e2e/boot_e2e.secret".into(); - let pivot_path: PathWrapper = - "/tmp/async-boo-hybrid-e2e/boot_e2e.pivot".into(); - let manifest_path: PathWrapper = - "/tmp/async-boo-hybrid-e2e/boot_e2e.manifest".into(); - let eph_path: PathWrapper = - "/tmp/async-boo-hybrid-e2e/ephemeral_key.secret".into(); - - let boot_dir: PathWrapper = "/tmp/async-boo-hybrid-e2e/boot-dir".into(); - fs::create_dir_all(&*boot_dir).unwrap(); - let attestation_dir: PathWrapper = - "/tmp/async-boo-hybrid-e2e/attestation-dir".into(); - fs::create_dir_all(&*attestation_dir).unwrap(); - let attestation_doc_path = format!("{}/attestation_doc", &*attestation_dir); - - let all_personal_dir = "./mock/boot-e2e/all-personal-dir"; - - let namespace = "quit-coding-to-vape"; - - let personal_dir = |user: &str| format!("{all_personal_dir}/{user}-dir"); - - let user1 = "user1"; - let user2 = "user2"; - let user3 = "user3"; - - // -- Create pivot-build-fingerprints.txt - let pivot = fs::read(PIVOT_OK5_PATH).unwrap(); - let mock_pivot_hash = sha_256(&pivot); - let pivot_hash = qos_hex::encode_to_vec(&mock_pivot_hash); - std::fs::write(PIVOT_HASH_PATH, pivot_hash).unwrap(); - - // -- CLIENT create manifest. 
- let msg = "testing420"; - let pivot_args = format!("[--msg,{msg}]"); - let cli_manifest_path = format!("{}/manifest", &*boot_dir); - - assert!(Command::new("../target/debug/qos_client") - .args([ - "generate-manifest", - "--nonce", - "2", - "--namespace", - namespace, - "--restart-policy", - "never", - "--pivot-hash-path", - PIVOT_HASH_PATH, - "--qos-release-dir", - QOS_DIST_DIR, - "--pcr3-preimage-path", - PCR3_PRE_IMAGE_PATH, - "--manifest-path", - &cli_manifest_path, - "--pivot-args", - &pivot_args, - "--manifest-set-dir", - "./mock/keys/manifest-set", - "--share-set-dir", - "./mock/keys/share-set", - "--patch-set-dir", - "./mock/keys/manifest-set", - "--quorum-key-path", - "./mock/namespaces/quit-coding-to-vape/quorum_key.pub" - ]) - .spawn() - .unwrap() - .wait() - .unwrap() - .success()); - - // Check the manifest written to file - let manifest = - Manifest::try_from_slice(&fs::read(&cli_manifest_path).unwrap()) - .unwrap(); - - let genesis_output = { - let contents = - fs::read("./mock/boot-e2e/genesis-dir/genesis_output").unwrap(); - GenesisOutput::try_from_slice(&contents).unwrap() - }; - // For simplicity sake, we use the same keys for the share set and manifest - // set. - let mut members: Vec<_> = genesis_output - .member_outputs - .iter() - .cloned() - .map(|GenesisMemberOutput { share_set_member, .. 
}| share_set_member) - .collect(); - members.sort(); - - let namespace_field = Namespace { - name: namespace.to_string(), - nonce: 2, - quorum_key: genesis_output.quorum_key, - }; - assert_eq!(manifest.namespace, namespace_field); - let pivot = PivotConfig { - hash: mock_pivot_hash, - restart: RestartPolicy::Never, - args: vec!["--msg".to_string(), msg.to_string()], - }; - assert_eq!(manifest.pivot, pivot); - let manifest_set = ManifestSet { threshold: 2, members: members.clone() }; - assert_eq!(manifest.manifest_set, manifest_set); - let share_set = ShareSet { threshold: 2, members }; - assert_eq!(manifest.share_set, share_set); - - // -- CLIENT make sure each user can run `approve-manifest` - for alias in [user1, user2, user3] { - let approval_path = format!( - "{}/{}-{}-{}.approval", - &*boot_dir, alias, namespace, manifest.namespace.nonce, - ); - - let secret_path = format!("{}/{}.secret", &personal_dir(alias), alias); - - let mut child = Command::new("../target/debug/qos_client") - .args([ - "approve-manifest", - "--secret-path", - &*secret_path, - "--manifest-path", - &cli_manifest_path, - "--manifest-approvals-dir", - &*boot_dir, - "--pcr3-preimage-path", - PCR3_PRE_IMAGE_PATH, - "--pivot-hash-path", - PIVOT_HASH_PATH, - "--qos-release-dir", - QOS_DIST_DIR, - "--manifest-set-dir", - "./mock/keys/manifest-set", - "--share-set-dir", - "./mock/keys/share-set", - "--patch-set-dir", - "./mock/keys/manifest-set", - "--quorum-key-path", - "./mock/namespaces/quit-coding-to-vape/quorum_key.pub", - "--alias", - alias, - ]) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - - let mut stdin = child.stdin.take().expect("Failed to open stdin"); - - let mut stdout = { - let stdout = child.stdout.as_mut().unwrap(); - let stdout_reader = BufReader::new(stdout); - stdout_reader.lines() - }; - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Is this the correct namespace name: quit-coding-to-vape? 
(y/n)" - ); - stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Is this the correct namespace nonce: 2? (y/n)" - ); - // On purpose, try to input a bad value, neither yes or no - stdin - .write_all("maybe\n".as_bytes()) - .expect("Failed to write to stdin"); - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Please answer with either \"yes\" (y) or \"no\" (n)" - ); - // Try the longer option ("yes" rather than "y") - stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Is this the correct pivot restart policy: RestartPolicy::Never? (y/n)" - ); - stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Are these the correct pivot args:" - ); - assert_eq!( - &stdout.next().unwrap().unwrap(), - "[\"--msg\", \"testing420\"]?" - ); - assert_eq!(&stdout.next().unwrap().unwrap(), "(y/n)"); - stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); - - // Wait for the command to write the approval and exit - assert!(child.wait().unwrap().success()); - - // Read in the generated approval to check it was created correctly - let approval = - Approval::try_from_slice(&fs::read(approval_path).unwrap()) - .unwrap(); - let personal_pair = P256Pair::from_hex_file(format!( - "{}/{}.secret", - personal_dir(alias), - alias, - )) - .unwrap(); - - let signature = personal_pair.sign(&manifest.qos_hash()).unwrap(); - assert_eq!(approval.signature, signature); - - assert_eq!(approval.member.alias, alias); - assert_eq!( - approval.member.pub_key, - personal_pair.public_key().to_bytes(), - ); - } - - // -- ENCLAVE start enclave - let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") - .args([ - "--usock", - &*usock, - "--quorum-file", - &*secret_path, - "--pivot-file", - &*pivot_path, - "--ephemeral-file", - &*eph_path, 
- "--mock", - "--manifest-file", - &*manifest_path, - ]) - .spawn() - .unwrap() - .into(); - - // -- HOST start host - let mut _host_child_process: ChildWrapper = - Command::new("../target/debug/qos_host") - .args([ - "--host-port", - &host_port.to_string(), - "--host-ip", - LOCAL_HOST, - "--usock", - &*usock, - ]) - .spawn() - .unwrap() - .into(); - - // -- Make sure the enclave and host have time to boot - qos_test_primitives::wait_until_port_is_bound(host_port); - - // -- CLIENT generate the manifest envelope - assert!(Command::new("../target/debug/qos_client") - .args([ - "generate-manifest-envelope", - "--manifest-approvals-dir", - &*boot_dir, - "--manifest-path", - &cli_manifest_path, - ]) - .spawn() - .unwrap() - .wait() - .unwrap() - .success()); - - // -- CLIENT broadcast boot standard instruction - let manifest_envelope_path = format!("{}/manifest_envelope", &*boot_dir,); - assert!(Command::new("../target/debug/qos_client") - .args([ - "boot-standard", - "--manifest-envelope-path", - &manifest_envelope_path, - "--pivot-path", - PIVOT_OK5_PATH, - "--host-port", - &host_port.to_string(), - "--host-ip", - LOCAL_HOST, - "--pcr3-preimage-path", - "./mock/pcr3-preimage.txt", - "--unsafe-skip-attestation", - ]) - .spawn() - .unwrap() - .wait() - .unwrap() - .success()); - - // For each user, post a share, - // and sanity check the pivot has not yet executed. 
- assert!(!Path::new(PIVOT_OK5_SUCCESS_FILE).exists()); - for user in [&user1, &user2] { - // Get attestation doc and manifest - assert!(Command::new("../target/debug/qos_client") - .args([ - "get-attestation-doc", - "--host-port", - &host_port.to_string(), - "--host-ip", - LOCAL_HOST, - "--attestation-doc-path", - &*attestation_doc_path, - "--manifest-envelope-path", - "/tmp/dont_care" - ]) - .spawn() - .unwrap() - .wait() - .unwrap() - .success()); - - let share_path = format!("{}/{}.share", &personal_dir(user), user); - let secret_path = format!("{}/{}.secret", &personal_dir(user), user); - let eph_wrapped_share_path: PathWrapper = - format!("{}/{}.eph_wrapped.share", &*tmp, user).into(); - let approval_path: PathWrapper = - format!("{}/{}.attestation.approval", &*tmp, user).into(); - // Encrypt share to ephemeral key - let mut child = Command::new("../target/debug/qos_client") - .args([ - "proxy-re-encrypt-share", - "--share-path", - &share_path, - "--secret-path", - &secret_path, - "--attestation-doc-path", - &*attestation_doc_path, - "--eph-wrapped-share-path", - &eph_wrapped_share_path, - "--approval-path", - &approval_path, - "--manifest-envelope-path", - &manifest_envelope_path, - "--pcr3-preimage-path", - PCR3_PRE_IMAGE_PATH, - "--manifest-set-dir", - "./mock/keys/manifest-set", - "--alias", - user, - "--unsafe-skip-attestation", - "--unsafe-eph-path-override", - &*eph_path, - ]) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - - let mut stdin = child.stdin.take().expect("Failed to open stdin"); - - let mut stdout = { - let stdout = child.stdout.as_mut().unwrap(); - let stdout_reader = BufReader::new(stdout); - stdout_reader.lines() - }; - - // Skip over a log message - stdout.next(); - - // Answer prompts with yes - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Is this the correct namespace name: quit-coding-to-vape? 
(y/n)" - ); - stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Is this the correct namespace nonce: 2? (y/n)" - ); - stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "Does this AWS IAM role belong to the intended organization: arn:aws:iam::123456789012:role/Webserver? (y/n)" - ); - stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); - - assert_eq!( - &stdout.next().unwrap().unwrap(), - "The following manifest set members approved:" - ); - stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); - - // Check that it finished successfully - assert!(child.wait().unwrap().success()); - - // Post the encrypted share - assert!(Command::new("../target/debug/qos_client") - .args([ - "post-share", - "--host-port", - &host_port.to_string(), - "--host-ip", - LOCAL_HOST, - "--eph-wrapped-share-path", - &eph_wrapped_share_path, - "--approval-path", - &approval_path, - ]) - .spawn() - .unwrap() - .wait() - .unwrap() - .success()); - } - - // Give the enclave time to start the pivot - std::thread::sleep(std::time::Duration::from_secs(2)); - - // Check that the pivot executed - let contents = std::fs::read(PIVOT_OK5_SUCCESS_FILE).unwrap(); - assert_eq!(std::str::from_utf8(&contents).unwrap(), msg); - - let enclave_info_url = - format!("http://{LOCAL_HOST}:{}/qos/enclave-info", host_port); - let enclave_info: EnclaveInfo = - ureq::get(&enclave_info_url).call().unwrap().into_json().unwrap(); - assert_eq!(enclave_info.phase, ProtocolPhase::QuorumKeyProvisioned); - - fs::remove_file(PIVOT_OK5_SUCCESS_FILE).unwrap(); -} diff --git a/src/integration/tests/async_client.rs b/src/integration/tests/async_client.rs index 0fdb94be..7a0564c7 100644 --- a/src/integration/tests/async_client.rs +++ b/src/integration/tests/async_client.rs @@ -1,8 +1,8 @@ use qos_core::{ async_client::AsyncClient, + 
async_server::SocketServerError, async_server::{AsyncRequestProcessor, AsyncSocketServer}, io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, - server::SocketServerError, }; #[derive(Clone)] @@ -17,10 +17,8 @@ impl AsyncRequestProcessor for EchoProcessor { async fn run_echo_server( socket_path: &str, ) -> Result { - let timeout = TimeVal::milliseconds(50); - let pool = - AsyncStreamPool::new(SocketAddress::new_unix(socket_path), timeout, 1) - .expect("unable to create async pool"); + let pool = AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) + .expect("unable to create async pool"); let server = AsyncSocketServer::listen_all(pool, &EchoProcessor)?; Ok(server) @@ -30,12 +28,12 @@ async fn run_echo_server( async fn direct_connect_works() { let socket_path = "/tmp/async_client_test_direct_connect_works.sock"; let socket = SocketAddress::new_unix(socket_path); - let timeout = TimeVal::milliseconds(50); - let pool = AsyncStreamPool::new(socket, timeout, 1) + let timeout = TimeVal::milliseconds(500); + let pool = AsyncStreamPool::new(socket, 1) .expect("unable to create async pool") .shared(); - let client = AsyncClient::new(pool); + let client = AsyncClient::new(pool, timeout); let server = run_echo_server(socket_path).await.unwrap(); @@ -49,11 +47,11 @@ async fn direct_connect_works() { async fn times_out_properly() { let socket_path = "/tmp/async_client_test_times_out_properly.sock"; let socket = SocketAddress::new_unix(socket_path); - let timeout = TimeVal::milliseconds(50); - let pool = AsyncStreamPool::new(socket, timeout, 1) + let timeout = TimeVal::milliseconds(500); + let pool = AsyncStreamPool::new(socket, 1) .expect("unable to create async pool") .shared(); - let client = AsyncClient::new(pool); + let client = AsyncClient::new(pool, timeout); let r = client.call(&[0]).await; assert!(r.is_err()); @@ -63,11 +61,11 @@ async fn times_out_properly() { async fn repeat_connect_works() { let socket_path = 
"/tmp/async_client_test_repeat_connect_works.sock"; let socket = SocketAddress::new_unix(socket_path); - let timeout = TimeVal::milliseconds(50); - let pool = AsyncStreamPool::new(socket, timeout, 1) + let timeout = TimeVal::milliseconds(500); + let pool = AsyncStreamPool::new(socket, 1) .expect("unable to create async pool") .shared(); - let client = AsyncClient::new(pool); + let client = AsyncClient::new(pool, timeout); // server not running yet, expect a connection error let r = client.call(&[0]).await; diff --git a/src/integration/tests/async_qos_host.rs b/src/integration/tests/async_qos_host.rs index e8434e43..9882b93e 100644 --- a/src/integration/tests/async_qos_host.rs +++ b/src/integration/tests/async_qos_host.rs @@ -39,7 +39,7 @@ async fn connects_and_gets_info() { drop(std::fs::remove_file(&enclave_socket)); let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", TEST_ENCLAVE_SOCKET, diff --git a/src/integration/tests/async_remote_tls.rs b/src/integration/tests/async_remote_tls.rs index 03d55e95..e6eca108 100644 --- a/src/integration/tests/async_remote_tls.rs +++ b/src/integration/tests/async_remote_tls.rs @@ -43,12 +43,14 @@ async fn fetch_async_remote_tls_content() { let enclave_pool = AsyncStreamPool::new( SocketAddress::new_unix(REMOTE_TLS_TEST_ENCLAVE_SOCKET), - TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), 1, ) .expect("unable to create enclave async pool"); - let enclave_client = AsyncClient::new(enclave_pool.shared()); + let enclave_client = AsyncClient::new( + enclave_pool.shared(), + TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), + ); let app_request = borsh::to_vec(&PivotRemoteTlsMsg::RemoteTlsRequest { host: "api.turnkey.com".to_string(), diff --git a/src/integration/tests/boot.rs b/src/integration/tests/boot.rs index 09e272f7..bb32cef2 100644 --- a/src/integration/tests/boot.rs +++ b/src/integration/tests/boot.rs @@ 
-252,7 +252,7 @@ async fn standard_boot_e2e() { // -- ENCLAVE start enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", &*usock, diff --git a/src/integration/tests/dev_boot.rs b/src/integration/tests/dev_boot.rs index 18360d53..2aed01b4 100644 --- a/src/integration/tests/dev_boot.rs +++ b/src/integration/tests/dev_boot.rs @@ -19,7 +19,7 @@ async fn dev_boot_e2e() { // Start Enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", &*usock, diff --git a/src/integration/tests/enclave_app_client_socket_stress.rs b/src/integration/tests/enclave_app_client_socket_stress.rs index aec6c196..fd5829bc 100644 --- a/src/integration/tests/enclave_app_client_socket_stress.rs +++ b/src/integration/tests/enclave_app_client_socket_stress.rs @@ -1,9 +1,11 @@ use borsh::BorshDeserialize; -use integration::{PivotSocketStressMsg, PIVOT_SOCKET_STRESS_PATH}; +use integration::{ + wait_for_usock, PivotSocketStressMsg, PIVOT_SOCKET_STRESS_PATH, +}; use qos_core::{ - client::Client, + async_client::AsyncClient, handles::Handles, - io::{SocketAddress, TimeVal, TimeValLike}, + io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, protocol::{ msg::ProtocolMsg, services::boot::{ @@ -22,8 +24,8 @@ const TEST_TMP: &str = "/tmp/enclave_app_client_socket_stress"; const ENCLAVE_SOCK: &str = "/tmp/enclave_app_client_socket_stress/enclave.sock"; const APP_SOCK: &str = "/tmp/enclave_app_client_socket_stress/app.sock"; -#[test] -fn enclave_app_client_socket_stress() { +#[tokio::test] +async fn enclave_app_client_socket_stress() { let _: PathWrapper = TEST_TMP.into(); std::fs::create_dir_all(TEST_TMP).unwrap(); @@ -73,12 +75,18 @@ fn enclave_app_client_socket_stress() { handles.put_manifest_envelope(&manifest_envelope).unwrap(); handles.put_quorum_key(&p256_pair).unwrap(); + let 
enclave_pool = + AsyncStreamPool::new(SocketAddress::new_unix(ENCLAVE_SOCK), 1).unwrap(); + + let app_pool = + AsyncStreamPool::new(SocketAddress::new_unix(APP_SOCK), 1).unwrap(); + std::thread::spawn(move || { Reaper::execute( &handles, Box::new(MockNsm), - SocketAddress::new_unix(ENCLAVE_SOCK), - SocketAddress::new_unix(APP_SOCK), + enclave_pool, + app_pool, // Force the phase to quorum key provisioned so message proxy-ing // works Some(ProtocolPhase::QuorumKeyProvisioned), @@ -86,11 +94,13 @@ fn enclave_app_client_socket_stress() { }); // Make sure the pivot has some time to start up - std::thread::sleep(std::time::Duration::from_secs(1)); + wait_for_usock(APP_SOCK).await; - let enclave_client = Client::new( - SocketAddress::new_unix(ENCLAVE_SOCK), - TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS + 1), + let enclave_client_pool = + AsyncStreamPool::new(SocketAddress::new_unix(ENCLAVE_SOCK), 1).unwrap(); + let enclave_client = AsyncClient::new( + enclave_client_pool.shared(), + TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS + 3), // needs to be bigger than the slow request below + some time for recovery ); let app_request = @@ -98,8 +108,9 @@ fn enclave_app_client_socket_stress() { let request = borsh::to_vec(&ProtocolMsg::ProxyRequest { data: app_request }) .unwrap(); - let raw_response = enclave_client.send(&request).unwrap(); + let raw_response = enclave_client.call(&request).await.unwrap(); let response = ProtocolMsg::try_from_slice(&raw_response).unwrap(); + assert_eq!( response, ProtocolMsg::ProtocolErrorResponse( @@ -107,15 +118,16 @@ fn enclave_app_client_socket_stress() { ) ); - std::thread::sleep(std::time::Duration::from_secs( + tokio::time::sleep(std::time::Duration::from_secs( REAPER_RESTART_DELAY_IN_SECONDS + 1, - )); + )) + .await; // The pivot panicked and should have been restarted. 
let app_request = borsh::to_vec(&PivotSocketStressMsg::OkRequest).unwrap(); let request = borsh::to_vec(&ProtocolMsg::ProxyRequest { data: app_request }) .unwrap(); - let raw_response = enclave_client.send(&request).unwrap(); + let raw_response = enclave_client.call(&request).await.unwrap(); let response = { let msg = ProtocolMsg::try_from_slice(&raw_response).unwrap(); let data = match msg { @@ -128,11 +140,11 @@ fn enclave_app_client_socket_stress() { // Send a request that the app will take too long to respond to let app_request = - borsh::to_vec(&PivotSocketStressMsg::SlowRequest).unwrap(); + borsh::to_vec(&PivotSocketStressMsg::SlowRequest(5500)).unwrap(); let request = borsh::to_vec(&ProtocolMsg::ProxyRequest { data: app_request }) .unwrap(); - let raw_response = enclave_client.send(&request).unwrap(); + let raw_response = enclave_client.call(&request).await.unwrap(); let response = ProtocolMsg::try_from_slice(&raw_response).unwrap(); assert_eq!( response, diff --git a/src/integration/tests/genesis.rs b/src/integration/tests/genesis.rs index 96a74203..42fccfee 100644 --- a/src/integration/tests/genesis.rs +++ b/src/integration/tests/genesis.rs @@ -103,7 +103,7 @@ async fn genesis_e2e() { // -- ENCLAVE start enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", &*usock, diff --git a/src/integration/tests/key.rs b/src/integration/tests/key.rs index 3493b7ce..ff1af9be 100644 --- a/src/integration/tests/key.rs +++ b/src/integration/tests/key.rs @@ -71,7 +71,7 @@ async fn key_fwd_e2e() { // -- ENCLAVE start new enclave let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", new_usock, @@ -257,7 +257,7 @@ fn boot_old_enclave(old_host_port: u16) -> (ChildWrapper, ChildWrapper) { // -- ENCLAVE start old enclave let enclave_child_process: ChildWrapper = - 
Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", old_usock, diff --git a/src/integration/tests/proofs.rs b/src/integration/tests/proofs.rs index 6ddfe105..67af5a8b 100644 --- a/src/integration/tests/proofs.rs +++ b/src/integration/tests/proofs.rs @@ -1,10 +1,10 @@ use std::{process::Command, str}; use borsh::BorshDeserialize; -use integration::{PivotProofMsg, PIVOT_PROOF_PATH}; +use integration::{wait_for_usock, PivotProofMsg, PIVOT_PROOF_PATH}; use qos_core::{ - client::Client, - io::{SocketAddress, TimeVal, TimeValLike}, + async_client::AsyncClient, + io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; @@ -13,23 +13,31 @@ use qos_test_primitives::ChildWrapper; const PROOF_TEST_ENCLAVE_SOCKET: &str = "/tmp/proof_test.enclave.sock"; -#[test] -fn fetch_and_verify_app_proof() { +#[tokio::test] +async fn fetch_and_verify_app_proof() { let _enclave_app: ChildWrapper = Command::new(PIVOT_PROOF_PATH) .arg(PROOF_TEST_ENCLAVE_SOCKET) .spawn() .unwrap() .into(); - let enclave_client = Client::new( + wait_for_usock(PROOF_TEST_ENCLAVE_SOCKET).await; + + let enclave_pool = AsyncStreamPool::new( SocketAddress::new_unix(PROOF_TEST_ENCLAVE_SOCKET), + 1, + ) + .unwrap(); + + let enclave_client = AsyncClient::new( + enclave_pool.shared(), TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), ); let app_request = borsh::to_vec(&PivotProofMsg::AdditionRequest { a: 2, b: 2 }).unwrap(); - let response = enclave_client.send(&app_request).unwrap(); + let response = enclave_client.call(&app_request).await.unwrap(); match PivotProofMsg::try_from_slice(&response).unwrap() { PivotProofMsg::AdditionResponse { result, proof } => { diff --git a/src/integration/tests/qos_host.rs b/src/integration/tests/qos_host.rs index 0905687f..06f3a431 100644 --- a/src/integration/tests/qos_host.rs +++ b/src/integration/tests/qos_host.rs @@ -25,15 +25,16 @@ fn connects_and_gets_info() { 
let r = ureq::get("http://127.0.0.1:3323/qos/enclave-info").call(); assert!(r.is_err()); // expect 500 here - let secret_path: PathWrapper = "./reaper_works.secret".into(); + let secret_path: PathWrapper = "/tmp/qos_host_reaper_works.secret".into(); // let eph_path = "reaper_works.eph.key"; - let manifest_path: PathWrapper = "reaper_works.manifest".into(); + let manifest_path: PathWrapper = + "/tmp/qos_host_reaper_works.manifest".into(); // For our sanity, ensure the secret does not yet exist drop(std::fs::remove_file(&*secret_path)); let mut _enclave_child_process: ChildWrapper = - Command::new("../target/debug/async_qos_core") + Command::new("../target/debug/qos_core") .args([ "--usock", TEST_ENCLAVE_SOCKET, @@ -52,7 +53,7 @@ fn connects_and_gets_info() { .into(); // Give the enclave server time to bind to the socket - std::thread::sleep(std::time::Duration::from_millis(200)); + std::thread::sleep(std::time::Duration::from_millis(500)); let r = ureq::get("http://127.0.0.1:3323/qos/enclave-info").call(); assert!(r.is_ok()); // expect 200 here diff --git a/src/integration/tests/reaper.rs b/src/integration/tests/reaper.rs index 42025892..cf9759d5 100644 --- a/src/integration/tests/reaper.rs +++ b/src/integration/tests/reaper.rs @@ -3,7 +3,7 @@ use std::fs; use integration::{PIVOT_ABORT_PATH, PIVOT_OK_PATH, PIVOT_PANIC_PATH}; use qos_core::{ handles::Handles, - io::SocketAddress, + io::{AsyncStreamPool, SocketAddress}, protocol::services::boot::ManifestEnvelope, reaper::{Reaper, REAPER_EXIT_DELAY_IN_SECONDS}, }; @@ -12,10 +12,10 @@ use qos_test_primitives::PathWrapper; #[test] fn reaper_works() { - let secret_path: PathWrapper = "./reaper_works.secret".into(); + let secret_path: PathWrapper = "/tmp/reaper_works.secret".into(); // let eph_path = "reaper_works.eph.key"; - let usock: PathWrapper = "./reaper_works/reaper_works.sock".into(); - let manifest_path: PathWrapper = "reaper_works.manifest".into(); + let usock: PathWrapper = "/tmp/reaper_works.sock".into(); + let 
manifest_path: PathWrapper = "/tmp/reaper_works.manifest".into(); let msg = "durp-a-durp"; // For our sanity, ensure the secret does not yet exist @@ -37,12 +37,19 @@ fn reaper_works() { handles.put_manifest_envelope(&manifest_envelope).unwrap(); assert!(handles.pivot_exists()); + let enclave_pool = + AsyncStreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); + + let app_pool = + AsyncStreamPool::new(SocketAddress::new_unix("./never.sock"), 1) + .unwrap(); + let reaper_handle = std::thread::spawn(move || { Reaper::execute( &handles, Box::new(MockNsm), - SocketAddress::new_unix(&usock), - SocketAddress::new_unix("./never.sock"), + enclave_pool, + app_pool, None, ) }); @@ -68,10 +75,10 @@ fn reaper_works() { #[test] fn reaper_handles_non_zero_exits() { let secret_path: PathWrapper = - "./reaper_handles_non_zero_exits.secret".into(); - let usock: PathWrapper = "./reaper_handles_non_zero_exits.sock".into(); + "/tmp/reaper_handles_non_zero_exits.secret".into(); + let usock: PathWrapper = "/tmp/reaper_handles_non_zero_exits.sock".into(); let manifest_path: PathWrapper = - "./reaper_handles_non_zero_exits.manifest".into(); + "/tmp/reaper_handles_non_zero_exits.manifest".into(); // For our sanity, ensure the secret does not yet exist drop(fs::remove_file(&*secret_path)); @@ -88,12 +95,19 @@ fn reaper_handles_non_zero_exits() { handles.put_manifest_envelope(&Default::default()).unwrap(); assert!(handles.pivot_exists()); + let enclave_pool = + AsyncStreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); + + let app_pool = + AsyncStreamPool::new(SocketAddress::new_unix("./never.sock"), 1) + .unwrap(); + let reaper_handle = std::thread::spawn(move || { Reaper::execute( &handles, Box::new(MockNsm), - SocketAddress::new_unix(&usock), - SocketAddress::new_unix("./never.sock"), + enclave_pool, + app_pool, None, ) }); @@ -120,9 +134,10 @@ fn reaper_handles_non_zero_exits() { #[test] fn reaper_handles_panic() { - let secret_path: PathWrapper = 
"./reaper_handles_panics.secret".into(); - let usock: PathWrapper = "./reaper_handles_panics.sock".into(); - let manifest_path: PathWrapper = "./reaper_handles_panics.manifest".into(); + let secret_path: PathWrapper = "/tmp/reaper_handles_panics.secret".into(); + let usock: PathWrapper = "/tmp/reaper_handles_panics.sock".into(); + let manifest_path: PathWrapper = + "/tmp/reaper_handles_panics.manifest".into(); // For our sanity, ensure the secret does not yet exist drop(fs::remove_file(&*secret_path)); @@ -139,12 +154,19 @@ fn reaper_handles_panic() { handles.put_manifest_envelope(&Default::default()).unwrap(); assert!(handles.pivot_exists()); + let enclave_pool = + AsyncStreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); + + let app_pool = + AsyncStreamPool::new(SocketAddress::new_unix("./never.sock"), 1) + .unwrap(); + let reaper_handle = std::thread::spawn(move || { Reaper::execute( &handles, Box::new(MockNsm), - SocketAddress::new_unix(&usock), - SocketAddress::new_unix("./never.sock"), + enclave_pool, + app_pool, None, ) }); diff --git a/src/integration/tests/simple_socket_stress.rs b/src/integration/tests/simple_socket_stress.rs index 8d2e6f16..720e0bec 100644 --- a/src/integration/tests/simple_socket_stress.rs +++ b/src/integration/tests/simple_socket_stress.rs @@ -1,55 +1,59 @@ use std::process::Command; -use integration::{PivotSocketStressMsg, PIVOT_SOCKET_STRESS_PATH}; +use integration::{ + wait_for_usock, PivotSocketStressMsg, PIVOT_SOCKET_STRESS_PATH, +}; use qos_core::{ - client::{Client, ClientError}, - io::{SocketAddress, TimeVal, TimeValLike}, + async_client::{AsyncClient, ClientError}, + io::{AsyncStreamPool, IOError, SocketAddress, TimeVal, TimeValLike}, protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; use qos_test_primitives::ChildWrapper; const SOCKET_STRESS_SOCK: &str = "/tmp/simple_socket_stress.sock"; -#[test] -fn simple_socket_stress() { +#[tokio::test] +async fn simple_socket_stress() { let _enclave_app: ChildWrapper = 
Command::new(PIVOT_SOCKET_STRESS_PATH) .arg(SOCKET_STRESS_SOCK) .spawn() .unwrap() .into(); - let enclave_client = Client::new( - SocketAddress::new_unix(SOCKET_STRESS_SOCK), - // The timeout of `PivotSocketStressMsg::SlowResponse` is relative to - // `ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS`. - TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), - ); + wait_for_usock(SOCKET_STRESS_SOCK).await; + + // needs to be long enough for process exit to register and not cause a timeout + let timeout = TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS); + + let app_pool = + AsyncStreamPool::new(SocketAddress::new_unix(SOCKET_STRESS_SOCK), 1) + .unwrap(); + + let enclave_client = AsyncClient::new(app_pool.shared(), timeout); let app_request = - borsh::to_vec(&PivotSocketStressMsg::SlowRequest).unwrap(); - let err = enclave_client.send(&app_request).unwrap_err(); + borsh::to_vec(&PivotSocketStressMsg::SlowRequest(5500)).unwrap(); + let err = enclave_client.call(&app_request).await.unwrap_err(); match err { ClientError::IOError(qos_core::io::IOError::RecvTimeout) => (), - e => panic!("did not get expected err {:?}", e), + e => panic!("slow pivot did not get expected err {:?}", e), }; let app_request = borsh::to_vec(&PivotSocketStressMsg::PanicRequest).unwrap(); - let err = enclave_client.send(&app_request).unwrap_err(); + let err = enclave_client.call(&app_request).await.unwrap_err(); match err { ClientError::IOError(qos_core::io::IOError::RecvConnectionClosed) => (), - e => panic!("did not get expected err {:?}", e), + e => panic!("panicing pivot did not get expected err {:?}", e), }; - std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; // The app has panic'ed and exited - so any proceeding request should fail. 
let app_request = borsh::to_vec(&PivotSocketStressMsg::OkRequest).unwrap(); - let err = enclave_client.send(&app_request).unwrap_err(); + let err = enclave_client.call(&app_request).await.unwrap_err(); match err { - ClientError::IOError(qos_core::io::IOError::ConnectNixError( - nix::Error::ENOENT, - )) => (), + ClientError::IOError(IOError::StdIoError(_)) => (), // for usock this is probably "no such file or directoy", vsock would differ e => panic!("did not get expected err {:?}", e), }; } diff --git a/src/qos_core/Cargo.toml b/src/qos_core/Cargo.toml index e4c23876..4127df37 100644 --- a/src/qos_core/Cargo.toml +++ b/src/qos_core/Cargo.toml @@ -22,8 +22,8 @@ serde_bytes = { version = "0.11", default-features = false } serde = { version = "1", features = ["derive"], default-features = false } futures = { version = "0.3.30" } -tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time", "signal"], default-features = false, optional = true} -tokio-vsock = { version = "0.7.1", optional = true } +tokio = { version = "1.38.0", features = ["io-util", "macros", "net", "rt-multi-thread", "time", "signal"], default-features = false } +tokio-vsock = { version = "0.7.1" } [dev-dependencies] qos_test_primitives = { path = "../qos_test_primitives" } @@ -33,13 +33,11 @@ rustls = { version = "0.23.5" } webpki-roots = { version = "0.26.1" } [features] -async = ["tokio", "tokio-vsock"] # Support for VSOCK vm = [] # Never use in production - support for mock NSM mock = ["qos_nsm/mock"] [[bin]] -name = "async_qos_core" -path = "src/bin/async_qos_core.rs" -required-features = ["async"] +name = "qos_core" +path = "src/main.rs" diff --git a/src/qos_core/src/async_client.rs b/src/qos_core/src/async_client.rs index a23e9946..d76889fe 100644 --- a/src/qos_core/src/async_client.rs +++ b/src/qos_core/src/async_client.rs @@ -1,30 +1,105 @@ //! Streaming socket based client to connect with //! [`crate::server::SocketServer`]. 
-use crate::{client::ClientError, io::SharedAsyncStreamPool}; +use std::time::Duration; +use nix::sys::time::TimeVal; + +use crate::io::{IOError, SharedAsyncStreamPool}; + +/// Enclave client error. +#[derive(Debug)] +pub enum ClientError { + /// [`io::IOError`] wrapper. + IOError(IOError), + /// `borsh::io::Error` wrapper. + BorshError(borsh::io::Error), +} + +impl From for ClientError { + fn from(err: IOError) -> Self { + Self::IOError(err) + } +} + +impl From for ClientError { + fn from(err: borsh::io::Error) -> Self { + Self::BorshError(err) + } +} /// Client for communicating with the enclave `crate::server::SocketServer`. #[derive(Clone, Debug)] pub struct AsyncClient { pool: SharedAsyncStreamPool, + timeout: Duration, } impl AsyncClient { /// Create a new client. #[must_use] - pub fn new(pool: SharedAsyncStreamPool) -> Self { - Self { pool } + pub fn new(pool: SharedAsyncStreamPool, timeout: TimeVal) -> Self { + let timeout = timeval_to_duration(timeout); + Self { pool, timeout } } /// Send raw bytes and wait for a response until the clients configured /// timeout. + /// + /// # Panics + /// Does not. See comment bellow. 
pub async fn call(&self, request: &[u8]) -> Result, ClientError> { - // TODO: ales - remove later, debug reasons let pool = self.pool.read().await; - let mut stream = pool.get().await; - eprintln!("AsyncClient::call - Stream aquired"); - let resp = stream.call(request).await?; + // hold the stream if we got it before timeout, but errored out on timeout later + let mut maybe_stream = None; + + // timeout should apply to the entire operation + let timeout_result = tokio::time::timeout(self.timeout, async { + maybe_stream = Some(pool.get().await); + + maybe_stream + .as_deref_mut() + .expect("unreachable unwrap") // this can't happen, we just assigned it above + .call(request) + .await + }) + .await; + + let resp = match timeout_result { + Ok(result) => result?, + Err(_err) => { + // ensure we clean up the stream if we had it + if let Some(mut stream) = maybe_stream { + stream.reset(); + } + return Err(IOError::RecvTimeout.into()); + } + }; + Ok(resp) } + + /// Expands the underlaying `AsyncPool` to given `pool_size` + pub async fn expand_to( + &mut self, + pool_size: u32, + ) -> Result<(), ClientError> { + self.pool.write().await.expand_to(pool_size)?; + + Ok(()) + } + + /// Attempt a one-off connection, used for tests + pub async fn try_connect(&self) -> Result<(), IOError> { + let pool = self.pool.read().await; + let mut stream = pool.get().await; + + stream.connect().await + } +} + +fn timeval_to_duration(timeval: TimeVal) -> Duration { + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + Duration::new(timeval.tv_sec() as u64, timeval.tv_usec() as u32 * 1000) } diff --git a/src/qos_core/src/async_server.rs b/src/qos_core/src/async_server.rs index 4ea24e59..81831c93 100644 --- a/src/qos_core/src/async_server.rs +++ b/src/qos_core/src/async_server.rs @@ -3,10 +3,20 @@ use tokio::task::JoinHandle; -use crate::{ - io::{AsyncListener, AsyncStreamPool, IOError}, - server::SocketServerError, -}; +use crate::io::{AsyncListener, 
AsyncStreamPool, IOError}; + +/// Error variants for [`SocketServer`] +#[derive(Debug)] +pub enum SocketServerError { + /// `io::IOError` wrapper. + IOError(IOError), +} + +impl From for SocketServerError { + fn from(err: IOError) -> Self { + Self::IOError(err) + } +} /// Something that can process requests in an async way. pub trait AsyncRequestProcessor: Send { @@ -100,24 +110,38 @@ where P: AsyncRequestProcessor + Clone, { loop { + eprintln!("AsyncServer: accepting"); let mut stream = listener.accept().await?; loop { match stream.recv().await { Ok(payload) => { let response = processor.process(payload).await; - stream.send(&response).await?; + match stream.send(&response).await { + Ok(()) => {} + Err(err) => { + eprintln!( + "AsyncServer: error sending reply {err:?}, re-accepting" + ); + break; + } + } } - Err(err) => match err { - IOError::StdIoError(err) => { + Err(err) => { + if let IOError::StdIoError(err) = err { + eprintln!("AsyncServer: io error {err:?}"); if err.kind() == std::io::ErrorKind::UnexpectedEof { eprintln!( "AsyncServer: unexpected eof, re-accepting" ); - break; // just re-accept + break; } + } else { + eprintln!( + "AsyncServer: unknown error {err:?}, re-accepting" + ); + break; } - _ => return Err(err.into()), - }, + } } } } diff --git a/src/qos_core/src/bin/async_qos_core.rs b/src/qos_core/src/bin/async_qos_core.rs deleted file mode 100644 index 4fec305b..00000000 --- a/src/qos_core/src/bin/async_qos_core.rs +++ /dev/null @@ -1,5 +0,0 @@ -use qos_core::cli::CLI; - -fn main() { - CLI::async_execute(); -} diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index 8cb94abc..8c96d8fd 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -12,7 +12,6 @@ use crate::{ EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, }; -#[cfg(feature = "async")] use crate::io::{AsyncStreamPool, IOError}; /// "cid" @@ -51,11 +50,7 @@ impl EnclaveOpts { /// Create a new [`AsyncPool`] of [`AsyncStream`] using the list 
of [`SocketAddress`] for the enclave server and /// return the new [`AsyncPool`]. Analogous to [`Self::addr`] and [`Self::app_addr`] depending on the [`app`] parameter. - #[cfg(feature = "async")] - #[allow(unused)] fn async_pool(&self, app: bool) -> Result { - use nix::sys::time::{TimeVal, TimeValLike}; - let usock_param = if app { APP_USOCK } else { USOCK }; match ( @@ -71,15 +66,12 @@ impl EnclaveOpts { p.parse().map_err(|_| IOError::ConnectAddressInvalid)?; AsyncStreamPool::new( SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS), - TimeVal::seconds(5), 1, ) } - (None, None, Some(u)) => AsyncStreamPool::new( - SocketAddress::new_unix(u), - TimeVal::seconds(5), - 1, - ), + (None, None, Some(u)) => { + AsyncStreamPool::new(SocketAddress::new_unix(u), 1) + } _ => panic!("Invalid socket opts"), } } @@ -167,37 +159,11 @@ impl EnclaveOpts { /// Enclave server CLI. pub struct CLI; impl CLI { - /// Execute the enclave server CLI with the environment args. - pub fn execute() { - let mut args: Vec = env::args().collect(); - let opts = EnclaveOpts::new(&mut args); - - if opts.parsed.version() { - println!("version: {}", env!("CARGO_PKG_VERSION")); - } else if opts.parsed.help() { - println!("{}", opts.parsed.info()); - } else { - Reaper::execute( - &Handles::new( - opts.ephemeral_file(), - opts.quorum_file(), - opts.manifest_file(), - opts.pivot_file(), - ), - opts.nsm(), - opts.addr(), - opts.app_addr(), - None, - ); - } - } - /// Execute the enclave server CLI with the environment args using tokio/async /// /// # Panics /// If the socket pools cannot be created - #[cfg(feature = "async")] - pub fn async_execute() { + pub async fn execute() { let mut args: Vec = env::args().collect(); let opts = EnclaveOpts::new(&mut args); @@ -206,20 +172,26 @@ impl CLI { } else if opts.parsed.help() { println!("{}", opts.parsed.info()); } else { - Reaper::async_execute( - &Handles::new( - opts.ephemeral_file(), - opts.quorum_file(), - opts.manifest_file(), - opts.pivot_file(), - 
), - opts.nsm(), - opts.async_pool(false) - .expect("Unable to create enclave socket pool"), - opts.async_pool(true) - .expect("Unable to create enclave app pool"), - None, - ); + // start reaper in a thread so we can terminate on ctrl+c properly + std::thread::spawn(move || { + Reaper::execute( + &Handles::new( + opts.ephemeral_file(), + opts.quorum_file(), + opts.manifest_file(), + opts.pivot_file(), + ), + opts.nsm(), + opts.async_pool(false) + .expect("Unable to create enclave socket pool"), + opts.async_pool(true) + .expect("Unable to create enclave app pool"), + None, + ); + }); + + eprintln!("qos_core: Reaper running, press ctrl+c to quit"); + let _ = tokio::signal::ctrl_c().await; } } } diff --git a/src/qos_core/src/client.rs b/src/qos_core/src/client.rs deleted file mode 100644 index 855dc265..00000000 --- a/src/qos_core/src/client.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! Streaming socket based client to connect with -//! [`crate::server::SocketServer`]. - -use crate::io::{self, SocketAddress, Stream, TimeVal}; - -/// Enclave client error. -#[derive(Debug)] -pub enum ClientError { - /// [`io::IOError`] wrapper. - IOError(io::IOError), - /// `borsh::io::Error` wrapper. - BorshError(borsh::io::Error), -} - -impl From for ClientError { - fn from(err: io::IOError) -> Self { - Self::IOError(err) - } -} - -impl From for ClientError { - fn from(err: borsh::io::Error) -> Self { - Self::BorshError(err) - } -} - -/// Client for communicating with the enclave [`crate::server::SocketServer`]. -#[derive(Debug, Clone)] -pub struct Client { - addr: SocketAddress, - timeout: TimeVal, -} - -impl Client { - /// Create a new client. - #[must_use] - pub fn new(addr: SocketAddress, timeout: TimeVal) -> Self { - Self { addr, timeout } - } - - /// Send raw bytes and wait for a response until the clients configured - /// timeout. 
- pub fn send(&self, request: &[u8]) -> Result, ClientError> { - let stream = Stream::connect(&self.addr, self.timeout)?; - stream.send(request)?; - stream.recv().map_err(Into::into) - } -} diff --git a/src/qos_core/src/io/async_pool.rs b/src/qos_core/src/io/async_pool.rs index 0ea5c886..879f489e 100644 --- a/src/qos_core/src/io/async_pool.rs +++ b/src/qos_core/src/io/async_pool.rs @@ -1,6 +1,6 @@ use std::{path::Path, sync::Arc}; -use nix::sys::{socket::UnixAddr, time::TimeVal}; +use nix::sys::socket::UnixAddr; use tokio::sync::{Mutex, MutexGuard, RwLock}; use super::{AsyncListener, AsyncStream, IOError, SocketAddress}; @@ -25,7 +25,6 @@ struct AsyncPool { pub struct AsyncStreamPool { addresses: Vec, // local copy used for `listen` only TODO: refactor listeners out of pool pool: AsyncPool, - timeout: TimeVal, } /// Helper type to wrap `AsyncStreamPool` in `Arc` and `RwLock`. Used to allow multiple processors to run across IO @@ -36,7 +35,6 @@ impl AsyncStreamPool { /// Create a new `AsyncStreamPool` with given starting `SocketAddress`, timout and number of addresses to populate. pub fn new( start_address: SocketAddress, - timeout: TimeVal, mut count: u32, ) -> Result { eprintln!( @@ -56,24 +54,22 @@ impl AsyncStreamPool { addr = addr.next_address()?; } - Ok(Self::with_addresses(addresses, timeout)) + Ok(Self::with_addresses(addresses)) } /// Create a new `AsyncStreamPool` which will contain all the provided addresses but no connections yet. - /// Includes the connect timeout which gets used in case `get` gets called. 
#[must_use] fn with_addresses( addresses: impl IntoIterator, - timeout: TimeVal, ) -> Self { let addresses: Vec = addresses.into_iter().collect(); let streams: Vec = - addresses.iter().map(|a| AsyncStream::new(a, timeout)).collect(); + addresses.iter().map(AsyncStream::new).collect(); let pool = AsyncPool::from(streams); - Self { addresses, pool, timeout } + Self { addresses, pool } } /// Helper function to get the Arc and Mutex wrapping @@ -123,7 +119,7 @@ impl AsyncStreamPool { for _ in count..size { next = next.next_address()?; - self.pool.push(AsyncStream::new(&next, self.timeout)); + self.pool.push(AsyncStream::new(&next)); self.addresses.push(next.clone()); } } diff --git a/src/qos_core/src/io/async_stream.rs b/src/qos_core/src/io/async_stream.rs index 4b79e85f..7416319b 100644 --- a/src/qos_core/src/io/async_stream.rs +++ b/src/qos_core/src/io/async_stream.rs @@ -1,11 +1,6 @@ //! Abstractions to handle connection based socket streams. -use std::{ - pin::Pin, - time::{Duration, SystemTime}, -}; - -pub use nix::sys::time::TimeVal; +use std::{io::ErrorKind, pin::Pin}; use tokio::{ io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, @@ -35,58 +30,41 @@ enum InnerStream { pub struct AsyncStream { address: Option, inner: Option, - timeout: Duration, } impl AsyncStream { // accept a new connection, used by server side fn unix_accepted(stream: UnixStream) -> Self { - Self { - address: None, - inner: Some(InnerStream::Unix(stream)), - timeout: Duration::ZERO, - } + Self { address: None, inner: Some(InnerStream::Unix(stream)) } } // accept a new connection, used by server side #[cfg(feature = "vm")] fn vsock_accepted(stream: VsockStream) -> Self { - Self { - address: None, - inner: Some(InnerStream::Vsock(stream)), - timeout: Duration::ZERO, - } + Self { address: None, inner: Some(InnerStream::Vsock(stream)) } } /// Create a new `AsyncStream` with known `SocketAddress` and `TimeVal`. The stream starts disconnected /// and will connect on the first `call`. 
#[must_use] - pub fn new(address: &SocketAddress, timeout: TimeVal) -> Self { - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - let timeout = Duration::new( - timeout.tv_sec() as u64, - timeout.tv_usec() as u32 * 1000, - ); - - Self { address: Some(address.clone()), inner: None, timeout } + pub fn new(address: &SocketAddress) -> Self { + Self { address: Some(address.clone()), inner: None } } /// Create a new `Stream` from a `SocketAddress` and a timeout and connect using async /// Sets `inner` to the new stream. pub async fn connect(&mut self) -> Result<(), IOError> { - let timeout = self.timeout; let addr = self.address()?.clone(); match self.address()? { SocketAddress::Unix(_uaddr) => { - let inner = retry_unix_connect(addr, timeout).await?; + let inner = unix_connect(addr).await?; self.inner = Some(InnerStream::Unix(inner)); } #[cfg(feature = "vm")] SocketAddress::Vsock(_vaddr) => { - let inner = retry_vsock_connect(addr, timeout).await?; + let inner = vsock_connect(addr).await?; self.inner = Some(InnerStream::Vsock(inner)); } @@ -97,16 +75,15 @@ impl AsyncStream { /// Reconnects this `AsyncStream` by calling `connect` again on the underlaying socket pub async fn reconnect(&mut self) -> Result<(), IOError> { - let timeout = self.timeout; let addr = self.address()?.clone(); match &mut self.inner_mut()? 
{ InnerStream::Unix(ref mut s) => { - *s = retry_unix_connect(addr, timeout).await?; + *s = unix_connect(addr).await?; } #[cfg(feature = "vm")] InnerStream::Vsock(ref mut s) => { - *s = retry_vsock_connect(addr, timeout).await?; + *s = vsock_connect(addr).await?; } } Ok(()) @@ -136,8 +113,20 @@ impl AsyncStream { if self.inner.is_none() { self.connect().await?; } - self.send(req_buf).await?; - self.recv().await + + let send_result = self.send(req_buf).await; + if send_result.is_err() { + self.reset(); + send_result?; + } + + let result = self.recv().await; + eprintln!("AsyncStream: received"); + if result.is_err() { + self.reset(); + } + + result } fn address(&self) -> Result<&SocketAddress, IOError> { @@ -147,6 +136,11 @@ impl AsyncStream { fn inner_mut(&mut self) -> Result<&mut InnerStream, IOError> { self.inner.as_mut().ok_or(IOError::DisconnectedStream) } + + /// Resets the inner stream, forcing a re-connect next `call` + pub fn reset(&mut self) { + self.inner = None; + } } async fn send( @@ -156,6 +150,8 @@ async fn send( let len = buf.len(); // First, send the length of the buffer let len_buf: [u8; size_of::()] = (len as u64).to_le_bytes(); + + // send the header stream.write_all(&len_buf).await?; // Send the actual contents of the buffer stream.write_all(buf).await?; @@ -168,7 +164,14 @@ async fn recv( ) -> Result, IOError> { let length: usize = { let mut buf = [0u8; size_of::()]; - stream.read_exact(&mut buf).await?; + + let r = stream.read_exact(&mut buf).await.map_err(|e| match e.kind() { + ErrorKind::UnexpectedEof => IOError::RecvConnectionClosed, + _ => IOError::StdIoError(e), + }); + + r?; + u64::from_le_bytes(buf) .try_into() // Should only be possible if we are on 32bit architecture @@ -177,7 +180,10 @@ async fn recv( // Read the buffer let mut buf = vec![0; length]; - stream.read_exact(&mut buf).await?; + stream.read_exact(&mut buf).await.map_err(|e| match e.kind() { + ErrorKind::UnexpectedEof => IOError::RecvConnectionClosed, + _ => 
IOError::StdIoError(e), + })?; Ok(buf) } @@ -259,6 +265,10 @@ impl AsyncListener { SocketAddress::Unix(uaddr) => { let path = uaddr.path().ok_or(IOError::ConnectAddressInvalid)?; + if path.exists() { + // attempt cleanup, this mostly happens from tests/panics + std::fs::remove_file(path)?; + } let inner = InnerListener::Unix(UnixListener::bind(path)?); Self { inner } } @@ -309,79 +319,24 @@ impl Drop for AsyncListener { } } -// raw unix socket connect retry with timeout, 50ms period -async fn retry_unix_connect( +async fn unix_connect( addr: SocketAddress, - timeout: Duration, ) -> Result { - let sleep_time = Duration::from_millis(50); - let eot = SystemTime::now() + timeout; let addr = addr.usock(); let path = addr.path().ok_or(IOError::ConnectAddressInvalid)?; - loop { - let socket = UnixSocket::new_stream()?; - - eprintln!("Attempting USOCK connect to: {:?}", addr.path()); - let tr = tokio::time::timeout(timeout, socket.connect(path)).await; - match tr { - Ok(r) => match r { - Ok(stream) => { - eprintln!("Connected to USOCK at: {:?}", addr.path()); - return Ok(stream); - } - Err(err) => { - eprintln!("Error connecting to USOCK: {err}"); - if SystemTime::now() > eot { - return Err(err); - } - tokio::time::sleep(sleep_time).await; - } - }, - Err(err) => { - eprintln!( - "Connecting to USOCK failed with timeout error: {err}" - ); - return Err(err.into()); - } - } - } + let socket = UnixSocket::new_stream()?; + eprintln!("Attempting USOCK connect to: {:?}", addr.path()); + socket.connect(path).await } -// raw vsock socket connect retry with timeout, 50ms period +// raw vsock socket connect #[cfg(feature = "vm")] -async fn retry_vsock_connect( +async fn vsock_connect( addr: SocketAddress, - timeout: Duration, ) -> Result { - let sleep_time = Duration::from_millis(50); - let eot = SystemTime::now() + timeout; let addr = addr.vsock(); - loop { - eprintln!("Attempting VSOCK connect to: {:?}", addr); - let tr = - tokio::time::timeout(timeout, 
VsockStream::connect(*addr)).await; - match tr { - Ok(r) => match r { - Ok(stream) => { - eprintln!("Connected to VSOCK at: {:?}", addr); - return Ok(stream); - } - Err(err) => { - eprintln!("Error connecting to VSOCK: {}", err); - if SystemTime::now() > eot { - return Err(err); - } - tokio::time::sleep(sleep_time).await; - } - }, - Err(err) => { - eprintln!( - "Connecting to VSOCK failed with timeout error: {err}" - ); - return Err(err.into()); - } - } - } + eprintln!("Attempting VSOCK connect to: {:?}", addr); + VsockStream::connect(*addr).await } diff --git a/src/qos_core/src/io/mod.rs b/src/qos_core/src/io/mod.rs index 4f2eb212..17ef2d29 100644 --- a/src/qos_core/src/io/mod.rs +++ b/src/qos_core/src/io/mod.rs @@ -3,20 +3,15 @@ //! NOTE TO MAINTAINERS: Interaction with any sys calls should be contained //! within this module. -#[cfg(feature = "async")] mod async_pool; -#[cfg(feature = "async")] mod async_stream; -#[cfg(feature = "async")] pub use async_pool::*; -#[cfg(feature = "async")] pub use async_stream::*; mod stream; -pub use stream::{ - Listener, SocketAddress, Stream, TimeVal, TimeValLike, MAX_PAYLOAD_SIZE, - VMADDR_FLAG_TO_HOST, VMADDR_NO_FLAGS, -}; +pub use stream::{SocketAddress, VMADDR_FLAG_TO_HOST, VMADDR_NO_FLAGS}; + +pub use nix::sys::time::{TimeVal, TimeValLike}; /// QOS I/O error #[derive(Debug)] @@ -49,7 +44,6 @@ pub enum IOError { RecvNixError(nix::Error), /// Reading the response size resulted in a size which exceeds the max payload size. OversizedPayload(usize), - #[cfg(feature = "async")] /// A async socket pool error during pool operations. 
PoolError(PoolError), } @@ -66,14 +60,6 @@ impl From for IOError { } } -#[cfg(feature = "async")] -impl From for IOError { - fn from(_: tokio::time::error::Elapsed) -> Self { - Self::ConnectTimeout - } -} - -#[cfg(feature = "async")] impl From for IOError { fn from(value: PoolError) -> Self { Self::PoolError(value) diff --git a/src/qos_core/src/io/stream.rs b/src/qos_core/src/io/stream.rs index aa3b7bc9..156f9d68 100644 --- a/src/qos_core/src/io/stream.rs +++ b/src/qos_core/src/io/stream.rs @@ -1,31 +1,10 @@ //! Abstractions to handle connection based socket streams. -use std::{ - io::{ErrorKind, Read, Write}, - mem::size_of, - os::fd::{AsFd, AsRawFd, FromRawFd, OwnedFd}, -}; - #[cfg(feature = "vm")] use nix::sys::socket::VsockAddr; -use nix::sys::socket::{ - accept, bind, connect, listen, recv, send, socket, sockopt, AddressFamily, - Backlog, MsgFlags, SetSockOpt, SockFlag, SockType, SockaddrLike, UnixAddr, -}; -pub use nix::sys::time::{TimeVal, TimeValLike}; - -use super::IOError; +use nix::sys::socket::{AddressFamily, SockaddrLike, UnixAddr}; // 25(retries) x 10(milliseconds) = 1/4 a second of retrying -const MAX_RETRY: usize = 25; -const BACKOFF_MILLISECONDS: u64 = 10; -const BACKLOG: i32 = 127; // due to bug in nix::Backlog check, 128 is disallowed, fixed in https://github.com/nix-rust/nix/commit/a0869f993c0e7639b13b9bb11cb74d54a8018fbd - -const MEGABYTE: usize = 1024 * 1024; - -/// Maximum payload size for a single recv / send call. We're being generous with 128MB. -/// The goal here is to avoid server crashes if the payload size exceeds the available system memory. -pub const MAX_PAYLOAD_SIZE: usize = 128 * MEGABYTE; /// Socket address. #[derive(Clone, Debug, PartialEq, Eq)] @@ -151,7 +130,7 @@ impl SocketAddress { } /// Extract svm_flags field value from existing VSOCK. 
-#[cfg(all(feature = "vm", feature = "async"))] +#[cfg(feature = "vm")] #[allow(unsafe_code)] pub fn vsock_svm_flags(vsock: VsockAddr) -> u8 { unsafe { @@ -172,469 +151,3 @@ struct SockAddrVm { svm_flags: u8, svm_zero: [u8; 3], } - -/// Handle on a stream -pub struct Stream { - fd: OwnedFd, -} - -impl Stream { - /// Create a new `Stream` from a `SocketAddress` and a timeout - pub fn connect( - addr: &SocketAddress, - timeout: TimeVal, - ) -> Result { - let mut err = IOError::UnknownError; - - for _ in 0..MAX_RETRY { - let fd = socket_fd(addr)?; - - // set `SO_RCVTIMEO` - let receive_timeout = sockopt::ReceiveTimeout; - receive_timeout.set(&fd.as_fd(), &timeout)?; - - let send_timeout = sockopt::SendTimeout; - send_timeout.set(&fd.as_fd(), &timeout)?; - - let stream = Self { fd }; - match connect(stream.fd.as_raw_fd(), &*addr.addr()) { - Ok(()) => return Ok(stream), - Err(e) => err = IOError::ConnectNixError(e), - } - - std::thread::sleep(std::time::Duration::from_millis( - BACKOFF_MILLISECONDS, - )); - } - - Err(err) - } - - /// Sends a buffer over the underlying socket - pub fn send(&self, buf: &[u8]) -> Result<(), IOError> { - let len = buf.len(); - // First, send the length of the buffer - { - let len_buf: [u8; size_of::()] = (len as u64).to_le_bytes(); - - // First, send the length of the buffer - let mut sent_bytes = 0; - while sent_bytes < len_buf.len() { - sent_bytes += match send( - self.fd.as_raw_fd(), - &len_buf[sent_bytes..len_buf.len()], - MsgFlags::empty(), - ) { - Ok(size) => size, - Err(err) => return Err(IOError::SendNixError(err)), - }; - } - } - - // Then, send the contents of the buffer - { - let mut sent_bytes = 0; - while sent_bytes < len { - sent_bytes += match send( - self.fd.as_raw_fd(), - &buf[sent_bytes..len], - MsgFlags::empty(), - ) { - Ok(size) => size, - Err(err) => return Err(IOError::SendNixError(err)), - } - } - } - - Ok(()) - } - - /// Receive from the underlying socket - pub fn recv(&self) -> Result, IOError> { - let length: usize 
= { - { - let mut buf = [0u8; size_of::()]; - let len = buf.len(); - std::debug_assert!(buf.len() == 8); - - let mut received_bytes = 0; - while received_bytes < len { - received_bytes += match recv( - self.fd.as_raw_fd(), - &mut buf[received_bytes..len], - MsgFlags::empty(), - ) { - Ok(0) => { - return Err(IOError::RecvConnectionClosed); - } - Ok(size) => size, - Err(nix::Error::EINTR) => { - return Err(IOError::RecvInterrupted); - } - Err(nix::Error::EAGAIN) => { - return Err(IOError::RecvTimeout); - } - Err(err) => { - return Err(IOError::RecvNixError(err)); - } - }; - } - - u64::from_le_bytes(buf) - .try_into() - // Should only be possible if we are on 32bit architecture - .map_err(|_| IOError::ArithmeticSaturation)? - } - }; - - if length > MAX_PAYLOAD_SIZE { - return Err(IOError::OversizedPayload(length)); - } - - // Read the buffer - let mut buf = vec![0; length]; - { - let mut received_bytes = 0; - while received_bytes < length { - received_bytes += match recv( - self.fd.as_raw_fd(), - &mut buf[received_bytes..length], - MsgFlags::empty(), - ) { - Ok(0) => { - return Err(IOError::RecvConnectionClosed); - } - Ok(size) => size, - Err(nix::Error::EINTR) => { - return Err(IOError::RecvInterrupted); - } - Err(nix::Error::EAGAIN) => { - return Err(IOError::RecvTimeout); - } - Err(err) => { - return Err(IOError::NixError(err)); - } - }; - } - } - Ok(buf) - } -} - -impl Read for Stream { - fn read(&mut self, buf: &mut [u8]) -> Result { - match recv(self.fd.as_raw_fd(), buf, MsgFlags::empty()) { - Ok(0) => Err(std::io::Error::new( - ErrorKind::ConnectionAborted, - "read 0 bytes", - )), - Ok(size) => Ok(size), - Err(err) => Err(std::io::Error::from_raw_os_error(err as i32)), - } - } -} - -impl Write for Stream { - fn write(&mut self, buf: &[u8]) -> Result { - match send(self.fd.as_raw_fd(), buf, MsgFlags::empty()) { - Ok(0) => Err(std::io::Error::new( - ErrorKind::ConnectionAborted, - "wrote 0 bytes", - )), - Ok(size) => Ok(size), - Err(err) => 
Err(std::io::Error::from_raw_os_error(err as i32)), - } - } - - // No-op because we can't flush a socket. - fn flush(&mut self) -> Result<(), std::io::Error> { - Ok(()) - } -} - -/// Abstraction to listen for incoming stream connections. -pub struct Listener { - fd: OwnedFd, - addr: SocketAddress, -} - -impl Listener { - /// Bind and listen on the given address. - pub(crate) fn listen(addr: SocketAddress) -> Result { - // In case the last connection at this addr did not shutdown correctly - Self::clean(&addr); - - let fd = socket_fd(&addr)?; - bind(fd.as_raw_fd(), &*addr.addr())?; - - listen(&fd.as_fd(), Backlog::new(BACKLOG)?)?; - - Ok(Self { fd, addr }) - } - - #[allow(unsafe_code)] - fn accept(&self) -> Result { - let fd = accept(self.fd.as_raw_fd())?; - - Ok(Stream { fd: unsafe { OwnedFd::from_raw_fd(fd) } }) - } - - /// Remove Unix socket if it exists - fn clean(addr: &SocketAddress) { - // Not irrefutable when "vm" is enabled - #[allow(irrefutable_let_patterns)] - if let SocketAddress::Unix(addr) = addr { - if let Some(path) = addr.path() { - if path.exists() { - drop(std::fs::remove_file(path)); - } - } - } - } -} - -impl Iterator for Listener { - type Item = Stream; - fn next(&mut self) -> Option { - self.accept().ok() - } -} - -impl Drop for Listener { - fn drop(&mut self) { - // OwnedFd::Drop will close the socket, we just need to clear the file - Self::clean(&self.addr); - } -} - -fn socket_fd(addr: &SocketAddress) -> Result { - socket( - addr.family(), - // Type - sequenced, two way byte stream. (full duplexed). - // Stream must be in a connected state before send/receive. - SockType::Stream, - // Flags - SockFlag::empty(), - // Protocol - no protocol needs to be specified as SOCK_STREAM - // is both a type and protocol. 
- None, - ) - .map_err(IOError::NixError) -} - -#[cfg(test)] -mod test { - - use std::{ - os::unix::net::UnixListener, path::Path, str::from_utf8, thread, - }; - - use super::*; - - fn timeval() -> TimeVal { - TimeVal::seconds(1) - } - - // A simple test socket server which says "PONG" when you send "PING". - // Then it kills itself. - pub struct HarakiriPongServer { - path: String, - listener: Option, - } - - impl HarakiriPongServer { - pub fn new(path: String) -> Self { - Self { path, listener: None } - } - pub fn start(&mut self) { - let listener = UnixListener::bind(&self.path).unwrap(); - - let (mut stream, _peer_addr) = listener.accept().unwrap(); - self.listener = Some(listener); - - // Read 4 bytes ("PING") - let mut buf = [0u8; 4]; - stream.read_exact(&mut buf).unwrap(); - - // Send "PONG" if "PING" was sent - if from_utf8(&buf).unwrap() == "PING" { - let _ = stream.write(b"PONG").unwrap(); - } - } - } - - impl Drop for HarakiriPongServer { - fn drop(&mut self) { - if let Some(_listener) = &self.listener { - let server_socket = Path::new(&self.path); - if server_socket.exists() { - drop(std::fs::remove_file(server_socket)); - } - println!("HarakiriPongServer dropped successfully.") - } else { - println!( - "HarakiriPongServer dropped without a fd set. All done." 
- ) - } - } - } - - #[test] - fn stream_integration_test() { - // Ensure concurrent tests do not listen at the same path - let unix_addr = - nix::sys::socket::UnixAddr::new("./stream_integration_test.sock") - .unwrap(); - let addr: SocketAddress = SocketAddress::Unix(unix_addr); - let listener: Listener = Listener::listen(addr.clone()).unwrap(); - let client = Stream::connect(&addr, timeval()).unwrap(); - let server = listener.accept().unwrap(); - - let data = vec![1, 2, 3, 4, 5, 6, 6, 6]; - client.send(&data).unwrap(); - - let resp = server.recv().unwrap(); - - assert_eq!(data, resp); - } - - #[test] - fn stream_implements_read_write_traits() { - let socket_server_path = "./stream_implements_read_write_traits.sock"; - - // Start a simple socket server which replies "PONG" to any incoming - // request - let mut server = - HarakiriPongServer::new(socket_server_path.to_string()); - - // Start the server in its own thread - thread::spawn(move || { - server.start(); - }); - - // Now create a stream connecting to this mini-server - let unix_addr = - nix::sys::socket::UnixAddr::new(socket_server_path).unwrap(); - let addr = SocketAddress::Unix(unix_addr); - let mut pong_stream = Stream::connect(&addr, timeval()).unwrap(); - - // Write "PING" - let written = pong_stream.write(b"PING").unwrap(); - assert_eq!(written, 4); - - // Read, and expect "PONG" - let mut resp = [0u8; 4]; - let res = pong_stream.read(&mut resp).unwrap(); - assert_eq!(res, 4); - assert_eq!(from_utf8(&resp).unwrap(), "PONG"); - } - - #[test] - fn listener_iterator_test() { - // Ensure concurrent tests are not attempting to listen at the same - // address - let unix_addr = - nix::sys::socket::UnixAddr::new("./listener_iterator_test.sock") - .unwrap(); - let addr = SocketAddress::Unix(unix_addr); - - let mut listener = Listener::listen(addr.clone()).unwrap(); - - let handler = std::thread::spawn(move || { - if let Some(stream) = listener.next() { - let req = stream.recv().unwrap(); - 
stream.send(&req).unwrap(); - } - }); - - let client = Stream::connect(&addr, timeval()).unwrap(); - - let data = vec![1, 2, 3, 4, 5, 6, 6, 6]; - client.send(&data).unwrap(); - let resp = client.recv().unwrap(); - assert_eq!(data, resp); - - handler.join().unwrap(); - } - - #[test] - fn limit_sized_payload() { - // Ensure concurrent tests are not attempting to listen on the same socket - let unix_addr = - nix::sys::socket::UnixAddr::new("./limit_sized_payload.sock") - .unwrap(); - let addr = SocketAddress::Unix(unix_addr); - - let mut listener = Listener::listen(addr.clone()).unwrap(); - let handler = std::thread::spawn(move || { - if let Some(stream) = listener.next() { - let req = stream.recv().unwrap(); - stream.send(&req.clone()).unwrap(); - } - }); - - // Sending a request that is strictly less than the max size should work - // (the response will be exactly max size) - let client = Stream::connect(&addr, timeval()).unwrap(); - let req = vec![1u8; MAX_PAYLOAD_SIZE]; - client.send(&req).unwrap(); - let resp = client.recv().unwrap(); - assert_eq!(resp.len(), MAX_PAYLOAD_SIZE); - handler.join().unwrap(); - } - - #[test] - fn oversized_payload() { - // Ensure concurrent tests are not attempting to listen on the same socket - let unix_addr = - nix::sys::socket::UnixAddr::new("./oversized_payload.sock") - .unwrap(); - let addr = SocketAddress::Unix(unix_addr); - let mut listener = Listener::listen(addr.clone()).unwrap(); - - // Sneaky handler: adds one byte to the req, and returns this as a response - let _handler = std::thread::spawn(move || { - if let Some(stream) = listener.next() { - let req = stream.recv().unwrap(); - stream.send(&[req.clone(), vec![1u8]].concat()).unwrap(); - } - }); - - let client = Stream::connect(&addr, timeval()).unwrap(); - - // Sending with the limit payload size will fail to receive: our sneaky handler - // will add one byte and cause the response to be oversized. 
- let req = vec![1u8; MAX_PAYLOAD_SIZE]; - client.send(&req).unwrap(); - - match client.recv().unwrap_err() { - IOError::OversizedPayload(size) => { - assert_eq!(size, MAX_PAYLOAD_SIZE + 1); - } - other => { - panic!("test failed: unexpected error variant ({:?})", other); - } - } - - // N.B: we do not call _handler.join().unwrap() here, because the handler is blocking (indefinitely) on "send" - // Once the test exits, Rust/OS checks will pick up the slack and clean up this thread when this test exits. - } - - #[cfg(feature = "vm")] - #[test] - fn vsock_svm_flags_are_not_lost() { - let vsock = SocketAddress::new_vsock_raw(1, 1, VMADDR_FLAG_TO_HOST); - - assert_eq!(vsock_svm_flags(vsock), VMADDR_FLAG_TO_HOST); - - let first = SocketAddress::new_vsock(3, 3, VMADDR_FLAG_TO_HOST); - let second = first.next_address().unwrap(); - - match second { - SocketAddress::Vsock(second_vsock) => { - assert_eq!(vsock_svm_flags(second_vsock), VMADDR_FLAG_TO_HOST) - } - _ => panic!("not a vsock??"), - } - } -} diff --git a/src/qos_core/src/lib.rs b/src/qos_core/src/lib.rs index a1d1390f..46d60b6c 100644 --- a/src/qos_core/src/lib.rs +++ b/src/qos_core/src/lib.rs @@ -17,19 +17,15 @@ compile_error!( "feature \"vm\" and feature \"mock\" cannot be enabled at the same time" ); -#[cfg(feature = "async")] pub mod async_client; -#[cfg(feature = "async")] pub mod async_server; pub mod cli; -pub mod client; pub mod handles; pub mod io; pub mod parser; pub mod protocol; pub mod reaper; -pub mod server; /// Path to Quorum Key secret. 
#[cfg(not(feature = "vm"))] diff --git a/src/qos_core/src/main.rs b/src/qos_core/src/main.rs index 1e3d00c6..b5e30257 100644 --- a/src/qos_core/src/main.rs +++ b/src/qos_core/src/main.rs @@ -1,5 +1,6 @@ use qos_core::cli::CLI; -pub fn main() { - CLI::execute(); +#[tokio::main] +async fn main() { + CLI::execute().await; } diff --git a/src/qos_core/src/protocol/async_processor.rs b/src/qos_core/src/protocol/async_processor.rs index 36a90d19..b44f5dda 100644 --- a/src/qos_core/src/protocol/async_processor.rs +++ b/src/qos_core/src/protocol/async_processor.rs @@ -1,15 +1,18 @@ //! Quorum protocol processor use std::sync::Arc; +use crate::io::{TimeVal, TimeValLike}; use borsh::BorshDeserialize; use tokio::sync::Mutex; use super::{ - error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, ProtocolPhase, + error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, + ProtocolPhase, ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; use crate::{ + async_client::{AsyncClient, ClientError}, async_server::AsyncRequestProcessor, - io::{IOError, SharedAsyncStreamPool}, + io::SharedAsyncStreamPool, }; const MEGABYTE: usize = 1024 * 1024; @@ -28,7 +31,7 @@ impl ProtocolState { /// Enclave state machine that executes when given a `ProtocolMsg`. 
#[derive(Clone)] pub struct AsyncProcessor { - app_pool: SharedAsyncStreamPool, + app_client: AsyncClient, state: SharedProtocolState, } @@ -39,7 +42,11 @@ impl AsyncProcessor { state: SharedProtocolState, app_pool: SharedAsyncStreamPool, ) -> Self { - Self { app_pool, state } + let app_client = AsyncClient::new( + app_pool, + TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), + ); + Self { app_client, state } } /// Helper to get phase between locking the shared state @@ -48,8 +55,11 @@ impl AsyncProcessor { } /// Expands the app pool to given pool size - pub async fn expand_to(&mut self, pool_size: u32) -> Result<(), IOError> { - self.app_pool.write().await.expand_to(pool_size) + pub async fn expand_to( + &mut self, + pool_size: u32, + ) -> Result<(), ClientError> { + self.app_client.expand_to(pool_size).await } } @@ -80,17 +90,11 @@ impl AsyncRequestProcessor for AsyncProcessor { } let result = self - .app_pool - .read() - .await - .get() - .await + .app_client .call(&data) .await .map(|data| ProtocolMsg::ProxyResponse { data }) - .map_err(|_e| { - ProtocolMsg::ProtocolErrorResponse(ProtocolError::IOError) - }); + .map_err(|e| ProtocolMsg::ProtocolErrorResponse(e.into())); match result { Ok(msg_resp) | Err(msg_resp) => borsh::to_vec(&msg_resp) diff --git a/src/qos_core/src/protocol/error.rs b/src/qos_core/src/protocol/error.rs index 84f79d4d..e6de4549 100644 --- a/src/qos_core/src/protocol/error.rs +++ b/src/qos_core/src/protocol/error.rs @@ -3,7 +3,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use qos_p256::P256Error; use crate::{ - client::{self, ClientError}, + async_client::ClientError, io::IOError, protocol::{services::boot, ProtocolPhase}, }; @@ -157,8 +157,8 @@ impl From for ProtocolError { } } -impl From for ProtocolError { - fn from(err: client::ClientError) -> Self { +impl From for ProtocolError { + fn from(err: ClientError) -> Self { match err { ClientError::IOError(IOError::RecvTimeout) => { ProtocolError::AppClientRecvTimeout diff --git 
a/src/qos_core/src/protocol/mod.rs b/src/qos_core/src/protocol/mod.rs index 70f2f739..7785fc2e 100644 --- a/src/qos_core/src/protocol/mod.rs +++ b/src/qos_core/src/protocol/mod.rs @@ -5,16 +5,13 @@ use qos_crypto::sha_256; mod error; pub mod msg; -mod processor; pub mod services; mod state; pub use error::ProtocolError; -pub use processor::Processor; pub(crate) use state::ProtocolState; pub use state::{ProtocolPhase, ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS}; -#[cfg(feature = "async")] pub(crate) mod async_processor; /// 256bit hash diff --git a/src/qos_core/src/protocol/processor.rs b/src/qos_core/src/protocol/processor.rs deleted file mode 100644 index f4ea8773..00000000 --- a/src/qos_core/src/protocol/processor.rs +++ /dev/null @@ -1,85 +0,0 @@ -//! Quorum protocol processor -use borsh::BorshDeserialize; -use nix::sys::time::{TimeVal, TimeValLike}; -use qos_nsm::NsmProvider; - -use super::{ - error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, - ProtocolPhase, ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, -}; -use crate::io::MAX_PAYLOAD_SIZE; -use crate::{client::Client, handles::Handles, io::SocketAddress, server}; - -/// Enclave state machine that executes when given a `ProtocolMsg`. -pub struct Processor { - app_client: Client, - state: ProtocolState, -} - -impl Processor { - /// Create a new `Self`. 
- #[must_use] - pub fn new( - attestor: Box, - handles: Handles, - app_addr: SocketAddress, - test_only_init_phase_override: Option, - ) -> Self { - let app_client = Client::new( - app_addr, - TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), - ); - - Self { - app_client, - state: ProtocolState::new( - attestor, - handles, - test_only_init_phase_override, - ), - } - } -} - -impl server::RequestProcessor for Processor { - fn process(&mut self, req_bytes: Vec) -> Vec { - if req_bytes.len() > MAX_PAYLOAD_SIZE { - return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( - ProtocolError::OversizedPayload, - )) - .expect("ProtocolMsg can always be serialized. qed."); - } - - let Ok(msg_req) = ProtocolMsg::try_from_slice(&req_bytes) else { - return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( - ProtocolError::ProtocolMsgDeserialization, - )) - .expect("ProtocolMsg can always be serialized. qed."); - }; - - // handle Proxy outside of the state - match msg_req { - ProtocolMsg::ProxyRequest { data } => { - let phase = self.state.get_phase(); - if phase != ProtocolPhase::QuorumKeyProvisioned { - let err = ProtocolError::NoMatchingRoute(phase); - return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( - err, - )) - .expect("ProtocolMsg can always be serialized. qed."); - } - let result = self - .app_client - .send(&data) - .map(|data| ProtocolMsg::ProxyResponse { data }) - .map_err(|e| ProtocolMsg::ProtocolErrorResponse(e.into())); - - match result { - Ok(msg_resp) | Err(msg_resp) => borsh::to_vec(&msg_resp) - .expect("ProtocolMsg can always be serialized. qed."), - } - } - _ => self.state.handle_msg(&msg_req), - } - } -} diff --git a/src/qos_core/src/reaper.rs b/src/qos_core/src/reaper.rs index 1d69018f..27201fb4 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -4,18 +4,23 @@ //! //! The pivot is an executable the enclave runs to initialize the secure //! applications. 
-use std::process::Command; +use std::{ + process::Command, + sync::{Arc, RwLock}, + time::Duration, +}; use qos_nsm::NsmProvider; use crate::{ + async_server::AsyncSocketServer, handles::Handles, - io::SocketAddress, + io::AsyncStreamPool, protocol::{ + async_processor::AsyncProcessor, services::boot::{PivotConfig, RestartPolicy}, - Processor, ProtocolPhase, + ProtocolPhase, ProtocolState, }, - server::SocketServer, }; /// Delay for restarting the pivot app if the process exits. @@ -28,40 +33,114 @@ pub const REAPER_EXIT_DELAY_IN_SECONDS: u64 = 3; /// and pivot binary. pub struct Reaper; impl Reaper { - /// Run the Reaper. + /// Run the Reaper, with the given shutdown oneshot channel Receiver. If a signal is passed (regardless of value) + /// the Reaper will shut down and clean up the server. It is the responsibility of the caller to send the shutdown + /// signal. /// /// # Panics /// /// - If spawning the pivot errors. /// - If waiting for the pivot errors. + #[allow(dead_code)] + #[allow(clippy::too_many_lines)] pub fn execute( handles: &Handles, nsm: Box, - addr: SocketAddress, - app_addr: SocketAddress, + pool: AsyncStreamPool, + app_pool: AsyncStreamPool, test_only_init_phase_override: Option, ) { let handles2 = handles.clone(); + let inter_state = Arc::new(RwLock::new(InterState::Booting)); + let server_state = inter_state.clone(); + std::thread::spawn(move || { - let processor = Processor::new( - nsm, - handles2, - app_addr, - test_only_init_phase_override, - ); - SocketServer::listen(addr, processor).unwrap(); + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap() + .block_on(async move { + // run the state processor inside a tokio runtime in this thread + // create the state + let protocol_state = ProtocolState::new( + nsm, + handles2.clone(), + test_only_init_phase_override, + ); + // send a shared version of state and the async pool to each processor + let mut processor = AsyncProcessor::new( + protocol_state.shared(), + 
app_pool.shared(), + ); + // listen_all will multiplex the processor accross all sockets + let mut server = + AsyncSocketServer::listen_all(pool, &processor) + .expect("unable to get listen task list"); + + loop { + // see if we got interrupted + if *server_state.read().unwrap() == InterState::Quitting + { + server.terminate(); + return; + } + + let (manifest_present, pool_size) = + get_pool_size_from_pivot_args(&handles2); + + if manifest_present { + let pool_size = pool_size.unwrap_or(1); + // expand server to pool_size + 1 (due to qos-host extra socket) + server.listen_to(pool_size + 1, &processor).expect( + "unable to listen_to on the running server", + ); + // expand app connections to pool_size + processor.expand_to(pool_size).await.expect( + "unable to expand_to on the processor app pool", + ); + + *server_state.write().unwrap() = + InterState::PivotReady; + eprintln!("Manifest is present, breaking out of server check loop"); + break; + } + + tokio::time::sleep(Duration::from_millis(100)).await; + } + + eprintln!( + "Reaper server post-expansion, waiting for shutdown" + ); + while *server_state.read().unwrap() != InterState::Quitting + { + tokio::time::sleep(Duration::from_millis(100)).await; + } + + eprintln!("Reaper server shutdown"); + server.terminate(); // ensure we cleanup the sockets + *server_state.write().unwrap() = InterState::Quitting; + }); }); loop { + let server_state = *inter_state.read().unwrap(); + // helper for integration tests and manual runs aka qos_core binary + if server_state == InterState::Quitting { + eprintln!("quit called by ctrl+c"); + std::process::exit(1); + } + if handles.quorum_key_exists() && handles.pivot_exists() && handles.manifest_envelope_exists() + && server_state == InterState::PivotReady { // The state required to pivot exists, so we can break this // holding pattern and start the pivot. 
break; } + eprintln!("Reaper looping"); std::thread::sleep(std::time::Duration::from_secs(1)); } @@ -99,282 +178,99 @@ impl Reaper { .expect("Failed to spawn") .wait() .expect("Pivot executable never started..."); - println!("Pivot exited with status: {status}"); + println!("Pivot (no restart) exited with status: {status}"); } } std::thread::sleep(std::time::Duration::from_secs( REAPER_EXIT_DELAY_IN_SECONDS, )); + println!("Reaper exiting ... "); } } -#[cfg(feature = "async")] -mod inner { - use std::{ - sync::{Arc, RwLock}, - time::Duration, - }; - - #[allow(clippy::wildcard_imports)] - use super::*; - use crate::{ - async_server::AsyncSocketServer, - io::AsyncStreamPool, - protocol::{async_processor::AsyncProcessor, ProtocolState}, - }; - - // basic helper for x-thread comms in Reaper - #[derive(Debug, Clone, Copy, PartialEq, Eq)] - enum InterState { - // We're booting, no pivot yet - Booting, - // We've booted and pivot is ready - PivotReady, - // We're quitting (ctrl+c for tests and such) - Quitting, - } - - impl Reaper { - /// Run the Reaper using Tokio inside a thread for server processing. - /// - /// # Panics - /// - /// - If spawning the pivot errors. - /// - If waiting for the pivot errors. 
- #[allow(dead_code)] - #[allow(clippy::too_many_lines)] - pub fn async_execute( - handles: &Handles, - nsm: Box, - pool: AsyncStreamPool, - app_pool: AsyncStreamPool, - test_only_init_phase_override: Option, - ) { - let handles2 = handles.clone(); - let inter_state = Arc::new(RwLock::new(InterState::Booting)); - let server_state = inter_state.clone(); - - std::thread::spawn(move || { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap() - .block_on(async move { - // run the state processor inside a tokio runtime in this thread - // create the state - let protocol_state = ProtocolState::new( - nsm, - handles2.clone(), - test_only_init_phase_override, - ); - // send a shared version of state and the async pool to each processor - let mut processor = AsyncProcessor::new( - protocol_state.shared(), - app_pool.shared(), - ); - // listen_all will multiplex the processor accross all sockets - let mut server = - AsyncSocketServer::listen_all(pool, &processor) - .expect("unable to get listen task list"); - - loop { - let (manifest_present, pool_size) = - get_pool_size_from_pivot_args(&handles2); - let pool_size = pool_size.unwrap_or(1); - // expand server to pool_size + 1 (due to qos-host extra socket) - server.listen_to(pool_size + 1, &processor).expect( - "unable to listen_to on the running server", - ); - // expand app connections to pool_size - processor.expand_to(pool_size).await.expect( - "unable to expand_to on the processor app pool", - ); - - if manifest_present { - *server_state.write().unwrap() = - InterState::PivotReady; - eprintln!("manifest is present, breaking out of server check loop"); - break; - } - - // sleep up to 1s, checking for ctrl+c, if it happens break out - if let Ok(ctrl_res) = tokio::time::timeout( - Duration::from_secs(1), - tokio::signal::ctrl_c(), - ) - .await - { - return ctrl_c_handler( - ctrl_res, - server, - &server_state, - ); - } - } - // wait until ctrl+c - ctrl_c_handler( - 
tokio::signal::ctrl_c().await, - server, - &server_state, - ); - }); - }); - - loop { - let server_state = *inter_state.read().unwrap(); - // helper for integration tests and manual runs aka qos_core binary - if server_state == InterState::Quitting { - eprintln!("quit called by ctrl+c"); - std::process::exit(1); - } - - if handles.quorum_key_exists() - && handles.pivot_exists() - && handles.manifest_envelope_exists() - && server_state == InterState::PivotReady - { - // The state required to pivot exists, so we can break this - // holding pattern and start the pivot. - break; - } - - eprintln!("Reaper looping"); - std::thread::sleep(std::time::Duration::from_secs(1)); - eprintln!("Reaper done looping"); - } - - println!("Reaper::execute about to spawn pivot"); - - let PivotConfig { args, restart, .. } = handles - .get_manifest_envelope() - .expect("Checked above that the manifest exists.") - .manifest - .pivot; - - let mut pivot = Command::new(handles.pivot_path()); - pivot.args(&args[..]); - match restart { - RestartPolicy::Always => loop { - let status = pivot - .spawn() - .expect("Failed to spawn") - .wait() - .expect("Pivot executable never started..."); - - println!("Pivot exited with status: {status}"); - - // pause to ensure OS has enough time to clean up resources - // before restarting - std::thread::sleep(std::time::Duration::from_secs( - REAPER_RESTART_DELAY_IN_SECONDS, - )); - - println!("Restarting pivot ..."); - }, - RestartPolicy::Never => { - let status = pivot - .spawn() - .expect("Failed to spawn") - .wait() - .expect("Pivot executable never started..."); - println!("Pivot exited with status: {status}"); - } - } - - std::thread::sleep(std::time::Duration::from_secs( - REAPER_EXIT_DELAY_IN_SECONDS, - )); - println!("Reaper exiting ... 
"); - } - } - - fn ctrl_c_handler( - ctrl_c: std::io::Result<()>, - server: AsyncSocketServer, - server_state: &Arc>, - ) { - match ctrl_c { - Ok(()) => { - server.terminate(); - *server_state.write().unwrap() = InterState::Quitting; - } - Err(err) => panic!("{err}"), - } - } +// basic helper for x-thread comms in Reaper +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum InterState { + // We're booting, no pivot yet + Booting, + // We've booted and pivot is ready + PivotReady, + // We're quitting (ctrl+c for tests and such) + Quitting, +} - // return if we have manifest and get pool_size args if present from it - fn get_pool_size_from_pivot_args(handles: &Handles) -> (bool, Option) { - if let Ok(envelope) = handles.get_manifest_envelope() { - (true, extract_pool_size_arg(&envelope.manifest.pivot.args)) - } else { - (false, None) - } +// return if we have manifest and get pool_size args if present from it +fn get_pool_size_from_pivot_args(handles: &Handles) -> (bool, Option) { + if let Ok(envelope) = handles.get_manifest_envelope() { + (true, extract_pool_size_arg(&envelope.manifest.pivot.args)) + } else { + (false, None) } +} - // find the u32 value of --pool-size argument passed to the pivot if present - fn extract_pool_size_arg(args: &[String]) -> Option { - if let Some((i, _)) = - args.iter().enumerate().find(|(_, a)| *a == "--pool-size") - { - if let Some(pool_size_str) = args.get(i + 1) { - match pool_size_str.parse::() { - Ok(pool_size) => Some(pool_size), - Err(_) => None, - } - } else { - None +// find the u32 value of --pool-size argument passed to the pivot if present +fn extract_pool_size_arg(args: &[String]) -> Option { + if let Some((i, _)) = + args.iter().enumerate().find(|(_, a)| *a == "--pool-size") + { + if let Some(pool_size_str) = args.get(i + 1) { + match pool_size_str.parse::() { + Ok(pool_size) => Some(pool_size), + Err(_) => None, } } else { None } + } else { + None } +} - #[cfg(test)] - mod test { - use super::*; - - #[test] - fn 
extract_pool_size_arg_works() { - // no arg - assert_eq!( - extract_pool_size_arg(&vec![ - "unrelated".to_owned(), - "--args".to_owned(), - ]), - None - ); - - // should work - assert_eq!( - extract_pool_size_arg(&vec![ - "--pool-size".to_owned(), - "8".to_owned(), - ]), - Some(8) - ); - - // wrong number, expect None - assert_eq!( - extract_pool_size_arg(&vec![ - "--pool-size".to_owned(), - "8a".to_owned(), - ]), - None - ); - - // duplicate arg, use 1st - assert_eq!( - extract_pool_size_arg(&vec![ - "--pool-size".to_owned(), - "8".to_owned(), - "--pool-size".to_owned(), - "9".to_owned(), - ]), - Some(8) - ); - } +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn extract_pool_size_arg_works() { + // no arg + assert_eq!( + extract_pool_size_arg(&vec![ + "unrelated".to_owned(), + "--args".to_owned(), + ]), + None + ); + + // should work + assert_eq!( + extract_pool_size_arg(&vec![ + "--pool-size".to_owned(), + "8".to_owned(), + ]), + Some(8) + ); + + // wrong number, expect None + assert_eq!( + extract_pool_size_arg(&vec![ + "--pool-size".to_owned(), + "8a".to_owned(), + ]), + None + ); + + // duplicate arg, use 1st + assert_eq!( + extract_pool_size_arg(&vec![ + "--pool-size".to_owned(), + "8".to_owned(), + "--pool-size".to_owned(), + "9".to_owned(), + ]), + Some(8) + ); } } diff --git a/src/qos_core/src/server.rs b/src/qos_core/src/server.rs deleted file mode 100644 index be38f9b4..00000000 --- a/src/qos_core/src/server.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Streaming socket based server for use in an enclave. Listens for connections -//! from [`crate::client::Client`]. - -use std::marker::PhantomData; - -use crate::io::{self, Listener, SocketAddress}; - -/// Error variants for [`SocketServer`] -#[derive(Debug)] -pub enum SocketServerError { - /// `io::IOError` wrapper. - IOError(io::IOError), -} - -impl From for SocketServerError { - fn from(err: io::IOError) -> Self { - Self::IOError(err) - } -} - -/// Something that can process requests. 
-pub trait RequestProcessor { - /// Process an incoming request and return a response. - /// - /// The request and response are raw bytes. Likely this should be encoded - /// data and logic inside of this function should take care of decoding the - /// request and encoding a response. - fn process(&mut self, request: Vec) -> Vec; -} - -/// A bare bones, socket based server. -pub struct SocketServer { - _phantom: PhantomData, -} - -impl SocketServer { - /// Listen and respond to incoming requests with the given `processor`. - pub fn listen( - addr: SocketAddress, - mut processor: R, - ) -> Result<(), SocketServerError> { - println!("`SocketServer` listening on {}", addr.debug_info()); - - let listener = Listener::listen(addr)?; - - for stream in listener { - match stream.recv() { - Ok(payload) => { - let response = processor.process(payload); - let _ = stream.send(&response); - } - Err(err) => eprintln!("Server::listen error: {err:?}"), - } - } - - Ok(()) - } -} diff --git a/src/qos_host/Cargo.toml b/src/qos_host/Cargo.toml index 8ea1ef96..17bd6053 100644 --- a/src/qos_host/Cargo.toml +++ b/src/qos_host/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -qos_core = { path = "../qos_core", default-features = false, features = ["async"] } +qos_core = { path = "../qos_core", default-features = false } qos_hex = { path = "../qos_hex", features = ["serde"], default-features = false } # Third party diff --git a/src/qos_host/src/async_host.rs b/src/qos_host/src/async_host.rs index 4642df21..9bee29c2 100644 --- a/src/qos_host/src/async_host.rs +++ b/src/qos_host/src/async_host.rs @@ -30,7 +30,7 @@ use axum::{ use borsh::BorshDeserialize; use qos_core::{ async_client::AsyncClient, - io::SharedAsyncStreamPool, + io::{SharedAsyncStreamPool, TimeVal}, protocol::{msg::ProtocolMsg, ProtocolError, ProtocolPhase}, }; @@ -49,6 +49,7 @@ struct AsyncQosHostState { #[allow(clippy::module_name_repetitions)] pub struct AsyncHostServer { enclave_pool: 
SharedAsyncStreamPool, + timeout: TimeVal, addr: SocketAddr, base_path: Option, } @@ -59,10 +60,11 @@ impl AsyncHostServer { #[must_use] pub fn new( enclave_pool: SharedAsyncStreamPool, + timeout: TimeVal, addr: SocketAddr, base_path: Option, ) -> Self { - Self { enclave_pool, addr, base_path } + Self { enclave_pool, timeout, addr, base_path } } fn path(&self, endpoint: &str) -> String { @@ -81,7 +83,10 @@ impl AsyncHostServer { // pub async fn serve(&self) -> Result<(), String> { pub async fn serve(&self) { let state = Arc::new(AsyncQosHostState { - enclave_client: AsyncClient::new(self.enclave_pool.clone()), + enclave_client: AsyncClient::new( + self.enclave_pool.clone(), + self.timeout, + ), }); let app = Router::new() diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index 5cf2d276..bd82d26a 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -8,7 +8,7 @@ use std::{ use qos_core::{ cli::{CID, PORT, USOCK}, - io::{AsyncStreamPool, SocketAddress}, + io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; @@ -110,19 +110,20 @@ impl HostOpts { SocketAddr::new(IpAddr::V4(ip), port) } + pub(crate) fn socket_timeout(&self) -> TimeVal { + let default_timeout = &qos_core::DEFAULT_SOCKET_TIMEOUT.to_owned(); + let timeout_str = + self.parsed.single(SOCKET_TIMEOUT).unwrap_or(default_timeout); + TimeVal::milliseconds( + timeout_str.parse().expect("invalid timeout value"), + ) + } + /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and /// return the new `AsyncPool`. 
pub(crate) fn enclave_pool( &self, ) -> Result { - use qos_core::io::{TimeVal, TimeValLike}; - - let default_timeout = &qos_core::DEFAULT_SOCKET_TIMEOUT.to_owned(); - let timeout_str = - self.parsed.single(SOCKET_TIMEOUT).unwrap_or(default_timeout); - let timeout = TimeVal::milliseconds( - timeout_str.parse().expect("invalid timeout value"), - ); match ( self.parsed.single(CID), self.parsed.single(PORT), @@ -140,12 +141,12 @@ impl HostOpts { let address = SocketAddress::new_vsock(c, p, self.to_host_flag()); - AsyncStreamPool::new(address, timeout, 1) // qos_host needs only 1 + AsyncStreamPool::new(address, 1) // qos_host needs only 1 } (None, None, Some(u)) => { let address = SocketAddress::new_unix(u); - AsyncStreamPool::new(address, timeout, 1) + AsyncStreamPool::new(address, 1) } _ => panic!("Invalid socket opts"), } @@ -228,6 +229,7 @@ impl CLI { .enclave_pool() .expect("unable to create enclave pool") .shared(), + options.socket_timeout(), options.host_addr(), options.base_path(), ) diff --git a/src/qos_net/Cargo.toml b/src/qos_net/Cargo.toml index eb6f8813..e6201c9e 100644 --- a/src/qos_net/Cargo.toml +++ b/src/qos_net/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -qos_core = { path = "../qos_core", default-features = false, features = ["async"] } +qos_core = { path = "../qos_core", default-features = false } borsh = { version = "1.0", features = [ "std", @@ -35,5 +35,5 @@ webpki-roots = { version = "0.26.1" } [features] default = ["proxy"] # keep this as a default feature ensures we lint by default -proxy = ["hickory-resolver", "rand", "tokio", "qos_core/async", "tokio-rustls"] +proxy = ["hickory-resolver", "rand", "tokio", "tokio-rustls"] vm = ["qos_core/vm"] diff --git a/src/qos_net/src/async_proxy.rs b/src/qos_net/src/async_proxy.rs index ba72fb9d..194bcb29 100644 --- a/src/qos_net/src/async_proxy.rs +++ b/src/qos_net/src/async_proxy.rs @@ -2,9 +2,8 @@ use borsh::BorshDeserialize; use futures::Future; use qos_core::{ - 
async_server::AsyncSocketServer, + async_server::{AsyncSocketServer, SocketServerError}, io::{AsyncListener, AsyncStream, AsyncStreamPool, IOError}, - server::SocketServerError, }; use crate::{ diff --git a/src/qos_net/src/cli.rs b/src/qos_net/src/cli.rs index 996da064..cb8bc6c6 100644 --- a/src/qos_net/src/cli.rs +++ b/src/qos_net/src/cli.rs @@ -38,8 +38,6 @@ impl ProxyOpts { pub(crate) fn async_pool( &self, ) -> Result { - use qos_core::io::{TimeVal, TimeValLike}; - let pool_size: u32 = self .parsed .single(POOL_SIZE) @@ -59,12 +57,12 @@ impl ProxyOpts { let address = SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS); - AsyncStreamPool::new(address, TimeVal::seconds(5), pool_size) + AsyncStreamPool::new(address, pool_size) } (None, None, Some(u)) => { let address = SocketAddress::new_unix(u); - AsyncStreamPool::new(address, TimeVal::seconds(0), pool_size) + AsyncStreamPool::new(address, pool_size) } _ => panic!("Invalid socket opts"), } diff --git a/src/qos_test_primitives/src/lib.rs b/src/qos_test_primitives/src/lib.rs index c8e63fe4..693d1923 100644 --- a/src/qos_test_primitives/src/lib.rs +++ b/src/qos_test_primitives/src/lib.rs @@ -37,7 +37,7 @@ impl Drop for ChildWrapper { } // allow clean exit - std::thread::sleep(Duration::from_millis(10)); + std::thread::sleep(Duration::from_millis(50)); } // Kill the process and explicitly ignore the result From 48fa2dfcd2a844ac4db0aed41d4dcf43550791cd Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 17 Jul 2025 09:22:09 -0700 Subject: [PATCH 18/20] qos_core/qos_net: clean up cli arguments --- src/qos_core/src/cli.rs | 81 +++++++++++++++++++---------------------- src/qos_host/src/cli.rs | 23 ------------ src/qos_net/src/cli.rs | 44 ++++++++++------------ 3 files changed, 57 insertions(+), 91 deletions(-) diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index 8c96d8fd..c18a7937 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -76,38 +76,6 @@ impl EnclaveOpts { } } - /// Get 
the `SocketAddress` for the enclave server. - /// - /// # Panics - /// - /// Panics if the opts are not valid for exactly one of unix or vsock. - #[allow(unused)] - fn addr(&self) -> SocketAddress { - match ( - self.parsed.single(CID), - self.parsed.single(PORT), - self.parsed.single(USOCK), - ) { - #[cfg(feature = "vm")] - (Some(c), Some(p), None) => SocketAddress::new_vsock( - c.parse::().unwrap(), - p.parse::().unwrap(), - crate::io::VMADDR_NO_FLAGS, - ), - (None, None, Some(u)) => SocketAddress::new_unix(u), - _ => panic!("Invalid socket opts"), - } - } - - #[allow(unused)] - fn app_addr(&self) -> SocketAddress { - SocketAddress::new_unix( - self.parsed - .single(APP_USOCK) - .expect("app-usock has a default value."), - ) - } - /// Get the [`NsmProvider`] fn nsm(&self) -> Box { if self.parsed.flag(MOCK).unwrap_or(false) { @@ -285,6 +253,44 @@ mod test { assert_eq!(*opts.parsed.single(PORT).unwrap(), "3999".to_string()); } + #[test] + fn parse_usock() { + let mut args: Vec<_> = vec![ + "binary", + "--usock", + "/tmp/usock", + "--app-usock", + "/tmp/app_usock", + ] + .into_iter() + .map(String::from) + .collect(); + let opts = EnclaveOpts::new(&mut args); + + assert_eq!( + *opts.parsed.single(USOCK).unwrap(), + "/tmp/usock".to_string() + ); + assert_eq!( + *opts.parsed.single(APP_USOCK).unwrap(), + "/tmp/app_usock".to_string() + ); + } + + #[test] + fn builds_async_pool() { + let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock"] + .into_iter() + .map(String::from) + .collect(); + let opts = EnclaveOpts::new(&mut args); + + let pool = opts.async_pool(true).unwrap(); + assert_eq!(pool.len(), 1); + let pool = opts.async_pool(false).unwrap(); + assert_eq!(pool.len(), 1); + } + #[test] fn parse_pivot_file_and_quorum_file() { let pivot = "pivot.file"; @@ -315,17 +321,6 @@ mod test { assert_eq!(opts.ephemeral_file(), ephemeral); } - #[test] - fn parse_usock() { - let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock"] - .into_iter() - 
.map(String::from) - .collect(); - let opts = EnclaveOpts::new(&mut args); - - assert_eq!(opts.addr(), SocketAddress::new_unix("./test.sock")); - } - #[test] fn parse_manifest_file() { let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock"] diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index bd82d26a..847479d4 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -152,29 +152,6 @@ impl HostOpts { } } - /// Get the `SocketAddress` for the enclave server. - /// - /// # Panics - /// - /// Panics if the options are not valid for exactly one of unix or vsock. - #[must_use] - pub fn enclave_addr(&self) -> SocketAddress { - match ( - self.parsed.single(CID), - self.parsed.single(PORT), - self.parsed.single(USOCK), - ) { - #[cfg(feature = "vm")] - (Some(c), Some(p), None) => SocketAddress::new_vsock( - c.parse::().unwrap(), - p.parse::().unwrap(), - self.to_host_flag(), - ), - (None, None, Some(u)) => SocketAddress::new_unix(u), - _ => panic!("Invalid socket options"), - } - } - fn ip(&self) -> String { self.parsed.single(HOST_IP).expect("required arg").clone() } diff --git a/src/qos_net/src/cli.rs b/src/qos_net/src/cli.rs index cb8bc6c6..141b68fb 100644 --- a/src/qos_net/src/cli.rs +++ b/src/qos_net/src/cli.rs @@ -67,29 +67,6 @@ impl ProxyOpts { _ => panic!("Invalid socket opts"), } } - - /// Get the `SocketAddress` for the proxy server. - /// - /// # Panics - /// - /// Panics if the opts are not valid for exactly one of unix or vsock. - #[allow(unused)] - pub(crate) fn addr(&self) -> SocketAddress { - match ( - self.parsed.single(CID), - self.parsed.single(PORT), - self.parsed.single(USOCK), - ) { - #[cfg(feature = "vm")] - (Some(c), Some(p), None) => SocketAddress::new_vsock( - c.parse::().unwrap(), - p.parse::().unwrap(), - qos_core::io::VMADDR_NO_FLAGS, - ), - (None, None, Some(u)) => SocketAddress::new_unix(u), - _ => panic!("Invalid socket opts"), - } - } } /// Proxy CLI. 
@@ -177,15 +154,19 @@ mod test { assert_eq!(*opts.parsed.single(CID).unwrap(), "6".to_string()); assert_eq!(*opts.parsed.single(PORT).unwrap(), "3999".to_string()); } + #[test] fn parse_usock() { - let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock"] + let mut args: Vec<_> = vec!["binary", "--usock", "/tmp/usock"] .into_iter() .map(String::from) .collect(); let opts = ProxyOpts::new(&mut args); - assert_eq!(opts.addr(), SocketAddress::new_unix("./test.sock")); + assert_eq!( + *opts.parsed.single(USOCK).unwrap(), + "/tmp/usock".to_string() + ); } #[test] @@ -201,6 +182,19 @@ mod test { assert_eq!(pool.len(), 7); } + #[test] + fn builds_async_pool() { + let mut args: Vec<_> = + vec!["binary", "--usock", "./test.sock", "--pool-size", "3"] + .into_iter() + .map(String::from) + .collect(); + let opts = ProxyOpts::new(&mut args); + + let pool = opts.async_pool().unwrap(); + assert_eq!(pool.len(), 3); + } + #[test] #[should_panic = "Entered invalid CLI args: MutuallyExclusiveInput(\"cid\", \"usock\")"] fn panic_on_too_many_opts() { From daf871f188bcbac12d3905254cdbe75e1b013134 Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 17 Jul 2025 09:43:56 -0700 Subject: [PATCH 19/20] qos_core: refactor async_* modules into direct versions Removes the unnecessary Async prefixes --- src/init/init.rs | 6 +- .../src/bin/pivot_async_remote_tls.rs | 34 +- src/integration/src/bin/pivot_proof.rs | 13 +- .../src/bin/pivot_socket_stress.rs | 13 +- src/integration/src/lib.rs | 8 +- src/integration/tests/async_client.rs | 28 +- src/integration/tests/async_remote_tls.rs | 8 +- .../tests/enclave_app_client_socket_stress.rs | 12 +- src/integration/tests/proofs.rs | 14 +- src/integration/tests/reaper.rs | 17 +- src/integration/tests/simple_socket_stress.rs | 8 +- src/qos_core/src/cli.rs | 14 +- .../src/{async_client.rs => client.rs} | 10 +- src/qos_core/src/io/async_stream.rs | 342 --------------- src/qos_core/src/io/mod.rs | 157 ++++++- .../src/io/{async_pool.rs => pool.rs} | 53 
+-- src/qos_core/src/io/stream.rs | 412 +++++++++++++----- src/qos_core/src/lib.rs | 4 +- src/qos_core/src/protocol/async_processor.rs | 17 +- src/qos_core/src/protocol/error.rs | 4 +- src/qos_core/src/reaper.rs | 13 +- .../src/{async_server.rs => server.rs} | 26 +- src/qos_host/src/async_host.rs | 12 +- src/qos_host/src/cli.rs | 11 +- src/qos_net/src/cli.rs | 17 +- src/qos_net/src/lib.rs | 6 +- src/qos_net/src/{async_proxy.rs => proxy.rs} | 37 +- ...roxy_connection.rs => proxy_connection.rs} | 14 +- ...{async_proxy_stream.rs => proxy_stream.rs} | 30 +- 29 files changed, 653 insertions(+), 687 deletions(-) rename src/qos_core/src/{async_client.rs => client.rs} (92%) delete mode 100644 src/qos_core/src/io/async_stream.rs rename src/qos_core/src/io/{async_pool.rs => pool.rs} (81%) rename src/qos_core/src/{async_server.rs => server.rs} (86%) rename src/qos_net/src/{async_proxy.rs => proxy.rs} (85%) rename src/qos_net/src/{async_proxy_connection.rs => proxy_connection.rs} (91%) rename src/qos_net/src/{async_proxy_stream.rs => proxy_stream.rs} (80%) diff --git a/src/init/init.rs b/src/init/init.rs index 4834ec74..973adb56 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -1,6 +1,6 @@ use qos_core::{ handles::Handles, - io::{AsyncStreamPool, SocketAddress, VMADDR_NO_FLAGS}, + io::{SocketAddress, StreamPool, VMADDR_NO_FLAGS}, reaper::Reaper, EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, }; @@ -70,13 +70,13 @@ async fn main() { ); let start_port = 3; // used for qos-host only! 
others follow 4+ for the -host - let core_pool = AsyncStreamPool::new( + let core_pool = StreamPool::new( SocketAddress::new_vsock(cid, start_port, VMADDR_NO_FLAGS), 1, // start at pool size 1, grow based on manifest/args as necessary (see Reaper) ) .expect("unable to create core pool"); - let app_pool = AsyncStreamPool::new( + let app_pool = StreamPool::new( SocketAddress::new_unix(SEC_APP_SOCK), 1, // start at pool size 1, grow based on manifest/args as necessary (see Reaper) ) diff --git a/src/integration/src/bin/pivot_async_remote_tls.rs b/src/integration/src/bin/pivot_async_remote_tls.rs index f22c1973..b9856cf1 100644 --- a/src/integration/src/bin/pivot_async_remote_tls.rs +++ b/src/integration/src/bin/pivot_async_remote_tls.rs @@ -4,26 +4,26 @@ use std::{io::ErrorKind, sync::Arc}; use borsh::BorshDeserialize; use integration::PivotRemoteTlsMsg; use qos_core::{ - async_server::{AsyncRequestProcessor, AsyncSocketServer}, - io::{AsyncStreamPool, SharedAsyncStreamPool, SocketAddress}, + io::{SharedStreamPool, SocketAddress, StreamPool}, + server::{RequestProcessor, SocketServer}, }; -use qos_net::async_proxy_stream::AsyncProxyStream; +use qos_net::proxy_stream::ProxyStream; use rustls::RootCertStore; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio_rustls::TlsConnector; #[derive(Clone)] struct Processor { - net_pool: SharedAsyncStreamPool, + net_pool: SharedStreamPool, } impl Processor { - fn new(net_pool: SharedAsyncStreamPool) -> Self { + fn new(net_pool: SharedStreamPool) -> Self { Processor { net_pool } } } -impl AsyncRequestProcessor for Processor { +impl RequestProcessor for Processor { async fn process(&self, request: Vec) -> Vec { let msg = PivotRemoteTlsMsg::try_from_slice(&request) .expect("Received invalid message - test is broken!"); @@ -31,7 +31,7 @@ impl AsyncRequestProcessor for Processor { match msg { PivotRemoteTlsMsg::RemoteTlsRequest { host, path } => { let pool = self.net_pool.read().await; - let mut stream = 
AsyncProxyStream::connect_by_name( + let mut stream = ProxyStream::connect_by_name( pool.get().await, host.clone(), 443, @@ -105,20 +105,16 @@ async fn main() { let socket_path: &String = &args[1]; let proxy_path: &String = &args[2]; - let enclave_pool = - AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) - .expect("unable to create async stream pool"); + let enclave_pool = StreamPool::new(SocketAddress::new_unix(socket_path), 1) + .expect("unable to create async stream pool"); - let proxy_pool = - AsyncStreamPool::new(SocketAddress::new_unix(proxy_path), 1) - .expect("unable to create async stream pool") - .shared(); + let proxy_pool = StreamPool::new(SocketAddress::new_unix(proxy_path), 1) + .expect("unable to create async stream pool") + .shared(); - let server = AsyncSocketServer::listen_all( - enclave_pool, - &Processor::new(proxy_pool), - ) - .unwrap(); + let server = + SocketServer::listen_all(enclave_pool, &Processor::new(proxy_pool)) + .unwrap(); match tokio::signal::ctrl_c().await { Ok(_) => { diff --git a/src/integration/src/bin/pivot_proof.rs b/src/integration/src/bin/pivot_proof.rs index 7d6e679f..f9ba0539 100644 --- a/src/integration/src/bin/pivot_proof.rs +++ b/src/integration/src/bin/pivot_proof.rs @@ -3,9 +3,9 @@ use core::panic; use borsh::BorshDeserialize; use integration::{AdditionProof, AdditionProofPayload, PivotProofMsg}; use qos_core::{ - async_server::{AsyncRequestProcessor, AsyncSocketServer}, handles::EphemeralKeyHandle, - io::{AsyncStreamPool, SocketAddress}, + io::{SocketAddress, StreamPool}, + server::{RequestProcessor, SocketServer}, }; #[derive(Clone)] @@ -13,7 +13,7 @@ struct Processor { ephemeral_key_handle: EphemeralKeyHandle, } -impl AsyncRequestProcessor for Processor { +impl RequestProcessor for Processor { async fn process(&self, request: Vec) -> Vec { let msg = PivotProofMsg::try_from_slice(&request) .expect("Received invalid message - test is broken!"); @@ -54,11 +54,10 @@ async fn main() { let args: Vec = 
std::env::args().collect(); let socket_path: &String = &args[1]; - let app_pool = - AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) - .expect("unable to create app pool"); + let app_pool = StreamPool::new(SocketAddress::new_unix(socket_path), 1) + .expect("unable to create app pool"); - let server = AsyncSocketServer::listen_all( + let server = SocketServer::listen_all( app_pool, &Processor { ephemeral_key_handle: EphemeralKeyHandle::new( diff --git a/src/integration/src/bin/pivot_socket_stress.rs b/src/integration/src/bin/pivot_socket_stress.rs index c8e4b3e9..0d6a096e 100644 --- a/src/integration/src/bin/pivot_socket_stress.rs +++ b/src/integration/src/bin/pivot_socket_stress.rs @@ -3,14 +3,14 @@ use core::panic; use borsh::BorshDeserialize; use integration::PivotSocketStressMsg; use qos_core::{ - async_server::{AsyncRequestProcessor, AsyncSocketServer}, - io::{AsyncStreamPool, SocketAddress}, + io::{SocketAddress, StreamPool}, + server::{RequestProcessor, SocketServer}, }; #[derive(Clone)] struct Processor; -impl AsyncRequestProcessor for Processor { +impl RequestProcessor for Processor { async fn process(&self, request: Vec) -> Vec { // Simulate just some baseline lag for all requests std::thread::sleep(std::time::Duration::from_secs(1)); @@ -52,11 +52,10 @@ async fn main() { let args: Vec = std::env::args().collect(); let socket_path = &args[1]; - let app_pool = - AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) - .expect("unable to create app pool"); + let app_pool = StreamPool::new(SocketAddress::new_unix(socket_path), 1) + .expect("unable to create app pool"); - let server = AsyncSocketServer::listen_all(app_pool, &Processor).unwrap(); + let server = SocketServer::listen_all(app_pool, &Processor).unwrap(); tokio::signal::ctrl_c().await.unwrap(); server.terminate(); diff --git a/src/integration/src/lib.rs b/src/integration/src/lib.rs index b8a03142..7f315729 100644 --- a/src/integration/src/lib.rs +++ b/src/integration/src/lib.rs 
@@ -8,8 +8,8 @@ use std::time::Duration; use borsh::{BorshDeserialize, BorshSerialize}; use qos_core::{ - async_client::AsyncClient, - io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + client::SocketClient, + io::{SocketAddress, StreamPool, TimeVal, TimeValLike}, parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; @@ -142,8 +142,8 @@ pub struct AdditionProofPayload { /// Panics if fs::exists errors. pub async fn wait_for_usock(path: &str) { let addr = SocketAddress::new_unix(path); - let pool = AsyncStreamPool::new(addr, 1).unwrap().shared(); - let client = AsyncClient::new(pool, TimeVal::milliseconds(50)); + let pool = StreamPool::new(addr, 1).unwrap().shared(); + let client = SocketClient::new(pool, TimeVal::milliseconds(50)); for _ in 0..50 { if std::fs::exists(path).unwrap() && client.try_connect().await.is_ok() diff --git a/src/integration/tests/async_client.rs b/src/integration/tests/async_client.rs index 7a0564c7..a19af8dc 100644 --- a/src/integration/tests/async_client.rs +++ b/src/integration/tests/async_client.rs @@ -1,14 +1,14 @@ use qos_core::{ - async_client::AsyncClient, - async_server::SocketServerError, - async_server::{AsyncRequestProcessor, AsyncSocketServer}, - io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + client::SocketClient, + io::{SocketAddress, StreamPool, TimeVal, TimeValLike}, + server::SocketServerError, + server::{RequestProcessor, SocketServer}, }; #[derive(Clone)] struct EchoProcessor; -impl AsyncRequestProcessor for EchoProcessor { +impl RequestProcessor for EchoProcessor { async fn process(&self, request: Vec) -> Vec { request } @@ -16,10 +16,10 @@ impl AsyncRequestProcessor for EchoProcessor { async fn run_echo_server( socket_path: &str, -) -> Result { - let pool = AsyncStreamPool::new(SocketAddress::new_unix(socket_path), 1) +) -> Result { + let pool = StreamPool::new(SocketAddress::new_unix(socket_path), 1) .expect("unable to create async pool"); - let server = 
AsyncSocketServer::listen_all(pool, &EchoProcessor)?; + let server = SocketServer::listen_all(pool, &EchoProcessor)?; Ok(server) } @@ -29,11 +29,11 @@ async fn direct_connect_works() { let socket_path = "/tmp/async_client_test_direct_connect_works.sock"; let socket = SocketAddress::new_unix(socket_path); let timeout = TimeVal::milliseconds(500); - let pool = AsyncStreamPool::new(socket, 1) + let pool = StreamPool::new(socket, 1) .expect("unable to create async pool") .shared(); - let client = AsyncClient::new(pool, timeout); + let client = SocketClient::new(pool, timeout); let server = run_echo_server(socket_path).await.unwrap(); @@ -48,10 +48,10 @@ async fn times_out_properly() { let socket_path = "/tmp/async_client_test_times_out_properly.sock"; let socket = SocketAddress::new_unix(socket_path); let timeout = TimeVal::milliseconds(500); - let pool = AsyncStreamPool::new(socket, 1) + let pool = StreamPool::new(socket, 1) .expect("unable to create async pool") .shared(); - let client = AsyncClient::new(pool, timeout); + let client = SocketClient::new(pool, timeout); let r = client.call(&[0]).await; assert!(r.is_err()); @@ -62,10 +62,10 @@ async fn repeat_connect_works() { let socket_path = "/tmp/async_client_test_repeat_connect_works.sock"; let socket = SocketAddress::new_unix(socket_path); let timeout = TimeVal::milliseconds(500); - let pool = AsyncStreamPool::new(socket, 1) + let pool = StreamPool::new(socket, 1) .expect("unable to create async pool") .shared(); - let client = AsyncClient::new(pool, timeout); + let client = SocketClient::new(pool, timeout); // server not running yet, expect a connection error let r = client.call(&[0]).await; diff --git a/src/integration/tests/async_remote_tls.rs b/src/integration/tests/async_remote_tls.rs index e6eca108..87858377 100644 --- a/src/integration/tests/async_remote_tls.rs +++ b/src/integration/tests/async_remote_tls.rs @@ -5,8 +5,8 @@ use integration::{ PivotRemoteTlsMsg, PIVOT_ASYNC_REMOTE_TLS_PATH, QOS_NET_PATH, }; 
use qos_core::{ - async_client::AsyncClient, - io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + client::SocketClient, + io::{SocketAddress, StreamPool, TimeVal, TimeValLike}, protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; @@ -41,13 +41,13 @@ async fn fetch_async_remote_tls_content() { tokio::time::sleep(Duration::from_millis(50)).await; } - let enclave_pool = AsyncStreamPool::new( + let enclave_pool = StreamPool::new( SocketAddress::new_unix(REMOTE_TLS_TEST_ENCLAVE_SOCKET), 1, ) .expect("unable to create enclave async pool"); - let enclave_client = AsyncClient::new( + let enclave_client = SocketClient::new( enclave_pool.shared(), TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), ); diff --git a/src/integration/tests/enclave_app_client_socket_stress.rs b/src/integration/tests/enclave_app_client_socket_stress.rs index fd5829bc..9011e96b 100644 --- a/src/integration/tests/enclave_app_client_socket_stress.rs +++ b/src/integration/tests/enclave_app_client_socket_stress.rs @@ -3,9 +3,9 @@ use integration::{ wait_for_usock, PivotSocketStressMsg, PIVOT_SOCKET_STRESS_PATH, }; use qos_core::{ - async_client::AsyncClient, + client::SocketClient, handles::Handles, - io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + io::{SocketAddress, StreamPool, TimeVal, TimeValLike}, protocol::{ msg::ProtocolMsg, services::boot::{ @@ -76,10 +76,10 @@ async fn enclave_app_client_socket_stress() { handles.put_quorum_key(&p256_pair).unwrap(); let enclave_pool = - AsyncStreamPool::new(SocketAddress::new_unix(ENCLAVE_SOCK), 1).unwrap(); + StreamPool::new(SocketAddress::new_unix(ENCLAVE_SOCK), 1).unwrap(); let app_pool = - AsyncStreamPool::new(SocketAddress::new_unix(APP_SOCK), 1).unwrap(); + StreamPool::new(SocketAddress::new_unix(APP_SOCK), 1).unwrap(); std::thread::spawn(move || { Reaper::execute( @@ -97,8 +97,8 @@ async fn enclave_app_client_socket_stress() { wait_for_usock(APP_SOCK).await; let enclave_client_pool = - 
AsyncStreamPool::new(SocketAddress::new_unix(ENCLAVE_SOCK), 1).unwrap(); - let enclave_client = AsyncClient::new( + StreamPool::new(SocketAddress::new_unix(ENCLAVE_SOCK), 1).unwrap(); + let enclave_client = SocketClient::new( enclave_client_pool.shared(), TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS + 3), // needs to be bigger than the slow request below + some time for recovery ); diff --git a/src/integration/tests/proofs.rs b/src/integration/tests/proofs.rs index 67af5a8b..f1af76dd 100644 --- a/src/integration/tests/proofs.rs +++ b/src/integration/tests/proofs.rs @@ -3,8 +3,8 @@ use std::{process::Command, str}; use borsh::BorshDeserialize; use integration::{wait_for_usock, PivotProofMsg, PIVOT_PROOF_PATH}; use qos_core::{ - async_client::AsyncClient, - io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + client::SocketClient, + io::{SocketAddress, StreamPool, TimeVal, TimeValLike}, protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; @@ -23,13 +23,11 @@ async fn fetch_and_verify_app_proof() { wait_for_usock(PROOF_TEST_ENCLAVE_SOCKET).await; - let enclave_pool = AsyncStreamPool::new( - SocketAddress::new_unix(PROOF_TEST_ENCLAVE_SOCKET), - 1, - ) - .unwrap(); + let enclave_pool = + StreamPool::new(SocketAddress::new_unix(PROOF_TEST_ENCLAVE_SOCKET), 1) + .unwrap(); - let enclave_client = AsyncClient::new( + let enclave_client = SocketClient::new( enclave_pool.shared(), TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), ); diff --git a/src/integration/tests/reaper.rs b/src/integration/tests/reaper.rs index cf9759d5..6b307e47 100644 --- a/src/integration/tests/reaper.rs +++ b/src/integration/tests/reaper.rs @@ -3,7 +3,7 @@ use std::fs; use integration::{PIVOT_ABORT_PATH, PIVOT_OK_PATH, PIVOT_PANIC_PATH}; use qos_core::{ handles::Handles, - io::{AsyncStreamPool, SocketAddress}, + io::{SocketAddress, StreamPool}, protocol::services::boot::ManifestEnvelope, reaper::{Reaper, REAPER_EXIT_DELAY_IN_SECONDS}, }; @@ -38,11 +38,10 @@ fn 
reaper_works() { assert!(handles.pivot_exists()); let enclave_pool = - AsyncStreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); + StreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); let app_pool = - AsyncStreamPool::new(SocketAddress::new_unix("./never.sock"), 1) - .unwrap(); + StreamPool::new(SocketAddress::new_unix("./never.sock"), 1).unwrap(); let reaper_handle = std::thread::spawn(move || { Reaper::execute( @@ -96,11 +95,10 @@ fn reaper_handles_non_zero_exits() { assert!(handles.pivot_exists()); let enclave_pool = - AsyncStreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); + StreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); let app_pool = - AsyncStreamPool::new(SocketAddress::new_unix("./never.sock"), 1) - .unwrap(); + StreamPool::new(SocketAddress::new_unix("./never.sock"), 1).unwrap(); let reaper_handle = std::thread::spawn(move || { Reaper::execute( @@ -155,11 +153,10 @@ fn reaper_handles_panic() { assert!(handles.pivot_exists()); let enclave_pool = - AsyncStreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); + StreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); let app_pool = - AsyncStreamPool::new(SocketAddress::new_unix("./never.sock"), 1) - .unwrap(); + StreamPool::new(SocketAddress::new_unix("./never.sock"), 1).unwrap(); let reaper_handle = std::thread::spawn(move || { Reaper::execute( diff --git a/src/integration/tests/simple_socket_stress.rs b/src/integration/tests/simple_socket_stress.rs index 720e0bec..506b9b53 100644 --- a/src/integration/tests/simple_socket_stress.rs +++ b/src/integration/tests/simple_socket_stress.rs @@ -4,8 +4,8 @@ use integration::{ wait_for_usock, PivotSocketStressMsg, PIVOT_SOCKET_STRESS_PATH, }; use qos_core::{ - async_client::{AsyncClient, ClientError}, - io::{AsyncStreamPool, IOError, SocketAddress, TimeVal, TimeValLike}, + client::{ClientError, SocketClient}, + io::{IOError, SocketAddress, StreamPool, TimeVal, TimeValLike}, 
protocol::ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; use qos_test_primitives::ChildWrapper; @@ -26,10 +26,10 @@ async fn simple_socket_stress() { let timeout = TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS); let app_pool = - AsyncStreamPool::new(SocketAddress::new_unix(SOCKET_STRESS_SOCK), 1) + StreamPool::new(SocketAddress::new_unix(SOCKET_STRESS_SOCK), 1) .unwrap(); - let enclave_client = AsyncClient::new(app_pool.shared(), timeout); + let enclave_client = SocketClient::new(app_pool.shared(), timeout); let app_request = borsh::to_vec(&PivotSocketStressMsg::SlowRequest(5500)).unwrap(); diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index c18a7937..70b38f6d 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -12,7 +12,7 @@ use crate::{ EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, }; -use crate::io::{AsyncStreamPool, IOError}; +use crate::io::{IOError, StreamPool}; /// "cid" pub const CID: &str = "cid"; @@ -30,7 +30,7 @@ pub const EPHEMERAL_FILE_OPT: &str = "ephemeral-file"; /// Name for the option to specify the manifest file. pub const MANIFEST_FILE_OPT: &str = "manifest-file"; const APP_USOCK: &str = "app-usock"; -/// Name for the option to specify the maximum `AsyncPool` size. +/// Name for the option to specify the maximum `StreamPool` size. pub const POOL_SIZE: &str = "pool-size"; /// CLI options for starting up the enclave server. @@ -48,9 +48,9 @@ impl EnclaveOpts { Self { parsed } } - /// Create a new [`AsyncPool`] of [`AsyncStream`] using the list of [`SocketAddress`] for the enclave server and - /// return the new [`AsyncPool`]. Analogous to [`Self::addr`] and [`Self::app_addr`] depending on the [`app`] parameter. - fn async_pool(&self, app: bool) -> Result { + /// Create a new `StreamPool` using the list of `SocketAddress` for the qos host. + /// The `app` parameter specifies if this is a pool meant for the enclave itself, or the enclave app. 
+ fn async_pool(&self, app: bool) -> Result { let usock_param = if app { APP_USOCK } else { USOCK }; match ( @@ -64,13 +64,13 @@ impl EnclaveOpts { c.parse().map_err(|_| IOError::ConnectAddressInvalid)?; let p = p.parse().map_err(|_| IOError::ConnectAddressInvalid)?; - AsyncStreamPool::new( + StreamPool::new( SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS), 1, ) } (None, None, Some(u)) => { - AsyncStreamPool::new(SocketAddress::new_unix(u), 1) + StreamPool::new(SocketAddress::new_unix(u), 1) } _ => panic!("Invalid socket opts"), } diff --git a/src/qos_core/src/async_client.rs b/src/qos_core/src/client.rs similarity index 92% rename from src/qos_core/src/async_client.rs rename to src/qos_core/src/client.rs index d76889fe..e40f7207 100644 --- a/src/qos_core/src/async_client.rs +++ b/src/qos_core/src/client.rs @@ -5,7 +5,7 @@ use std::time::Duration; use nix::sys::time::TimeVal; -use crate::io::{IOError, SharedAsyncStreamPool}; +use crate::io::{IOError, SharedStreamPool}; /// Enclave client error. #[derive(Debug)] @@ -29,15 +29,15 @@ impl From for ClientError { } /// Client for communicating with the enclave `crate::server::SocketServer`. #[derive(Clone, Debug)] -pub struct AsyncClient { - pool: SharedAsyncStreamPool, +pub struct SocketClient { + pool: SharedStreamPool, timeout: Duration, } -impl AsyncClient { +impl SocketClient { /// Create a new client. #[must_use] - pub fn new(pool: SharedAsyncStreamPool, timeout: TimeVal) -> Self { + pub fn new(pool: SharedStreamPool, timeout: TimeVal) -> Self { let timeout = timeval_to_duration(timeout); Self { pool, timeout } } diff --git a/src/qos_core/src/io/async_stream.rs b/src/qos_core/src/io/async_stream.rs deleted file mode 100644 index 7416319b..00000000 --- a/src/qos_core/src/io/async_stream.rs +++ /dev/null @@ -1,342 +0,0 @@ -//! Abstractions to handle connection based socket streams. 
- -use std::{io::ErrorKind, pin::Pin}; - -use tokio::{ - io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, - net::{UnixListener, UnixSocket, UnixStream}, -}; -#[cfg(feature = "vm")] -use tokio_vsock::{VsockListener, VsockStream}; - -use super::{IOError, SocketAddress}; - -#[derive(Debug)] -enum InnerListener { - Unix(UnixListener), - #[cfg(feature = "vm")] - Vsock(VsockListener), -} - -#[derive(Debug)] -enum InnerStream { - Unix(UnixStream), - #[cfg(feature = "vm")] - Vsock(VsockStream), -} - -/// Handle on a stream -#[derive(Debug)] -pub struct AsyncStream { - address: Option, - inner: Option, -} - -impl AsyncStream { - // accept a new connection, used by server side - fn unix_accepted(stream: UnixStream) -> Self { - Self { address: None, inner: Some(InnerStream::Unix(stream)) } - } - - // accept a new connection, used by server side - #[cfg(feature = "vm")] - fn vsock_accepted(stream: VsockStream) -> Self { - Self { address: None, inner: Some(InnerStream::Vsock(stream)) } - } - - /// Create a new `AsyncStream` with known `SocketAddress` and `TimeVal`. The stream starts disconnected - /// and will connect on the first `call`. - #[must_use] - pub fn new(address: &SocketAddress) -> Self { - Self { address: Some(address.clone()), inner: None } - } - - /// Create a new `Stream` from a `SocketAddress` and a timeout and connect using async - /// Sets `inner` to the new stream. - pub async fn connect(&mut self) -> Result<(), IOError> { - let addr = self.address()?.clone(); - - match self.address()? 
{ - SocketAddress::Unix(_uaddr) => { - let inner = unix_connect(addr).await?; - - self.inner = Some(InnerStream::Unix(inner)); - } - #[cfg(feature = "vm")] - SocketAddress::Vsock(_vaddr) => { - let inner = vsock_connect(addr).await?; - - self.inner = Some(InnerStream::Vsock(inner)); - } - } - - Ok(()) - } - - /// Reconnects this `AsyncStream` by calling `connect` again on the underlaying socket - pub async fn reconnect(&mut self) -> Result<(), IOError> { - let addr = self.address()?.clone(); - - match &mut self.inner_mut()? { - InnerStream::Unix(ref mut s) => { - *s = unix_connect(addr).await?; - } - #[cfg(feature = "vm")] - InnerStream::Vsock(ref mut s) => { - *s = vsock_connect(addr).await?; - } - } - Ok(()) - } - - /// Sends a buffer over the underlying socket using async - pub async fn send(&mut self, buf: &[u8]) -> Result<(), IOError> { - match &mut self.inner_mut()? { - InnerStream::Unix(ref mut s) => send(s, buf).await, - #[cfg(feature = "vm")] - InnerStream::Vsock(ref mut s) => send(s, buf).await, - } - } - - /// Receive from the underlying socket using async - pub async fn recv(&mut self) -> Result, IOError> { - match &mut self.inner_mut()? { - InnerStream::Unix(ref mut s) => recv(s).await, - #[cfg(feature = "vm")] - InnerStream::Vsock(ref mut s) => recv(s).await, - } - } - - /// Perform a "call" by sending the `req_buf` bytes and waiting for reply on the same socket. - pub async fn call(&mut self, req_buf: &[u8]) -> Result, IOError> { - // first time? 
connect - if self.inner.is_none() { - self.connect().await?; - } - - let send_result = self.send(req_buf).await; - if send_result.is_err() { - self.reset(); - send_result?; - } - - let result = self.recv().await; - eprintln!("AsyncStream: received"); - if result.is_err() { - self.reset(); - } - - result - } - - fn address(&self) -> Result<&SocketAddress, IOError> { - self.address.as_ref().ok_or(IOError::ConnectAddressInvalid) - } - - fn inner_mut(&mut self) -> Result<&mut InnerStream, IOError> { - self.inner.as_mut().ok_or(IOError::DisconnectedStream) - } - - /// Resets the inner stream, forcing a re-connect next `call` - pub fn reset(&mut self) { - self.inner = None; - } -} - -async fn send( - stream: &mut S, - buf: &[u8], -) -> Result<(), IOError> { - let len = buf.len(); - // First, send the length of the buffer - let len_buf: [u8; size_of::()] = (len as u64).to_le_bytes(); - - // send the header - stream.write_all(&len_buf).await?; - // Send the actual contents of the buffer - stream.write_all(buf).await?; - - Ok(()) -} - -async fn recv( - stream: &mut S, -) -> Result, IOError> { - let length: usize = { - let mut buf = [0u8; size_of::()]; - - let r = stream.read_exact(&mut buf).await.map_err(|e| match e.kind() { - ErrorKind::UnexpectedEof => IOError::RecvConnectionClosed, - _ => IOError::StdIoError(e), - }); - - r?; - - u64::from_le_bytes(buf) - .try_into() - // Should only be possible if we are on 32bit architecture - .map_err(|_| IOError::ArithmeticSaturation)? 
- }; - - // Read the buffer - let mut buf = vec![0; length]; - stream.read_exact(&mut buf).await.map_err(|e| match e.kind() { - ErrorKind::UnexpectedEof => IOError::RecvConnectionClosed, - _ => IOError::StdIoError(e), - })?; - - Ok(buf) -} - -impl From for std::io::Error { - fn from(value: IOError) -> Self { - match value { - IOError::DisconnectedStream => std::io::Error::new( - std::io::ErrorKind::NotFound, - "connection not found", - ), - _ => { - std::io::Error::new(std::io::ErrorKind::Other, "unknown error") - } - } - } -} - -impl AsyncRead for AsyncStream { - fn poll_read( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> std::task::Poll> { - match &mut self.inner_mut()? { - InnerStream::Unix(ref mut s) => Pin::new(s).poll_read(cx, buf), - #[cfg(feature = "vm")] - InnerStream::Vsock(ref mut s) => Pin::new(s).poll_read(cx, buf), - } - } -} - -impl AsyncWrite for AsyncStream { - fn poll_write( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &[u8], - ) -> std::task::Poll> { - match &mut self.inner_mut()? { - InnerStream::Unix(ref mut s) => Pin::new(s).poll_write(cx, buf), - #[cfg(feature = "vm")] - InnerStream::Vsock(ref mut s) => Pin::new(s).poll_write(cx, buf), - } - } - - fn poll_flush( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - match &mut self.inner_mut()? { - InnerStream::Unix(ref mut s) => Pin::new(s).poll_flush(cx), - #[cfg(feature = "vm")] - InnerStream::Vsock(ref mut s) => Pin::new(s).poll_flush(cx), - } - } - - fn poll_shutdown( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - match &mut self.inner_mut()? { - InnerStream::Unix(ref mut s) => Pin::new(s).poll_shutdown(cx), - #[cfg(feature = "vm")] - InnerStream::Vsock(ref mut s) => Pin::new(s).poll_shutdown(cx), - } - } -} - -/// Abstraction to listen for incoming stream connections. 
-pub struct AsyncListener { - inner: InnerListener, - // addr: SocketAddress, -} - -impl AsyncListener { - /// Bind and listen on the given address. - pub(crate) fn listen(addr: &SocketAddress) -> Result { - let listener = match *addr { - SocketAddress::Unix(uaddr) => { - let path = - uaddr.path().ok_or(IOError::ConnectAddressInvalid)?; - if path.exists() { - // attempt cleanup, this mostly happens from tests/panics - std::fs::remove_file(path)?; - } - let inner = InnerListener::Unix(UnixListener::bind(path)?); - Self { inner } - } - #[cfg(feature = "vm")] - SocketAddress::Vsock(vaddr) => { - let inner = InnerListener::Vsock(VsockListener::bind(vaddr)?); - Self { inner } - } - }; - - Ok(listener) - } - - /// Accept a new connection. - pub async fn accept(&self) -> Result { - let stream = match &self.inner { - InnerListener::Unix(l) => { - let (s, _) = l.accept().await?; - AsyncStream::unix_accepted(s) - } - #[cfg(feature = "vm")] - InnerListener::Vsock(l) => { - let (s, _) = l.accept().await?; - AsyncStream::vsock_accepted(s) - } - }; - - Ok(stream) - } -} - -impl Drop for AsyncListener { - fn drop(&mut self) { - match &mut self.inner { - InnerListener::Unix(usock) => match usock.local_addr() { - Ok(addr) => { - if let Some(path) = addr.as_pathname() { - _ = std::fs::remove_file(path); - } else { - eprintln!("unable to path the usock"); // do not crash in Drop - } - } - Err(e) => eprintln!("{e}"), // do not crash in Drop - }, - #[cfg(feature = "vm")] - InnerListener::Vsock(_vsock) => {} // vsock's drop will clear this - } - } -} - -async fn unix_connect( - addr: SocketAddress, -) -> Result { - let addr = addr.usock(); - let path = addr.path().ok_or(IOError::ConnectAddressInvalid)?; - - let socket = UnixSocket::new_stream()?; - eprintln!("Attempting USOCK connect to: {:?}", addr.path()); - socket.connect(path).await -} - -// raw vsock socket connect -#[cfg(feature = "vm")] -async fn vsock_connect( - addr: SocketAddress, -) -> Result { - let addr = addr.vsock(); - - 
eprintln!("Attempting VSOCK connect to: {:?}", addr); - VsockStream::connect(*addr).await -} diff --git a/src/qos_core/src/io/mod.rs b/src/qos_core/src/io/mod.rs index 17ef2d29..da1618df 100644 --- a/src/qos_core/src/io/mod.rs +++ b/src/qos_core/src/io/mod.rs @@ -3,14 +3,14 @@ //! NOTE TO MAINTAINERS: Interaction with any sys calls should be contained //! within this module. -mod async_pool; -mod async_stream; -pub use async_pool::*; -pub use async_stream::*; - +mod pool; mod stream; -pub use stream::{SocketAddress, VMADDR_FLAG_TO_HOST, VMADDR_NO_FLAGS}; +pub use pool::*; +pub use stream::*; +#[cfg(feature = "vm")] +use nix::sys::socket::VsockAddr; +use nix::sys::socket::{AddressFamily, SockaddrLike, UnixAddr}; pub use nix::sys::time::{TimeVal, TimeValLike}; /// QOS I/O error @@ -65,3 +65,148 @@ impl From for IOError { Self::PoolError(value) } } +/// Socket address. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum SocketAddress { + /// VSOCK address. + #[cfg(feature = "vm")] + Vsock(VsockAddr), + /// Unix address. + Unix(UnixAddr), +} + +/// VSOCK flag for talking to host. +pub const VMADDR_FLAG_TO_HOST: u8 = 0x01; +/// Don't specify any flags for a VSOCK. +pub const VMADDR_NO_FLAGS: u8 = 0x00; + +impl SocketAddress { + /// Create a new Unix socket. + /// + /// # Panics + /// + /// Panics if `nix::sys::socket::UnixAddr::new` panics. + #[must_use] + pub fn new_unix(path: &str) -> Self { + let addr = UnixAddr::new(path).unwrap(); + Self::Unix(addr) + } + + /// Create a new Vsock socket. + /// + /// For flags see: [Add flags field in the vsock address](). + #[cfg(feature = "vm")] + pub fn new_vsock(cid: u32, port: u32, flags: u8) -> Self { + Self::Vsock(Self::new_vsock_raw(cid, port, flags)) + } + + /// Create a new raw VsockAddr. + /// + /// For flags see: [Add flags field in the vsock address](). 
+ #[cfg(feature = "vm")] + #[allow(unsafe_code)] + pub fn new_vsock_raw(cid: u32, port: u32, flags: u8) -> VsockAddr { + let vsock_addr = SockAddrVm { + svm_family: AddressFamily::Vsock as libc::sa_family_t, + svm_reserved1: 0, + svm_cid: cid, + svm_port: port, + svm_flags: flags, + svm_zero: [0; 3], + }; + let vsock_addr_len = size_of::() as libc::socklen_t; + let addr = unsafe { + VsockAddr::from_raw( + &vsock_addr as *const SockAddrVm as *const libc::sockaddr, + Some(vsock_addr_len), + ) + .unwrap() + }; + addr + } + + /// Get the `AddressFamily` of the socket. + #[must_use] + pub fn family(&self) -> AddressFamily { + match *self { + #[cfg(feature = "vm")] + Self::Vsock(_) => AddressFamily::Vsock, + Self::Unix(_) => AddressFamily::Unix, + } + } + + /// Convenience method for accessing the wrapped address + #[must_use] + pub fn addr(&self) -> Box { + match *self { + #[cfg(feature = "vm")] + Self::Vsock(vsa) => Box::new(vsa), + Self::Unix(ua) => Box::new(ua), + } + } + + /// Shows socket debug info + #[must_use] + pub fn debug_info(&self) -> String { + match self { + #[cfg(feature = "vm")] + Self::Vsock(vsock) => { + format!("vsock cid: {} port: {}", vsock.cid(), vsock.port()) + } + Self::Unix(usock) => { + format!( + "usock path: {}", + usock + .path() + .unwrap_or(&std::path::PathBuf::from("unknown/error")) + .as_os_str() + .to_str() + .unwrap_or("unable to procure") + ) + } + } + } + + /// Returns the `UnixAddr` if this is a USOCK `SocketAddress`, panics otherwise + #[must_use] + pub fn usock(&self) -> &UnixAddr { + match self { + Self::Unix(usock) => usock, + #[cfg(feature = "vm")] + _ => panic!("invalid socket address requested"), + } + } + + /// Returns the `UnixAddr` if this is a USOCK `SocketAddress`, panics otherwise + #[must_use] + #[cfg(feature = "vm")] + pub fn vsock(&self) -> &VsockAddr { + match self { + Self::Vsock(vsock) => vsock, + _ => panic!("invalid socket address requested"), + } + } +} + +/// Extract svm_flags field value from existing VSOCK. 
+#[cfg(feature = "vm")] +#[allow(unsafe_code)] +pub fn vsock_svm_flags(vsock: VsockAddr) -> u8 { + unsafe { + let cast: SockAddrVm = std::mem::transmute(vsock); + cast.svm_flags + } +} + +#[cfg(feature = "vm")] +#[repr(C)] +struct SockAddrVm { + svm_family: libc::sa_family_t, + svm_reserved1: libc::c_ushort, + svm_port: libc::c_uint, + svm_cid: libc::c_uint, + // Field added [here](https://github.com/torvalds/linux/commit/3a9c049a81f6bd7c78436d7f85f8a7b97b0821e6) + // but not yet in a version of libc we can use. + svm_flags: u8, + svm_zero: [u8; 3], +} diff --git a/src/qos_core/src/io/async_pool.rs b/src/qos_core/src/io/pool.rs similarity index 81% rename from src/qos_core/src/io/async_pool.rs rename to src/qos_core/src/io/pool.rs index 879f489e..ed133e4f 100644 --- a/src/qos_core/src/io/async_pool.rs +++ b/src/qos_core/src/io/pool.rs @@ -3,7 +3,7 @@ use std::{path::Path, sync::Arc}; use nix::sys::socket::UnixAddr; use tokio::sync::{Mutex, MutexGuard, RwLock}; -use super::{AsyncListener, AsyncStream, IOError, SocketAddress}; +use super::{IOError, Listener, SocketAddress, Stream}; /// Socket Pool Errors #[derive(Debug)] @@ -20,27 +20,24 @@ struct AsyncPool { handles: Vec>, } -/// Specialization of `AsyncPool` with `AsyncStream` and connection/liste logic. +/// Specialization of `AsyncPool` with `Stream` and connection/liste logic. #[derive(Debug)] -pub struct AsyncStreamPool { +pub struct StreamPool { addresses: Vec, // local copy used for `listen` only TODO: refactor listeners out of pool - pool: AsyncPool, + pool: AsyncPool, } -/// Helper type to wrap `AsyncStreamPool` in `Arc` and `RwLock`. Used to allow multiple processors to run across IO +/// Helper type to wrap `StreamPool` in `Arc` and `RwLock`. Used to allow multiple processors to run across IO /// await points without locking the whole set. 
-pub type SharedAsyncStreamPool = Arc>; +pub type SharedStreamPool = Arc>; -impl AsyncStreamPool { - /// Create a new `AsyncStreamPool` with given starting `SocketAddress`, timout and number of addresses to populate. +impl StreamPool { + /// Create a new `StreamPool` with given starting `SocketAddress`, timout and number of addresses to populate. pub fn new( start_address: SocketAddress, mut count: u32, ) -> Result { - eprintln!( - "AsyncStreamPool start address: {:?}", - start_address.debug_info() - ); + eprintln!("StreamPool start address: {:?}", start_address.debug_info()); let mut addresses = Vec::new(); let mut addr = start_address; @@ -57,15 +54,14 @@ impl AsyncStreamPool { Ok(Self::with_addresses(addresses)) } - /// Create a new `AsyncStreamPool` which will contain all the provided addresses but no connections yet. + /// Create a new `StreamPool` which will contain all the provided addresses but no connections yet. #[must_use] fn with_addresses( addresses: impl IntoIterator, ) -> Self { let addresses: Vec = addresses.into_iter().collect(); - let streams: Vec = - addresses.iter().map(AsyncStream::new).collect(); + let streams: Vec = addresses.iter().map(Stream::new).collect(); let pool = AsyncPool::from(streams); @@ -74,7 +70,7 @@ impl AsyncStreamPool { /// Helper function to get the Arc and Mutex wrapping #[must_use] - pub fn shared(self) -> SharedAsyncStreamPool { + pub fn shared(self) -> SharedStreamPool { Arc::new(RwLock::new(self)) } @@ -90,17 +86,17 @@ impl AsyncStreamPool { self.len() == 0 } - /// Gets the next available `AsyncStream` behind a `MutexGuard` - pub async fn get(&self) -> MutexGuard { + /// Gets the next available `Stream` behind a `MutexGuard` + pub async fn get(&self) -> MutexGuard { self.pool.get().await } /// Create a new pool by listening new connection on all the addresses - pub fn listen(&self) -> Result, IOError> { + pub fn listen(&self) -> Result, IOError> { let mut listeners = Vec::new(); for addr in &self.addresses { - let 
listener = AsyncListener::listen(addr)?; + let listener = Listener::listen(addr)?; listeners.push(listener); } @@ -119,7 +115,7 @@ impl AsyncStreamPool { for _ in count..size { next = next.next_address()?; - self.pool.push(AsyncStream::new(&next)); + self.pool.push(Stream::new(&next)); self.addresses.push(next.clone()); } } @@ -127,11 +123,8 @@ impl AsyncStreamPool { Ok(()) } - /// Listen to new connections on added sockets on top of existing listeners, returning the list of new `AsyncListener` - pub fn listen_to( - &mut self, - size: u32, - ) -> Result, IOError> { + /// Listen to new connections on added sockets on top of existing listeners, returning the list of new `Listener` + pub fn listen_to(&mut self, size: u32) -> Result, IOError> { eprintln!("listening async pool to {size}"); let size = size as usize; let mut listeners = Vec::new(); @@ -144,7 +137,7 @@ impl AsyncStreamPool { eprintln!("adding listener on {}", next.debug_info()); self.addresses.push(next.clone()); - let listener = AsyncListener::listen(&next)?; + let listener = Listener::listen(&next)?; listeners.push(listener); } @@ -155,7 +148,7 @@ impl AsyncStreamPool { } impl AsyncPool { - /// Get a `AsyncStream` behind a `MutexGuard` for use in a `AsyncStream::call` + /// Get a `Stream` behind a `MutexGuard` for use in a `Stream::call` /// Will wait (async) if all connections are locked until one becomes available async fn get(&self) -> MutexGuard { // TODO: make this into an error @@ -212,7 +205,7 @@ impl SocketAddress { /// Creates and returns the "following" `SocketAddress`. In case of VSOCK we increment the port from the source by 1. /// In case of USOCK we increment the postfix of the path if present, or add a `"_0"` at the end. /// - /// This is mostly used by the `AsyncSocketPool`. + /// This is mostly used by the `SocketPool`. 
pub(crate) fn next_address(&self) -> Result { match self { Self::Unix(usock) => match usock.path() { @@ -229,7 +222,7 @@ impl SocketAddress { Self::Vsock(vsock) => Ok(Self::new_vsock( vsock.cid(), vsock.port() + 1, - super::stream::vsock_svm_flags(*vsock), + super::vsock_svm_flags(*vsock), )), } } diff --git a/src/qos_core/src/io/stream.rs b/src/qos_core/src/io/stream.rs index 156f9d68..2daf7855 100644 --- a/src/qos_core/src/io/stream.rs +++ b/src/qos_core/src/io/stream.rs @@ -1,153 +1,341 @@ //! Abstractions to handle connection based socket streams. +use std::{io::ErrorKind, pin::Pin}; + +use tokio::{ + io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, + net::{UnixListener, UnixSocket, UnixStream}, +}; #[cfg(feature = "vm")] -use nix::sys::socket::VsockAddr; -use nix::sys::socket::{AddressFamily, SockaddrLike, UnixAddr}; +use tokio_vsock::{VsockListener, VsockStream}; -// 25(retries) x 10(milliseconds) = 1/4 a second of retrying +use super::{IOError, SocketAddress}; -/// Socket address. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum SocketAddress { - /// VSOCK address. +#[derive(Debug)] +enum InnerListener { + Unix(UnixListener), #[cfg(feature = "vm")] - Vsock(VsockAddr), - /// Unix address. - Unix(UnixAddr), + Vsock(VsockListener), } -/// VSOCK flag for talking to host. -pub const VMADDR_FLAG_TO_HOST: u8 = 0x01; -/// Don't specify any flags for a VSOCK. -pub const VMADDR_NO_FLAGS: u8 = 0x00; - -impl SocketAddress { - /// Create a new Unix socket. - /// - /// # Panics - /// - /// Panics if `nix::sys::socket::UnixAddr::new` panics. - #[must_use] - pub fn new_unix(path: &str) -> Self { - let addr = UnixAddr::new(path).unwrap(); - Self::Unix(addr) - } - - /// Create a new Vsock socket. - /// - /// For flags see: [Add flags field in the vsock address](). 
+#[derive(Debug)] +enum InnerStream { + Unix(UnixStream), #[cfg(feature = "vm")] - pub fn new_vsock(cid: u32, port: u32, flags: u8) -> Self { - Self::Vsock(Self::new_vsock_raw(cid, port, flags)) + Vsock(VsockStream), +} + +/// Handle on a stream +#[derive(Debug)] +pub struct Stream { + address: Option, + inner: Option, +} + +impl Stream { + // accept a new connection, used by server side + fn unix_accepted(stream: UnixStream) -> Self { + Self { address: None, inner: Some(InnerStream::Unix(stream)) } } - /// Create a new raw VsockAddr. - /// - /// For flags see: [Add flags field in the vsock address](). + // accept a new connection, used by server side #[cfg(feature = "vm")] - #[allow(unsafe_code)] - pub fn new_vsock_raw(cid: u32, port: u32, flags: u8) -> VsockAddr { - let vsock_addr = SockAddrVm { - svm_family: AddressFamily::Vsock as libc::sa_family_t, - svm_reserved1: 0, - svm_cid: cid, - svm_port: port, - svm_flags: flags, - svm_zero: [0; 3], - }; - let vsock_addr_len = size_of::() as libc::socklen_t; - let addr = unsafe { - VsockAddr::from_raw( - &vsock_addr as *const SockAddrVm as *const libc::sockaddr, - Some(vsock_addr_len), - ) - .unwrap() - }; - addr + fn vsock_accepted(stream: VsockStream) -> Self { + Self { address: None, inner: Some(InnerStream::Vsock(stream)) } } - /// Get the `AddressFamily` of the socket. + /// Create a new `Stream` with known `SocketAddress` and `TimeVal`. The stream starts disconnected + /// and will connect on the first `call`. #[must_use] - pub fn family(&self) -> AddressFamily { - match *self { + pub fn new(address: &SocketAddress) -> Self { + Self { address: Some(address.clone()), inner: None } + } + + /// Create a new `Stream` from a `SocketAddress` and a timeout and connect using async + /// Sets `inner` to the new stream. + pub async fn connect(&mut self) -> Result<(), IOError> { + let addr = self.address()?.clone(); + + match self.address()? 
{ + SocketAddress::Unix(_uaddr) => { + let inner = unix_connect(addr).await?; + + self.inner = Some(InnerStream::Unix(inner)); + } #[cfg(feature = "vm")] - Self::Vsock(_) => AddressFamily::Vsock, - Self::Unix(_) => AddressFamily::Unix, + SocketAddress::Vsock(_vaddr) => { + let inner = vsock_connect(addr).await?; + + self.inner = Some(InnerStream::Vsock(inner)); + } } + + Ok(()) } - /// Convenience method for accessing the wrapped address - #[must_use] - pub fn addr(&self) -> Box { - match *self { + /// Reconnects this `Stream` by calling `connect` again on the underlaying socket + pub async fn reconnect(&mut self) -> Result<(), IOError> { + let addr = self.address()?.clone(); + + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => { + *s = unix_connect(addr).await?; + } #[cfg(feature = "vm")] - Self::Vsock(vsa) => Box::new(vsa), - Self::Unix(ua) => Box::new(ua), + InnerStream::Vsock(ref mut s) => { + *s = vsock_connect(addr).await?; + } } + Ok(()) } - /// Shows socket debug info - #[must_use] - pub fn debug_info(&self) -> String { - match self { + /// Sends a buffer over the underlying socket using async + pub async fn send(&mut self, buf: &[u8]) -> Result<(), IOError> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => send(s, buf).await, #[cfg(feature = "vm")] - Self::Vsock(vsock) => { - format!("vsock cid: {} port: {}", vsock.cid(), vsock.port()) - } - Self::Unix(usock) => { - format!( - "usock path: {}", - usock - .path() - .unwrap_or(&std::path::PathBuf::from("unknown/error")) - .as_os_str() - .to_str() - .unwrap_or("unable to procure") - ) + InnerStream::Vsock(ref mut s) => send(s, buf).await, + } + } + + /// Receive from the underlying socket using async + pub async fn recv(&mut self) -> Result, IOError> { + match &mut self.inner_mut()? 
{ + InnerStream::Unix(ref mut s) => recv(s).await, + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => recv(s).await, + } + } + + /// Perform a "call" by sending the `req_buf` bytes and waiting for reply on the same socket. + pub async fn call(&mut self, req_buf: &[u8]) -> Result, IOError> { + // first time? connect + if self.inner.is_none() { + self.connect().await?; + } + + let send_result = self.send(req_buf).await; + if send_result.is_err() { + self.reset(); + send_result?; + } + + let result = self.recv().await; + if result.is_err() { + self.reset(); + } + + result + } + + fn address(&self) -> Result<&SocketAddress, IOError> { + self.address.as_ref().ok_or(IOError::ConnectAddressInvalid) + } + + fn inner_mut(&mut self) -> Result<&mut InnerStream, IOError> { + self.inner.as_mut().ok_or(IOError::DisconnectedStream) + } + + /// Resets the inner stream, forcing a re-connect next `call` + pub fn reset(&mut self) { + self.inner = None; + } +} + +async fn send( + stream: &mut S, + buf: &[u8], +) -> Result<(), IOError> { + let len = buf.len(); + // First, send the length of the buffer + let len_buf: [u8; size_of::()] = (len as u64).to_le_bytes(); + + // send the header + stream.write_all(&len_buf).await?; + // Send the actual contents of the buffer + stream.write_all(buf).await?; + + Ok(()) +} + +async fn recv( + stream: &mut S, +) -> Result, IOError> { + let length: usize = { + let mut buf = [0u8; size_of::()]; + + let r = stream.read_exact(&mut buf).await.map_err(|e| match e.kind() { + ErrorKind::UnexpectedEof => IOError::RecvConnectionClosed, + _ => IOError::StdIoError(e), + }); + + r?; + + u64::from_le_bytes(buf) + .try_into() + // Should only be possible if we are on 32bit architecture + .map_err(|_| IOError::ArithmeticSaturation)? 
+ }; + + // Read the buffer + let mut buf = vec![0; length]; + stream.read_exact(&mut buf).await.map_err(|e| match e.kind() { + ErrorKind::UnexpectedEof => IOError::RecvConnectionClosed, + _ => IOError::StdIoError(e), + })?; + + Ok(buf) +} + +impl From for std::io::Error { + fn from(value: IOError) -> Self { + match value { + IOError::DisconnectedStream => std::io::Error::new( + std::io::ErrorKind::NotFound, + "connection not found", + ), + _ => { + std::io::Error::new(std::io::ErrorKind::Other, "unknown error") } } } +} - /// Returns the `UnixAddr` if this is a USOCK `SocketAddress`, panics otherwise - #[must_use] - pub fn usock(&self) -> &UnixAddr { - match self { - Self::Unix(usock) => usock, +impl AsyncRead for Stream { + fn poll_read( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => Pin::new(s).poll_read(cx, buf), + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_read(cx, buf), + } + } +} + +impl AsyncWrite for Stream { + fn poll_write( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> std::task::Poll> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => Pin::new(s).poll_write(cx, buf), #[cfg(feature = "vm")] - _ => panic!("invalid socket address requested"), + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_write(cx, buf), } } - /// Returns the `UnixAddr` if this is a USOCK `SocketAddress`, panics otherwise - #[must_use] - #[cfg(feature = "vm")] - pub fn vsock(&self) -> &VsockAddr { - match self { - Self::Vsock(vsock) => vsock, - _ => panic!("invalid socket address requested"), + fn poll_flush( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + match &mut self.inner_mut()? 
{ + InnerStream::Unix(ref mut s) => Pin::new(s).poll_flush(cx), + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_flush(cx), + } + } + + fn poll_shutdown( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + match &mut self.inner_mut()? { + InnerStream::Unix(ref mut s) => Pin::new(s).poll_shutdown(cx), + #[cfg(feature = "vm")] + InnerStream::Vsock(ref mut s) => Pin::new(s).poll_shutdown(cx), } } } -/// Extract svm_flags field value from existing VSOCK. -#[cfg(feature = "vm")] -#[allow(unsafe_code)] -pub fn vsock_svm_flags(vsock: VsockAddr) -> u8 { - unsafe { - let cast: SockAddrVm = std::mem::transmute(vsock); - cast.svm_flags +/// Abstraction to listen for incoming stream connections. +pub struct Listener { + inner: InnerListener, + // addr: SocketAddress, +} + +impl Listener { + /// Bind and listen on the given address. + pub(crate) fn listen(addr: &SocketAddress) -> Result { + let listener = match *addr { + SocketAddress::Unix(uaddr) => { + let path = + uaddr.path().ok_or(IOError::ConnectAddressInvalid)?; + if path.exists() { + // attempt cleanup, this mostly happens from tests/panics + std::fs::remove_file(path)?; + } + let inner = InnerListener::Unix(UnixListener::bind(path)?); + Self { inner } + } + #[cfg(feature = "vm")] + SocketAddress::Vsock(vaddr) => { + let inner = InnerListener::Vsock(VsockListener::bind(vaddr)?); + Self { inner } + } + }; + + Ok(listener) + } + + /// Accept a new connection. 
+ pub async fn accept(&self) -> Result { + let stream = match &self.inner { + InnerListener::Unix(l) => { + let (s, _) = l.accept().await?; + Stream::unix_accepted(s) + } + #[cfg(feature = "vm")] + InnerListener::Vsock(l) => { + let (s, _) = l.accept().await?; + Stream::vsock_accepted(s) + } + }; + + Ok(stream) } } +impl Drop for Listener { + fn drop(&mut self) { + match &mut self.inner { + InnerListener::Unix(usock) => match usock.local_addr() { + Ok(addr) => { + if let Some(path) = addr.as_pathname() { + _ = std::fs::remove_file(path); + } else { + eprintln!("unable to path the usock"); // do not crash in Drop + } + } + Err(e) => eprintln!("{e}"), // do not crash in Drop + }, + #[cfg(feature = "vm")] + InnerListener::Vsock(_vsock) => {} // vsock's drop will clear this + } + } +} + +async fn unix_connect( + addr: SocketAddress, +) -> Result { + let addr = addr.usock(); + let path = addr.path().ok_or(IOError::ConnectAddressInvalid)?; + + let socket = UnixSocket::new_stream()?; + eprintln!("Attempting USOCK connect to: {:?}", addr.path()); + socket.connect(path).await +} + +// raw vsock socket connect #[cfg(feature = "vm")] -#[repr(C)] -struct SockAddrVm { - svm_family: libc::sa_family_t, - svm_reserved1: libc::c_ushort, - svm_port: libc::c_uint, - svm_cid: libc::c_uint, - // Field added [here](https://github.com/torvalds/linux/commit/3a9c049a81f6bd7c78436d7f85f8a7b97b0821e6) - // but not yet in a version of libc we can use. 
- svm_flags: u8, - svm_zero: [u8; 3], +async fn vsock_connect( + addr: SocketAddress, +) -> Result { + let addr = addr.vsock(); + + eprintln!("Attempting VSOCK connect to: {:?}", addr); + VsockStream::connect(*addr).await } diff --git a/src/qos_core/src/lib.rs b/src/qos_core/src/lib.rs index 46d60b6c..b84a19af 100644 --- a/src/qos_core/src/lib.rs +++ b/src/qos_core/src/lib.rs @@ -17,8 +17,8 @@ compile_error!( "feature \"vm\" and feature \"mock\" cannot be enabled at the same time" ); -pub mod async_client; -pub mod async_server; +pub mod client; +pub mod server; pub mod cli; pub mod handles; diff --git a/src/qos_core/src/protocol/async_processor.rs b/src/qos_core/src/protocol/async_processor.rs index b44f5dda..b0334401 100644 --- a/src/qos_core/src/protocol/async_processor.rs +++ b/src/qos_core/src/protocol/async_processor.rs @@ -10,9 +10,9 @@ use super::{ ProtocolPhase, ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS, }; use crate::{ - async_client::{AsyncClient, ClientError}, - async_server::AsyncRequestProcessor, - io::SharedAsyncStreamPool, + client::{ClientError, SocketClient}, + io::SharedStreamPool, + server::RequestProcessor, }; const MEGABYTE: usize = 1024 * 1024; @@ -31,18 +31,15 @@ impl ProtocolState { /// Enclave state machine that executes when given a `ProtocolMsg`. #[derive(Clone)] pub struct AsyncProcessor { - app_client: AsyncClient, + app_client: SocketClient, state: SharedProtocolState, } impl AsyncProcessor { /// Create a new `Self`. 
#[must_use] - pub fn new( - state: SharedProtocolState, - app_pool: SharedAsyncStreamPool, - ) -> Self { - let app_client = AsyncClient::new( + pub fn new(state: SharedProtocolState, app_pool: SharedStreamPool) -> Self { + let app_client = SocketClient::new( app_pool, TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), ); @@ -63,7 +60,7 @@ impl AsyncProcessor { } } -impl AsyncRequestProcessor for AsyncProcessor { +impl RequestProcessor for AsyncProcessor { async fn process(&self, req_bytes: Vec) -> Vec { if req_bytes.len() > MAX_ENCODED_MSG_LEN { return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse( diff --git a/src/qos_core/src/protocol/error.rs b/src/qos_core/src/protocol/error.rs index e6de4549..50144b5b 100644 --- a/src/qos_core/src/protocol/error.rs +++ b/src/qos_core/src/protocol/error.rs @@ -3,7 +3,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use qos_p256::P256Error; use crate::{ - async_client::ClientError, + client::ClientError, io::IOError, protocol::{services::boot, ProtocolPhase}, }; @@ -147,7 +147,7 @@ pub enum ProtocolError { DifferentManifest, /// Error from the qos crypto library. QosCrypto(String), - /// Error during expanding the `AsyncPool`. + /// Error during expanding the `StreamPool`. PoolExpandError, } diff --git a/src/qos_core/src/reaper.rs b/src/qos_core/src/reaper.rs index 27201fb4..818b90fa 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -13,14 +13,14 @@ use std::{ use qos_nsm::NsmProvider; use crate::{ - async_server::AsyncSocketServer, handles::Handles, - io::AsyncStreamPool, + io::StreamPool, protocol::{ async_processor::AsyncProcessor, services::boot::{PivotConfig, RestartPolicy}, ProtocolPhase, ProtocolState, }, + server::SocketServer, }; /// Delay for restarting the pivot app if the process exits. 
@@ -46,8 +46,8 @@ impl Reaper { pub fn execute( handles: &Handles, nsm: Box, - pool: AsyncStreamPool, - app_pool: AsyncStreamPool, + pool: StreamPool, + app_pool: StreamPool, test_only_init_phase_override: Option, ) { let handles2 = handles.clone(); @@ -73,9 +73,8 @@ impl Reaper { app_pool.shared(), ); // listen_all will multiplex the processor accross all sockets - let mut server = - AsyncSocketServer::listen_all(pool, &processor) - .expect("unable to get listen task list"); + let mut server = SocketServer::listen_all(pool, &processor) + .expect("unable to get listen task list"); loop { // see if we got interrupted diff --git a/src/qos_core/src/async_server.rs b/src/qos_core/src/server.rs similarity index 86% rename from src/qos_core/src/async_server.rs rename to src/qos_core/src/server.rs index 81831c93..e67a68c0 100644 --- a/src/qos_core/src/async_server.rs +++ b/src/qos_core/src/server.rs @@ -3,7 +3,7 @@ use tokio::task::JoinHandle; -use crate::io::{AsyncListener, AsyncStreamPool, IOError}; +use crate::io::{IOError, Listener, StreamPool}; /// Error variants for [`SocketServer`] #[derive(Debug)] @@ -19,7 +19,7 @@ impl From for SocketServerError { } /// Something that can process requests in an async way. -pub trait AsyncRequestProcessor: Send { +pub trait RequestProcessor: Send { /// Process an incoming request and return a response in async. /// /// The request and response are raw bytes. Likely this should be encoded @@ -32,23 +32,23 @@ pub trait AsyncRequestProcessor: Send { } /// A bare bones, socket based server. -pub struct AsyncSocketServer { - /// `AsyncStreamPool` used to serve messages over. - pub pool: AsyncStreamPool, +pub struct SocketServer { + /// `StreamPool` used to serve messages over. + pub pool: StreamPool, /// List of tasks that are running on the server. pub tasks: Vec>>, } -impl AsyncSocketServer { +impl SocketServer { /// Listen and respond to incoming requests on all the pool's addresses with the given `processor`. 
/// This method returns a list of tasks that are running as part of this listener. `JoinHandle::abort()` /// should be called on each when the program exists (e.g. on ctrl+c) pub fn listen_all

( - pool: AsyncStreamPool, + pool: StreamPool, processor: &P, ) -> Result where - P: AsyncRequestProcessor + 'static + Clone, + P: RequestProcessor + 'static + Clone, { println!("`AsyncSocketServer` listening on pool size {}", pool.len()); @@ -59,11 +59,11 @@ impl AsyncSocketServer { } fn spawn_tasks_for_listeners

( - listeners: Vec, + listeners: Vec, processor: &P, ) -> Vec>> where - P: AsyncRequestProcessor + 'static + Clone, + P: RequestProcessor + 'static + Clone, { let mut tasks = Vec::new(); for listener in listeners { @@ -84,7 +84,7 @@ impl AsyncSocketServer { processor: &P, ) -> Result<(), IOError> where - P: AsyncRequestProcessor + 'static + Clone, + P: RequestProcessor + 'static + Clone, { let listeners = self.pool.listen_to(pool_size)?; let tasks = Self::spawn_tasks_for_listeners(listeners, processor); @@ -103,11 +103,11 @@ impl AsyncSocketServer { } async fn accept_loop

( - listener: AsyncListener, + listener: Listener, processor: P, ) -> Result<(), SocketServerError> where - P: AsyncRequestProcessor + Clone, + P: RequestProcessor + Clone, { loop { eprintln!("AsyncServer: accepting"); diff --git a/src/qos_host/src/async_host.rs b/src/qos_host/src/async_host.rs index 9bee29c2..42bbbe81 100644 --- a/src/qos_host/src/async_host.rs +++ b/src/qos_host/src/async_host.rs @@ -29,8 +29,8 @@ use axum::{ }; use borsh::BorshDeserialize; use qos_core::{ - async_client::AsyncClient, - io::{SharedAsyncStreamPool, TimeVal}, + client::SocketClient, + io::{SharedStreamPool, TimeVal}, protocol::{msg::ProtocolMsg, ProtocolError, ProtocolPhase}, }; @@ -42,13 +42,13 @@ use crate::{ /// Resource shared across tasks in the `AsyncHostServer`. #[derive(Debug)] struct AsyncQosHostState { - enclave_client: AsyncClient, + enclave_client: SocketClient, } /// HTTP server for the host of the enclave; proxies requests to the enclave. #[allow(clippy::module_name_repetitions)] pub struct AsyncHostServer { - enclave_pool: SharedAsyncStreamPool, + enclave_pool: SharedStreamPool, timeout: TimeVal, addr: SocketAddr, base_path: Option, @@ -59,7 +59,7 @@ impl AsyncHostServer { /// server. 
#[must_use] pub fn new( - enclave_pool: SharedAsyncStreamPool, + enclave_pool: SharedStreamPool, timeout: TimeVal, addr: SocketAddr, base_path: Option, @@ -83,7 +83,7 @@ impl AsyncHostServer { // pub async fn serve(&self) -> Result<(), String> { pub async fn serve(&self) { let state = Arc::new(AsyncQosHostState { - enclave_client: AsyncClient::new( + enclave_client: SocketClient::new( self.enclave_pool.clone(), self.timeout, ), diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index 847479d4..60aff44d 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -8,7 +8,7 @@ use std::{ use qos_core::{ cli::{CID, PORT, USOCK}, - io::{AsyncStreamPool, SocketAddress, TimeVal, TimeValLike}, + io::{SocketAddress, StreamPool, TimeVal, TimeValLike}, parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; @@ -119,11 +119,10 @@ impl HostOpts { ) } - /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and - /// return the new `AsyncPool`. + /// Create a new `StreamPool` using the list of `SocketAddress` for the qos host. 
pub(crate) fn enclave_pool( &self, - ) -> Result { + ) -> Result { match ( self.parsed.single(CID), self.parsed.single(PORT), @@ -141,12 +140,12 @@ impl HostOpts { let address = SocketAddress::new_vsock(c, p, self.to_host_flag()); - AsyncStreamPool::new(address, 1) // qos_host needs only 1 + StreamPool::new(address, 1) // qos_host needs only 1 } (None, None, Some(u)) => { let address = SocketAddress::new_unix(u); - AsyncStreamPool::new(address, 1) + StreamPool::new(address, 1) } _ => panic!("Invalid socket opts"), } diff --git a/src/qos_net/src/cli.rs b/src/qos_net/src/cli.rs index 141b68fb..6146ce9b 100644 --- a/src/qos_net/src/cli.rs +++ b/src/qos_net/src/cli.rs @@ -5,9 +5,9 @@ use qos_core::{ parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; -use qos_core::io::AsyncStreamPool; +use qos_core::io::StreamPool; -use crate::async_proxy::AsyncProxyServer; +use crate::proxy::ProxyServer; /// "cid" pub const CID: &str = "cid"; @@ -33,11 +33,10 @@ impl ProxyOpts { Self { parsed } } - /// Create a new `AsyncPool` of `AsyncStream` using the list of `SocketAddress` for the enclave server and - /// return the new `AsyncPool`. + /// Create a new `StreamPool` using the list of `SocketAddress` for the enclave server. pub(crate) fn async_pool( &self, - ) -> Result { + ) -> Result { let pool_size: u32 = self .parsed .single(POOL_SIZE) @@ -57,12 +56,12 @@ impl ProxyOpts { let address = SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS); - AsyncStreamPool::new(address, pool_size) + StreamPool::new(address, pool_size) } (None, None, Some(u)) => { let address = SocketAddress::new_unix(u); - AsyncStreamPool::new(address, pool_size) + StreamPool::new(address, pool_size) } _ => panic!("Invalid socket opts"), } @@ -75,7 +74,7 @@ pub struct CLI; impl CLI { /// Execute the enclave proxy CLI with the environment args in an async way. 
pub async fn execute() { - use qos_core::async_server::AsyncSocketServer; + use qos_core::server::SocketServer; let mut args: Vec = std::env::args().collect(); let opts = ProxyOpts::new(&mut args); @@ -85,7 +84,7 @@ impl CLI { } else if opts.parsed.help() { println!("{}", opts.parsed.info()); } else { - let server = AsyncSocketServer::listen_proxy( + let server = SocketServer::listen_proxy( opts.async_pool().expect("unable to create async socket pool"), ) .await diff --git a/src/qos_net/src/lib.rs b/src/qos_net/src/lib.rs index ea74616b..f63b32ae 100644 --- a/src/qos_net/src/lib.rs +++ b/src/qos_net/src/lib.rs @@ -9,11 +9,11 @@ pub mod error; pub mod proxy_msg; #[cfg(feature = "proxy")] -pub mod async_proxy; +pub mod proxy; #[cfg(feature = "proxy")] -pub mod async_proxy_connection; +pub mod proxy_connection; #[cfg(feature = "proxy")] -pub mod async_proxy_stream; +pub mod proxy_stream; #[cfg(feature = "proxy")] pub mod cli; diff --git a/src/qos_net/src/async_proxy.rs b/src/qos_net/src/proxy.rs similarity index 85% rename from src/qos_net/src/async_proxy.rs rename to src/qos_net/src/proxy.rs index 194bcb29..48ce60a4 100644 --- a/src/qos_net/src/async_proxy.rs +++ b/src/qos_net/src/proxy.rs @@ -2,27 +2,26 @@ use borsh::BorshDeserialize; use futures::Future; use qos_core::{ - async_server::{AsyncSocketServer, SocketServerError}, - io::{AsyncListener, AsyncStream, AsyncStreamPool, IOError}, + io::{IOError, Listener, Stream, StreamPool}, + server::{SocketServer, SocketServerError}, }; use crate::{ - async_proxy_connection::AsyncProxyConnection, error::QosNetError, - proxy_msg::ProxyMsg, + error::QosNetError, proxy_connection::ProxyConnection, proxy_msg::ProxyMsg, }; const MEGABYTE: usize = 1024 * 1024; const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; /// Socket<>TCP proxy to enable remote connections -pub struct AsyncProxy { - tcp_connection: Option, - sock_stream: AsyncStream, +pub struct Proxy { + tcp_connection: Option, + sock_stream: Stream, } -impl AsyncProxy { - 
/// Create a new AsyncProxy from the given AsyncStream, with empty tcp_connection - pub fn new(sock_stream: AsyncStream) -> Self { +impl Proxy { + /// Create a new `Proxy` from the given `Stream`, with empty tcp_connection + pub fn new(sock_stream: Stream) -> Self { Self { tcp_connection: None, sock_stream } } @@ -35,7 +34,7 @@ impl AsyncProxy { dns_resolvers: Vec, dns_port: u16, ) -> ProxyMsg { - match AsyncProxyConnection::new_from_name( + match ProxyConnection::new_from_name( hostname.clone(), port, dns_resolvers.clone(), @@ -60,7 +59,7 @@ impl AsyncProxy { /// Create a new connection, targeting an IP address directly. /// address. The TCP connection is opened and saved in internal state. async fn connect_by_ip(&mut self, ip: String, port: u16) -> ProxyMsg { - match AsyncProxyConnection::new_from_ip(ip.clone(), port).await { + match ProxyConnection::new_from_ip(ip.clone(), port).await { Ok(conn) => { let connection_id = conn.id; let remote_ip = conn.ip.clone(); @@ -115,7 +114,7 @@ impl AsyncProxy { } } -impl AsyncProxy { +impl Proxy { async fn run(&mut self) -> Result<(), IOError> { loop { // Only try to process ProxyMsg content on the USOCK/VSOCK if we're not connected to TCP yet. @@ -143,17 +142,17 @@ impl AsyncProxy { } } -pub trait AsyncProxyServer { +pub trait ProxyServer { fn listen_proxy( - pool: AsyncStreamPool, + pool: StreamPool, ) -> impl Future, SocketServerError>> + Send; } -impl AsyncProxyServer for AsyncSocketServer { +impl ProxyServer for SocketServer { /// Listen on a tcp proxy server in a way that allows the USOCK/VSOCK to be used as a /// dumb pipe after getting the `connect*` calls. 
async fn listen_proxy( - pool: AsyncStreamPool, + pool: StreamPool, ) -> Result, SocketServerError> { println!( "`AsyncSocketServer` proxy listening on pool size {}", @@ -175,11 +174,11 @@ impl AsyncProxyServer for AsyncSocketServer { } async fn accept_loop_proxy( - listener: AsyncListener, + listener: Listener, ) -> Result<(), SocketServerError> { loop { let stream = listener.accept().await?; - let mut proxy = AsyncProxy::new(stream); + let mut proxy = Proxy::new(stream); proxy.run().await?; } } diff --git a/src/qos_net/src/async_proxy_connection.rs b/src/qos_net/src/proxy_connection.rs similarity index 91% rename from src/qos_net/src/async_proxy_connection.rs rename to src/qos_net/src/proxy_connection.rs index ebee0139..9ef3bfc7 100644 --- a/src/qos_net/src/async_proxy_connection.rs +++ b/src/qos_net/src/proxy_connection.rs @@ -16,7 +16,7 @@ use tokio::{ use crate::error::QosNetError; /// Struct representing a TCP connection held on our proxy -pub struct AsyncProxyConnection { +pub struct ProxyConnection { /// Unsigned integer with the connection ID (random positive int) pub id: u128, /// IP address of the remote host @@ -25,7 +25,7 @@ pub struct AsyncProxyConnection { pub(crate) tcp_stream: TcpStream, } -impl AsyncProxyConnection { +impl ProxyConnection { /// Create a new `ProxyConnection` from a name. This results in a DNS /// request + TCP connection pub async fn new_from_name( @@ -33,7 +33,7 @@ impl AsyncProxyConnection { port: u16, dns_resolvers: Vec, dns_port: u16, - ) -> Result { + ) -> Result { let ip = resolve_hostname(hostname, dns_resolvers, dns_port).await?; // Generate a new random u32 to get an ID. 
We'll use it to name our @@ -45,7 +45,7 @@ impl AsyncProxyConnection { let tcp_addr = SocketAddr::new(ip, port); let tcp_stream = TcpStream::connect(tcp_addr).await?; - Ok(AsyncProxyConnection { + Ok(ProxyConnection { id: connection_id, ip: ip.to_string(), tcp_stream, @@ -57,7 +57,7 @@ impl AsyncProxyConnection { pub async fn new_from_ip( ip: String, port: u16, - ) -> Result { + ) -> Result { // Generate a new random u32 to get an ID. We'll use it to name our // socket. This will be our connection ID. let connection_id = { @@ -69,11 +69,11 @@ impl AsyncProxyConnection { let tcp_addr = SocketAddr::new(ip_addr, port); let tcp_stream = TcpStream::connect(tcp_addr).await?; - Ok(AsyncProxyConnection { id: connection_id, ip, tcp_stream }) + Ok(ProxyConnection { id: connection_id, ip, tcp_stream }) } } -impl AsyncProxyConnection { +impl ProxyConnection { pub async fn read( &mut self, buf: &mut [u8], diff --git a/src/qos_net/src/async_proxy_stream.rs b/src/qos_net/src/proxy_stream.rs similarity index 80% rename from src/qos_net/src/async_proxy_stream.rs rename to src/qos_net/src/proxy_stream.rs index a648f1d5..6a30bc5d 100644 --- a/src/qos_net/src/async_proxy_stream.rs +++ b/src/qos_net/src/proxy_stream.rs @@ -3,7 +3,7 @@ use std::pin::Pin; use borsh::BorshDeserialize; -use qos_core::io::AsyncStream; +use qos_core::io::Stream; use tokio::{ io::{AsyncRead, AsyncWrite}, sync::MutexGuard, @@ -14,9 +14,9 @@ use crate::{error::QosNetError, proxy_msg::ProxyMsg}; /// Struct representing a remote connection /// This is going to be used by enclaves, on the other side of a socket /// and plugged into the tokio-rustls via the AsyncWrite and AsyncRead traits -pub struct AsyncProxyStream<'pool> { - /// AsyncStream we hold for this connection - stream: MutexGuard<'pool, AsyncStream>, +pub struct ProxyStream<'pool> { + /// Stream we hold for this connection + stream: MutexGuard<'pool, Stream>, /// Once a connection is established (successful `ConnectByName` or /// ConnectByIp request), 
this connection ID is set the u32 in /// `ConnectResponse`. @@ -27,12 +27,12 @@ pub struct AsyncProxyStream<'pool> { pub remote_ip: String, } -impl<'pool> AsyncProxyStream<'pool> { +impl<'pool> ProxyStream<'pool> { /// Create a new AsyncProxyStream by targeting a hostname /// /// # Arguments /// - /// * `stream` - the `AsyncStream` picked from a `AsyncStreamPool` behind a `MutexGuard` (e.g. from `pool.get().await`) + /// * `stream` - the `Stream` picked from a `StreamPool` behind a `MutexGuard` (e.g. from `pool.get().await`) /// * `hostname` - the hostname to connect to (the remote qos_net proxy will /// resolve DNS) /// * `port` - the port the remote qos_net proxy should connect to @@ -41,7 +41,7 @@ impl<'pool> AsyncProxyStream<'pool> { /// * `dns_port` - DNS port to use while resolving DNS (typically: 53 or /// 853) pub async fn connect_by_name( - mut stream: MutexGuard<'pool, AsyncStream>, + mut stream: MutexGuard<'pool, Stream>, hostname: String, port: u16, dns_resolvers: Vec, @@ -75,12 +75,12 @@ impl<'pool> AsyncProxyStream<'pool> { /// Create a new ProxyStream by targeting an IP address directly. /// /// # Arguments - /// * `stream` - the `AsyncStream` picked from a `AsyncStreamPool` behind a `MutexGuard` (e.g. from `pool.get().await`) + /// * `stream` - the `Stream` picked from a `StreamPool` behind a `MutexGuard` (e.g. 
from `pool.get().await`) /// * `ip` - the IP the remote qos_net proxy should connect to /// * `port` - the port the remote qos_net proxy should connect to /// (typically: 80 or 443 for http/https) pub async fn connect_by_ip( - mut stream: MutexGuard<'pool, AsyncStream>, + mut stream: MutexGuard<'pool, Stream>, ip: String, port: u16, ) -> Result { @@ -113,36 +113,36 @@ impl<'pool> AsyncProxyStream<'pool> { } } -impl AsyncRead for AsyncProxyStream<'_> { +impl AsyncRead for ProxyStream<'_> { fn poll_read( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> std::task::Poll> { - Pin::<&mut AsyncStream>::new(&mut self.stream).poll_read(cx, buf) + Pin::<&mut Stream>::new(&mut self.stream).poll_read(cx, buf) } } -impl AsyncWrite for AsyncProxyStream<'_> { +impl AsyncWrite for ProxyStream<'_> { fn poll_write( mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { - Pin::<&mut AsyncStream>::new(&mut self.stream).poll_write(cx, buf) + Pin::<&mut Stream>::new(&mut self.stream).poll_write(cx, buf) } fn poll_flush( mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { - Pin::<&mut AsyncStream>::new(&mut self.stream).poll_flush(cx) + Pin::<&mut Stream>::new(&mut self.stream).poll_flush(cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { - Pin::<&mut AsyncStream>::new(&mut self.stream).poll_shutdown(cx) + Pin::<&mut Stream>::new(&mut self.stream).poll_shutdown(cx) } } From db6da418b5e36d0a7b10b5b2b14aa72e50dadfbe Mon Sep 17 00:00:00 2001 From: Ales Katona Date: Thu, 17 Jul 2025 14:42:29 -0700 Subject: [PATCH 20/20] qos_core: use Arc> for processor so we don't clone and keep singular state --- src/Cargo.lock | 2 +- .../src/bin/pivot_async_remote_tls.rs | 9 ++++--- src/integration/src/bin/pivot_proof.rs | 16 +++++++---- .../src/bin/pivot_socket_stress.rs | 10 ++++++- src/integration/tests/async_client.rs | 
11 +++++++- src/qos_core/src/client.rs | 15 +++++++++-- src/qos_core/src/protocol/async_processor.rs | 19 +++++++------ src/qos_core/src/reaper.rs | 9 +++++-- src/qos_core/src/server.rs | 27 ++++++++++++------- src/qos_nsm/Cargo.toml | 1 + src/qos_nsm/src/nsm.rs | 2 +- 11 files changed, 87 insertions(+), 34 deletions(-) diff --git a/src/Cargo.lock b/src/Cargo.lock index ec9335dd..6d517e4b 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1571,7 +1571,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] diff --git a/src/integration/src/bin/pivot_async_remote_tls.rs b/src/integration/src/bin/pivot_async_remote_tls.rs index b9856cf1..88955fbb 100644 --- a/src/integration/src/bin/pivot_async_remote_tls.rs +++ b/src/integration/src/bin/pivot_async_remote_tls.rs @@ -9,7 +9,10 @@ use qos_core::{ }; use qos_net::proxy_stream::ProxyStream; use rustls::RootCertStore; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + sync::RwLock, +}; use tokio_rustls::TlsConnector; #[derive(Clone)] @@ -18,8 +21,8 @@ struct Processor { } impl Processor { - fn new(net_pool: SharedStreamPool) -> Self { - Processor { net_pool } + fn new(net_pool: SharedStreamPool) -> Arc> { + Arc::new(RwLock::new(Processor { net_pool })) } } diff --git a/src/integration/src/bin/pivot_proof.rs b/src/integration/src/bin/pivot_proof.rs index f9ba0539..032ff040 100644 --- a/src/integration/src/bin/pivot_proof.rs +++ b/src/integration/src/bin/pivot_proof.rs @@ -1,4 +1,5 @@ use core::panic; +use std::sync::Arc; use borsh::BorshDeserialize; use integration::{AdditionProof, AdditionProofPayload, PivotProofMsg}; @@ -7,12 +8,19 @@ use qos_core::{ io::{SocketAddress, StreamPool}, server::{RequestProcessor, SocketServer}, }; +use tokio::sync::RwLock; #[derive(Clone)] struct Processor 
{ ephemeral_key_handle: EphemeralKeyHandle, } +impl Processor { + pub fn new(ephemeral_key_handle: EphemeralKeyHandle) -> Arc> { + Arc::new(RwLock::new(Self { ephemeral_key_handle })) + } +} + impl RequestProcessor for Processor { async fn process(&self, request: Vec) -> Vec { let msg = PivotProofMsg::try_from_slice(&request) @@ -59,11 +67,9 @@ async fn main() { let server = SocketServer::listen_all( app_pool, - &Processor { - ephemeral_key_handle: EphemeralKeyHandle::new( - "./mock/ephemeral_seed.secret.keep".to_string(), - ), - }, + &Processor::new(EphemeralKeyHandle::new( + "./mock/ephemeral_seed.secret.keep".to_string(), + )), ) .unwrap(); diff --git a/src/integration/src/bin/pivot_socket_stress.rs b/src/integration/src/bin/pivot_socket_stress.rs index 0d6a096e..8b4e9432 100644 --- a/src/integration/src/bin/pivot_socket_stress.rs +++ b/src/integration/src/bin/pivot_socket_stress.rs @@ -1,4 +1,5 @@ use core::panic; +use std::sync::Arc; use borsh::BorshDeserialize; use integration::PivotSocketStressMsg; @@ -6,10 +7,17 @@ use qos_core::{ io::{SocketAddress, StreamPool}, server::{RequestProcessor, SocketServer}, }; +use tokio::sync::RwLock; #[derive(Clone)] struct Processor; +impl Processor { + pub fn new() -> Arc> { + Arc::new(RwLock::new(Self)) + } +} + impl RequestProcessor for Processor { async fn process(&self, request: Vec) -> Vec { // Simulate just some baseline lag for all requests @@ -55,7 +63,7 @@ async fn main() { let app_pool = StreamPool::new(SocketAddress::new_unix(socket_path), 1) .expect("unable to create app pool"); - let server = SocketServer::listen_all(app_pool, &Processor).unwrap(); + let server = SocketServer::listen_all(app_pool, &Processor::new()).unwrap(); tokio::signal::ctrl_c().await.unwrap(); server.terminate(); diff --git a/src/integration/tests/async_client.rs b/src/integration/tests/async_client.rs index a19af8dc..ae69f11f 100644 --- a/src/integration/tests/async_client.rs +++ b/src/integration/tests/async_client.rs @@ -1,13 +1,22 @@ 
+use std::sync::Arc; + use qos_core::{ client::SocketClient, io::{SocketAddress, StreamPool, TimeVal, TimeValLike}, server::SocketServerError, server::{RequestProcessor, SocketServer}, }; +use tokio::sync::RwLock; #[derive(Clone)] struct EchoProcessor; +impl EchoProcessor { + pub fn new() -> Arc> { + Arc::new(RwLock::new(Self)) + } +} + impl RequestProcessor for EchoProcessor { async fn process(&self, request: Vec) -> Vec { request @@ -19,7 +28,7 @@ async fn run_echo_server( ) -> Result { let pool = StreamPool::new(SocketAddress::new_unix(socket_path), 1) .expect("unable to create async pool"); - let server = SocketServer::listen_all(pool, &EchoProcessor)?; + let server = SocketServer::listen_all(pool, &EchoProcessor::new())?; Ok(server) } diff --git a/src/qos_core/src/client.rs b/src/qos_core/src/client.rs index e40f7207..339530d6 100644 --- a/src/qos_core/src/client.rs +++ b/src/qos_core/src/client.rs @@ -5,7 +5,7 @@ use std::time::Duration; use nix::sys::time::TimeVal; -use crate::io::{IOError, SharedStreamPool}; +use crate::io::{IOError, SharedStreamPool, SocketAddress, StreamPool}; /// Enclave client error. #[derive(Debug)] @@ -35,13 +35,24 @@ pub struct SocketClient { } impl SocketClient { - /// Create a new client. + /// Create a new client with given `StreamPool`. #[must_use] pub fn new(pool: SharedStreamPool, timeout: TimeVal) -> Self { let timeout = timeval_to_duration(timeout); Self { pool, timeout } } + /// Create a new client from a single `SocketAddress`. This creates an implicit single socket `StreamPool`. + pub fn single( + addr: SocketAddress, + timeout: TimeVal, + ) -> Result { + let pool = StreamPool::new(addr, 1)?.shared(); + let timeout = timeval_to_duration(timeout); + + Ok(Self { pool, timeout }) + } + /// Send raw bytes and wait for a response until the clients configured /// timeout. 
/// diff --git a/src/qos_core/src/protocol/async_processor.rs b/src/qos_core/src/protocol/async_processor.rs index b0334401..a2a924af 100644 --- a/src/qos_core/src/protocol/async_processor.rs +++ b/src/qos_core/src/protocol/async_processor.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::io::{TimeVal, TimeValLike}; use borsh::BorshDeserialize; -use tokio::sync::Mutex; +use tokio::sync::RwLock; use super::{ error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, @@ -19,12 +19,12 @@ const MEGABYTE: usize = 1024 * 1024; const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; /// Helper type to keep `ProtocolState` shared using `Arc>` -type SharedProtocolState = Arc>; +type SharedProtocolState = Arc>; impl ProtocolState { /// Wrap this `ProtocolState` into a `Mutex` in an `Arc`. pub fn shared(self) -> SharedProtocolState { - Arc::new(Mutex::new(self)) + Arc::new(RwLock::new(self)) } } @@ -36,19 +36,22 @@ pub struct AsyncProcessor { } impl AsyncProcessor { - /// Create a new `Self`. + /// Create a new `Self` inside `Arc` and `Mutex`. 
#[must_use] - pub fn new(state: SharedProtocolState, app_pool: SharedStreamPool) -> Self { + pub fn new( + state: SharedProtocolState, + app_pool: SharedStreamPool, + ) -> Arc> { let app_client = SocketClient::new( app_pool, TimeVal::seconds(ENCLAVE_APP_SOCKET_CLIENT_TIMEOUT_SECS), ); - Self { app_client, state } + Arc::new(RwLock::new(Self { app_client, state })) } /// Helper to get phase between locking the shared state async fn get_phase(&self) -> ProtocolPhase { - self.state.lock().await.get_phase() + self.state.read().await.get_phase() } /// Expands the app pool to given pool size @@ -99,7 +102,7 @@ impl RequestProcessor for AsyncProcessor { } } else { // handle all the others here - self.state.lock().await.handle_msg(&msg_req) + self.state.write().await.handle_msg(&msg_req) } } } diff --git a/src/qos_core/src/reaper.rs b/src/qos_core/src/reaper.rs index 818b90fa..2dc8b6fa 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -68,7 +68,7 @@ impl Reaper { test_only_init_phase_override, ); // send a shared version of state and the async pool to each processor - let mut processor = AsyncProcessor::new( + let processor = AsyncProcessor::new( protocol_state.shared(), app_pool.shared(), ); @@ -94,7 +94,12 @@ impl Reaper { "unable to listen_to on the running server", ); // expand app connections to pool_size - processor.expand_to(pool_size).await.expect( + processor + .write() + .await + .expand_to(pool_size) + .await + .expect( "unable to expand_to on the processor app pool", ); diff --git a/src/qos_core/src/server.rs b/src/qos_core/src/server.rs index e67a68c0..191f51bc 100644 --- a/src/qos_core/src/server.rs +++ b/src/qos_core/src/server.rs @@ -1,7 +1,9 @@ //! Streaming socket based server for use in an enclave. Listens for connections //! from [`crate::client::Client`]. 
-use tokio::task::JoinHandle; +use std::sync::Arc; + +use tokio::{sync::RwLock, task::JoinHandle}; use crate::io::{IOError, Listener, StreamPool}; @@ -18,6 +20,9 @@ impl From for SocketServerError { } } +/// Alias to simplify `Arc>` where `P` is the `RequestProcessor` +pub type SharedProcessor

= Arc>; + /// Something that can process requests in an async way. pub trait RequestProcessor: Send { /// Process an incoming request and return a response in async. @@ -45,10 +50,10 @@ impl SocketServer { /// should be called on each when the program exists (e.g. on ctrl+c) pub fn listen_all

( pool: StreamPool, - processor: &P, + processor: &SharedProcessor

, ) -> Result where - P: RequestProcessor + 'static + Clone, + P: RequestProcessor + Sync + 'static, { println!("`AsyncSocketServer` listening on pool size {}", pool.len()); @@ -60,10 +65,10 @@ impl SocketServer { fn spawn_tasks_for_listeners

( listeners: Vec, - processor: &P, + processor: &SharedProcessor

, ) -> Vec>> where - P: RequestProcessor + 'static + Clone, + P: RequestProcessor + Sync + 'static, { let mut tasks = Vec::new(); for listener in listeners { @@ -81,10 +86,10 @@ impl SocketServer { pub fn listen_to

( &mut self, pool_size: u32, - processor: &P, + processor: &SharedProcessor

, ) -> Result<(), IOError> where - P: RequestProcessor + 'static + Clone, + P: RequestProcessor + Sync + 'static, { let listeners = self.pool.listen_to(pool_size)?; let tasks = Self::spawn_tasks_for_listeners(listeners, processor); @@ -104,10 +109,10 @@ impl SocketServer { async fn accept_loop

( listener: Listener, - processor: P, + processor: SharedProcessor

, ) -> Result<(), SocketServerError> where - P: RequestProcessor + Clone, + P: RequestProcessor, { loop { eprintln!("AsyncServer: accepting"); @@ -115,7 +120,9 @@ where loop { match stream.recv().await { Ok(payload) => { - let response = processor.process(payload).await; + let response = + processor.read().await.process(payload).await; + match stream.send(&response).await { Ok(()) => {} Err(err) => { diff --git a/src/qos_nsm/Cargo.toml b/src/qos_nsm/Cargo.toml index 347f9d77..70fdeeeb 100644 --- a/src/qos_nsm/Cargo.toml +++ b/src/qos_nsm/Cargo.toml @@ -6,6 +6,7 @@ publish = false [dependencies] qos_hex = { path = "../qos_hex" } + borsh = { version = "1.0", features = ["std", "derive"] , default-features = false} aws-nitro-enclaves-nsm-api = { version = "0.4", features = ["nix"], default-features = false } aws-nitro-enclaves-cose = { version = "0.5", default-features = false } diff --git a/src/qos_nsm/src/nsm.rs b/src/qos_nsm/src/nsm.rs index 7fe637a2..1188642d 100644 --- a/src/qos_nsm/src/nsm.rs +++ b/src/qos_nsm/src/nsm.rs @@ -8,7 +8,7 @@ use crate::{nitro, types}; /// generic so mock providers can be subbed in for testing. In production use /// [`Nsm`]. // https://github.com/aws/aws-nitro-enclaves-nsm-api/blob/main/docs/attestation_process.md -pub trait NsmProvider: Send { +pub trait NsmProvider: Send + Sync { /// Create a message with input data and output capacity from a given /// request, then send it to the NSM driver via `ioctl()` and wait /// for the driver's response.