diff --git a/.gitignore b/.gitignore index b78f821af..03506ed58 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ flamegraph.* !/minio/bucket-policy.json *.bak +*settings.toml diff --git a/Cargo.lock b/Cargo.lock index 653864679..0afe1e43b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,18 +130,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-attribute-access-control" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47fe28365b33e8334dd70ae2f34a43892363012fe239cf37d2ee91693575b1f8" -dependencies = [ - "anchor-syn 0.30.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-attribute-account" version = "0.29.0" @@ -155,19 +143,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-attribute-account" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c288d496168268d198d9b53ee9f4f9d260a55ba4df9877ea1d4486ad6109e0f" -dependencies = [ - "anchor-syn 0.30.1", - "bs58 0.5.0", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-attribute-constant" version = "0.29.0" @@ -179,17 +154,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-attribute-constant" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b77b6948d0eeaaa129ce79eea5bbbb9937375a9241d909ca8fb9e006bb6e90" -dependencies = [ - "anchor-syn 0.30.1", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-attribute-error" version = "0.29.0" @@ -201,17 +165,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-attribute-error" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d20bb569c5a557c86101b944721d865e1fd0a4c67c381d31a44a84f07f84828" -dependencies = [ - "anchor-syn 0.30.1", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-attribute-event" version = "0.29.0" @@ -224,18 +177,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-attribute-event" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cebd8d0671a3a9dc3160c48598d652c34c77de6be4d44345b8b514323284d57" -dependencies = [ - "anchor-syn 0.30.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-attribute-program" version = "0.29.0" @@ -247,30 +188,13 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-attribute-program" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb2a5eb0860e661ab31aff7bb5e0288357b176380e985bade4ccb395981b42d" -dependencies = [ - "anchor-lang-idl", - "anchor-syn 0.30.1", - "anyhow", - "bs58 0.5.0", - "heck 0.3.3", - "proc-macro2", - "quote", - "serde_json", - "syn 1.0.109", -] - [[package]] name = "anchor-client" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb48c4a7911038da546dc752655a29fa49f6bd50ebc1edca218bac8da1012acd" dependencies = [ - "anchor-lang 0.29.0", + "anchor-lang", "anyhow", "futures", "regex", @@ -294,17 +218,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-derive-accounts" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04368b5abef4266250ca8d1d12f4dff860242681e4ec22b885dcfe354fd35aa1" -dependencies = [ - "anchor-syn 0.30.1", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-derive-serde" version = "0.29.0" @@ -318,19 +231,6 @@ dependencies = 
[ "syn 1.0.109", ] -[[package]] -name = "anchor-derive-serde" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0bb0e0911ad4a70cab880cdd6287fe1e880a1a9d8e4e6defa8e9044b9796a6c" -dependencies = [ - "anchor-syn 0.30.1", - "borsh-derive-internal 0.10.3", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-derive-space" version = "0.29.0" @@ -342,17 +242,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "anchor-derive-space" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef415ff156dc82e9ecb943189b0cb241b3a6bfc26a180234dc21bd3ef3ce0cb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "anchor-gen" version = "0.3.1" @@ -401,15 +290,15 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35da4785497388af0553586d55ebdc08054a8b1724720ef2749d313494f2b8ad" dependencies = [ - "anchor-attribute-access-control 0.29.0", - "anchor-attribute-account 0.29.0", - "anchor-attribute-constant 0.29.0", - "anchor-attribute-error 0.29.0", - "anchor-attribute-event 0.29.0", - "anchor-attribute-program 0.29.0", - "anchor-derive-accounts 0.29.0", - "anchor-derive-serde 0.29.0", - "anchor-derive-space 0.29.0", + "anchor-attribute-access-control", + "anchor-attribute-account", + "anchor-attribute-constant", + "anchor-attribute-error", + "anchor-attribute-event", + "anchor-attribute-program", + "anchor-derive-accounts", + "anchor-derive-serde", + "anchor-derive-space", "arrayref", "base64 0.13.1", "bincode", @@ -420,64 +309,15 @@ dependencies = [ "thiserror", ] -[[package]] -name = "anchor-lang" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6620c9486d9d36a4389cab5e37dc34a42ed0bfaa62e6a75a2999ce98f8f2e373" -dependencies = [ - "anchor-attribute-access-control 0.30.1", - "anchor-attribute-account 0.30.1", - "anchor-attribute-constant 0.30.1", - "anchor-attribute-error 0.30.1", - "anchor-attribute-event 0.30.1", - "anchor-attribute-program 0.30.1", - "anchor-derive-accounts 0.30.1", - "anchor-derive-serde 0.30.1", - "anchor-derive-space 0.30.1", - "arrayref", - "base64 0.21.7", - "bincode", - "borsh 0.10.3", - "bytemuck", - "getrandom 0.2.10", - "solana-program", - "thiserror", -] - -[[package]] -name = "anchor-lang-idl" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31cf97b4e6f7d6144a05e435660fcf757dbc3446d38d0e2b851d11ed13625bba" -dependencies = [ - "anchor-lang-idl-spec", - "anyhow", - "heck 0.3.3", - "serde", - "serde_json", - "sha2 0.10.8", -] - -[[package]] -name = "anchor-lang-idl-spec" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bdf143115440fe621bdac3a29a1f7472e09f6cd82b2aa569429a0c13f103838" -dependencies = [ - "anyhow", - "serde", -] - [[package]] name = "anchor-spl" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c4fd6e43b2ca6220d2ef1641539e678bfc31b6cc393cf892b373b5997b6a39a" dependencies = [ - "anchor-lang 0.29.0", + "anchor-lang", "solana-program", - "spl-associated-token-account 2.3.0", + "spl-associated-token-account", "spl-token 4.0.0", "spl-token-2022 0.9.0", ] @@ -519,24 +359,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "anchor-syn" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f99daacb53b55cfd37ce14d6c9905929721137fd4c67bbab44a19802aecb622f" -dependencies = [ - "anyhow", - "bs58 0.5.0", - "heck 0.3.3", - "proc-macro2", - "quote", - "serde", - "serde_json", - "sha2 0.10.8", - "syn 1.0.109", - "thiserror", -] - [[package]] name = "android-tzdata" version = "0.1.1" @@ -1831,7 +1653,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" dependencies = [ "borsh-derive 0.10.3", - "hashbrown 0.13.1", + "hashbrown 0.11.2", ] [[package]] @@ -2124,11 +1946,11 @@ dependencies = [ [[package]] name = "circuit-breaker" -version = "0.1.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.1.2" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -2765,11 +2587,11 @@ dependencies = [ [[package]] name = "data-credits" -version = "0.2.2" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.2.3" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -3143,11 +2965,11 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "fanout" -version = "0.1.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.1.1" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -3720,10 +3542,10 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "helium-anchor-gen" version = "0.1.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", "circuit-breaker", "data-credits", "fanout", @@ -3766,17 +3588,17 @@ dependencies = [ [[package]] name = "helium-entity-manager" -version = "0.2.11" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.3.1" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] name = "helium-lib" -version = "0.0.0" -source = "git+https://github.com/helium/helium-wallet-rs.git?branch=master#21208fcf8dcaaee955fa38798d9fa0da6b8d3f7b" +version = "0.1.1" +source = "git+https://github.com/helium/helium-wallet-rs.git?branch=master#6850e414af3d062e24362fcdc9bd02d6b34d25a5" dependencies = [ "anchor-client", "anchor-spl", @@ -3805,7 +3627,7 @@ dependencies = [ "solana-sdk", "solana-transaction-status", "spl-account-compression", - "spl-associated-token-account 3.0.2", + "spl-associated-token-account", "spl-memo", "thiserror", "tonic", @@ -3831,11 +3653,11 @@ dependencies = [ [[package]] name = "helium-sub-daos" -version = "0.1.8" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.2.9" +source = 
"git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -3885,11 +3707,11 @@ checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" [[package]] name = "hexboosting" -version = "0.1.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.2.0" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -4373,6 +4195,7 @@ dependencies = [ "rust_decimal_macros", "serde", "serde_json", + "solana", "sqlx", "task-manager", "thiserror", @@ -4701,20 +4524,20 @@ dependencies = [ [[package]] name = "lazy-distributor" -version = "0.2.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.3.0" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] name = "lazy-transactions" version = "0.2.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -5091,11 +4914,11 @@ dependencies = [ [[package]] name = "mobile-entity-manager" -version = "0.1.3" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.2.1" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -5949,10 +5772,10 @@ dependencies = [ [[package]] name = "price-oracle" version = "0.2.1" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -6125,9 +5948,9 @@ dependencies = [ [[package]] name = "pyth-solana-receiver-sdk" version = "0.3.1" -source = "git+https://github.com/madninja/pyth-crosschain.git?branch=madninja%2Fcap_solana_dep#6576247294bde3ab7b62f7a2dfb4d4d48c401b35" +source = "git+https://github.com/madninja/pyth-crosschain.git?branch=madninja%2Fcap_solana_dep#0b40f6d9599c502b2e1dab4262efba66459b8690" dependencies = [ - "anchor-lang 0.29.0", + "anchor-lang", "hex", "pythnet-sdk", "solana-program", @@ -6136,9 +5959,9 @@ dependencies = [ [[package]] name = "pythnet-sdk" version = "2.3.0" -source = "git+https://github.com/madninja/pyth-crosschain.git?branch=madninja%2Fcap_solana_dep#6576247294bde3ab7b62f7a2dfb4d4d48c401b35" +source = "git+https://github.com/madninja/pyth-crosschain.git?branch=madninja%2Fcap_solana_dep#0b40f6d9599c502b2e1dab4262efba66459b8690" dependencies = [ - "anchor-lang 0.30.1", + "anchor-lang", "bincode", "borsh 0.10.3", "bytemuck", @@ -6598,11 +6421,11 @@ dependencies = [ [[package]] name = "rewards-oracle" -version = "0.2.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.2.3" +source = 
"git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -7358,8 +7181,8 @@ dependencies = [ "solana-sdk", "spl-token 4.0.0", "spl-token-2022 1.0.0", - "spl-token-group-interface 0.1.0", - "spl-token-metadata-interface 0.2.0", + "spl-token-group-interface", + "spl-token-metadata-interface", "thiserror", "zstd", ] @@ -7963,7 +7786,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-sdk", - "spl-associated-token-account 2.3.0", + "spl-associated-token-account", "spl-memo", "spl-token 4.0.0", "spl-token-2022 1.0.0", @@ -8102,7 +7925,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c43bd4455d9fb29b9e4f83c087ccffa2f6f41fecfc0549932ae391d00f3378" dependencies = [ - "anchor-lang 0.29.0", + "anchor-lang", "bytemuck", "spl-concurrent-merkle-tree", "spl-noop", @@ -8124,22 +7947,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "spl-associated-token-account" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2e688554bac5838217ffd1fab7845c573ff106b6336bf7d290db7c98d5a8efd" -dependencies = [ - "assert_matches", - "borsh 1.5.1", - "num-derive 0.4.2", - "num-traits", - "solana-program", - "spl-token 4.0.0", - "spl-token-2022 3.0.2", - "thiserror", -] - [[package]] name = "spl-concurrent-merkle-tree" version = "0.2.0" @@ -8159,18 +7966,7 @@ checksum = "daa600f2fe56f32e923261719bae640d873edadbc5237681a39b8e37bfd4d263" dependencies = [ "bytemuck", "solana-program", - "spl-discriminator-derive 0.1.2", -] - -[[package]] -name = "spl-discriminator" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d1814406e98b08c5cd02c1126f83fd407ad084adce0b05fda5730677822eac" -dependencies = [ - "bytemuck", - "solana-program", - "spl-discriminator-derive 0.2.0", + "spl-discriminator-derive", ] [[package]] @@ -8180,18 +7976,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07fd7858fc4ff8fb0e34090e41d7eb06a823e1057945c26d480bfc21d2338a93" dependencies = [ "quote", - "spl-discriminator-syn 0.1.2", - "syn 2.0.58", -] - -[[package]] -name = "spl-discriminator-derive" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" -dependencies = [ - "quote", - "spl-discriminator-syn 0.2.0", + "spl-discriminator-syn", "syn 2.0.58", ] @@ -8208,19 +7993,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "spl-discriminator-syn" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f05593b7ca9eac7caca309720f2eafb96355e037e6d373b909a80fe7b69b9" -dependencies = [ - "proc-macro2", - "quote", - "sha2 0.10.8", - "syn 2.0.58", - "thiserror", -] - [[package]] name = "spl-memo" version = "4.0.0" @@ -8249,20 +8021,7 @@ dependencies = [ "bytemuck", "solana-program", "solana-zk-token-sdk", - "spl-program-error 0.3.1", -] - -[[package]] -name = "spl-pod" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ce669f48cf2eca1ec518916d8725596bfb655beb1c74374cf71dc6cb773c9" -dependencies = [ - "borsh 1.5.1", - "bytemuck", - "solana-program", - "solana-zk-token-sdk", - "spl-program-error 0.4.1", + "spl-program-error", ] [[package]] @@ -8274,20 +8033,7 @@ dependencies = [ "num-derive 0.4.2", "num-traits", "solana-program", - 
"spl-program-error-derive 0.3.2", - "thiserror", -] - -[[package]] -name = "spl-program-error" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49065093ea91f57b9b2bd81493ff705e2ad4e64507a07dbc02b085778e02770e" -dependencies = [ - "num-derive 0.4.2", - "num-traits", - "solana-program", - "spl-program-error-derive 0.4.1", + "spl-program-error-derive", "thiserror", ] @@ -8303,18 +8049,6 @@ dependencies = [ "syn 2.0.58", ] -[[package]] -name = "spl-program-error-derive" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d375dd76c517836353e093c2dbb490938ff72821ab568b545fd30ab3256b3e" -dependencies = [ - "proc-macro2", - "quote", - "sha2 0.10.8", - "syn 2.0.58", -] - [[package]] name = "spl-tlv-account-resolution" version = "0.4.0" @@ -8323,10 +8057,10 @@ checksum = "062e148d3eab7b165582757453632ffeef490c02c86a48bfdb4988f63eefb3b9" dependencies = [ "bytemuck", "solana-program", - "spl-discriminator 0.1.1", - "spl-pod 0.1.1", - "spl-program-error 0.3.1", - "spl-type-length-value 0.3.1", + "spl-discriminator", + "spl-pod", + "spl-program-error", + "spl-type-length-value", ] [[package]] @@ -8337,24 +8071,10 @@ checksum = "56f335787add7fa711819f9e7c573f8145a5358a709446fe2d24bf2a88117c90" dependencies = [ "bytemuck", "solana-program", - "spl-discriminator 0.1.1", - "spl-pod 0.1.1", - "spl-program-error 0.3.1", - "spl-type-length-value 0.3.1", -] - -[[package]] -name = "spl-tlv-account-resolution" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cace91ba08984a41556efe49cbf2edca4db2f577b649da7827d3621161784bf8" -dependencies = [ - "bytemuck", - "solana-program", - "spl-discriminator 0.2.2", - "spl-pod 0.2.2", - "spl-program-error 0.4.1", - "spl-type-length-value 0.4.3", + "spl-discriminator", + "spl-pod", + "spl-program-error", + "spl-type-length-value", ] [[package]] @@ -8401,11 +8121,11 @@ dependencies = [ "solana-program", "solana-zk-token-sdk", "spl-memo", - "spl-pod 0.1.1", + "spl-pod", "spl-token 4.0.0", - "spl-token-metadata-interface 0.2.0", + "spl-token-metadata-interface", "spl-transfer-hook-interface 0.3.0", - "spl-type-length-value 0.3.1", + "spl-type-length-value", "thiserror", ] @@ -8424,36 +8144,12 @@ dependencies = [ "solana-security-txt", "solana-zk-token-sdk", "spl-memo", - "spl-pod 0.1.1", + "spl-pod", "spl-token 4.0.0", - "spl-token-group-interface 0.1.0", - "spl-token-metadata-interface 0.2.0", + "spl-token-group-interface", + "spl-token-metadata-interface", "spl-transfer-hook-interface 0.4.1", - "spl-type-length-value 0.3.1", - "thiserror", -] - -[[package]] -name = "spl-token-2022" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5412f99ae7ee6e0afde00defaa354e6228e47e30c0e3adf553e2e01e6abb584" -dependencies = [ - "arrayref", - "bytemuck", - "num-derive 0.4.2", - "num-traits", - "num_enum 0.7.2", - "solana-program", - "solana-security-txt", - "solana-zk-token-sdk", - "spl-memo", - "spl-pod 0.2.2", - "spl-token 4.0.0", - "spl-token-group-interface 0.2.3", - "spl-token-metadata-interface 0.3.3", - "spl-transfer-hook-interface 0.6.3", - "spl-type-length-value 0.4.3", + "spl-type-length-value", "thiserror", ] @@ -8465,22 +8161,9 @@ checksum = "b889509d49fa74a4a033ca5dae6c2307e9e918122d97e58562f5c4ffa795c75d" dependencies = [ "bytemuck", "solana-program", - "spl-discriminator 0.1.1", - "spl-pod 0.1.1", - "spl-program-error 0.3.1", -] - -[[package]] -name = "spl-token-group-interface" -version 
= "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d419b5cfa3ee8e0f2386fd7e02a33b3ec8a7db4a9c7064a2ea24849dc4a273b6" -dependencies = [ - "bytemuck", - "solana-program", - "spl-discriminator 0.2.2", - "spl-pod 0.2.2", - "spl-program-error 0.4.1", + "spl-discriminator", + "spl-pod", + "spl-program-error", ] [[package]] @@ -8491,24 +8174,10 @@ checksum = "4c16ce3ba6979645fb7627aa1e435576172dd63088dc7848cb09aa331fa1fe4f" dependencies = [ "borsh 0.10.3", "solana-program", - "spl-discriminator 0.1.1", - "spl-pod 0.1.1", - "spl-program-error 0.3.1", - "spl-type-length-value 0.3.1", -] - -[[package]] -name = "spl-token-metadata-interface" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30179c47e93625680dabb620c6e7931bd12d62af390f447bc7beb4a3a9b5feee" -dependencies = [ - "borsh 1.5.1", - "solana-program", - "spl-discriminator 0.2.2", - "spl-pod 0.2.2", - "spl-program-error 0.4.1", - "spl-type-length-value 0.4.3", + "spl-discriminator", + "spl-pod", + "spl-program-error", + "spl-type-length-value", ] [[package]] @@ -8520,11 +8189,11 @@ dependencies = [ "arrayref", "bytemuck", "solana-program", - "spl-discriminator 0.1.1", - "spl-pod 0.1.1", - "spl-program-error 0.3.1", + "spl-discriminator", + "spl-pod", + "spl-program-error", "spl-tlv-account-resolution 0.4.0", - "spl-type-length-value 0.3.1", + "spl-type-length-value", ] [[package]] @@ -8536,27 +8205,11 @@ dependencies = [ "arrayref", "bytemuck", "solana-program", - "spl-discriminator 0.1.1", - "spl-pod 0.1.1", - "spl-program-error 0.3.1", + "spl-discriminator", + "spl-pod", + "spl-program-error", "spl-tlv-account-resolution 0.5.2", - "spl-type-length-value 0.3.1", -] - -[[package]] -name = "spl-transfer-hook-interface" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a98359769cd988f7b35c02558daa56d496a7e3bd8626e61f90a7c757eedb9b" -dependencies = [ - "arrayref", - "bytemuck", - "solana-program", - "spl-discriminator 0.2.2", - "spl-pod 0.2.2", - "spl-program-error 0.4.1", - "spl-tlv-account-resolution 0.6.3", - "spl-type-length-value 0.4.3", + "spl-type-length-value", ] [[package]] @@ -8567,22 +8220,9 @@ checksum = "8f9ebd75d29c5f48de5f6a9c114e08531030b75b8ac2c557600ac7da0b73b1e8" dependencies = [ "bytemuck", "solana-program", - "spl-discriminator 0.1.1", - "spl-pod 0.1.1", - "spl-program-error 0.3.1", -] - -[[package]] -name = "spl-type-length-value" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ce13429dbd41d2cee8a73931c05fda0b0c8ca156a8b0c19445642550bb61a" -dependencies = [ - "bytemuck", - "solana-program", - "spl-discriminator 0.2.2", - "spl-pod 0.2.2", - "spl-program-error 0.4.1", + "spl-discriminator", + "spl-pod", + "spl-program-error", ] [[package]] @@ -9275,11 +8915,11 @@ dependencies = [ [[package]] name = "treasury-management" -version = "0.2.0" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.2.1" +source = "git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -9523,11 +9163,11 @@ dependencies = [ [[package]] name = "voter-stake-registry" -version = "0.3.3" -source = "git+https://github.com/helium/helium-anchor-gen.git#3036b33793cfe54b20ab24761677493510d5bd50" +version = "0.4.0" +source = 
"git+https://github.com/helium/helium-anchor-gen.git#87b00759b4ca4727b36e8221410f4255ad06eca4" dependencies = [ "anchor-gen", - "anchor-lang 0.29.0", + "anchor-lang", ] [[package]] @@ -10082,4 +9722,4 @@ dependencies = [ "cc", "libc", "pkg-config", -] +] \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index b82f97c4b..de299736c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,14 +70,13 @@ helium-lib = { git = "https://github.com/helium/helium-wallet-rs.git", branch = hextree = { git = "https://github.com/jaykickliter/HexTree", branch = "main", features = [ "disktree", ] } -helium-proto = { git = "https://github.com/helium/proto", branch = "master", features = [ - "services", -] } +helium-proto = { git = "https://github.com/helium/proto", branch = "master", features = ["services"] } beacon = { git = "https://github.com/helium/proto", branch = "master" } solana-client = "1.18" solana-sdk = "1.18" solana-program = "1.18" spl-token = "3.5.0" +spl-associated-token-account = "1.1.1" reqwest = { version = "0", default-features = false, features = [ "gzip", "json", @@ -128,13 +127,16 @@ sqlx = { git = "https://github.com/launchbadge/sqlx.git", rev = "42dd78fe931df65 # When attempting to test proto changes without needing to push a branch you can # patch the github url to point to your local proto repo. -# +# # Patching for beacon must point directly to the crate, it will not look in the # repo for sibling crates. -# -# [patch.'https://github.com/helium/proto'] -# helium-proto = { path = "../proto" } -# beacon = { path = "../proto/beacon" } - +# # [patch.'https://github.com/helium/proto'] # helium-proto = { git = "https://www.github.com/helium/proto.git", branch = "jg/disco-shares-v2" } + +# [patch.'https://github.com/helium/helium-wallet-rs'] +# helium-lib = { path = "../helium-wallet-rs/helium-lib" } + +# [patch.'https://github.com/helium/helium-anchor-gen'] +# helium-anchor-gen = { path = "../helium-anchor-gen" } + diff --git a/file_store/src/traits/msg_verify.rs b/file_store/src/traits/msg_verify.rs index 64990365f..a2e730c3d 100644 --- a/file_store/src/traits/msg_verify.rs +++ b/file_store/src/traits/msg_verify.rs @@ -44,15 +44,14 @@ impl_msg_verify!(LoraStreamSessionInitV1, signature); impl_msg_verify!(DataTransferSessionReqV1, signature); impl_msg_verify!(CoverageObjectReqV1, signature); impl_msg_verify!(ServiceProviderBoostedRewardsBannedRadioReqV1, signature); -impl_msg_verify!(iot_config::OrgCreateHeliumReqV1, signature); -impl_msg_verify!(iot_config::OrgCreateRoamerReqV1, signature); -impl_msg_verify!(iot_config::OrgUpdateReqV1, signature); impl_msg_verify!(iot_config::OrgDisableReqV1, signature); impl_msg_verify!(iot_config::OrgEnableReqV1, signature); impl_msg_verify!(iot_config::OrgDisableResV1, signature); impl_msg_verify!(iot_config::OrgEnableResV1, signature); impl_msg_verify!(iot_config::OrgResV1, signature); impl_msg_verify!(iot_config::OrgListResV1, signature); +impl_msg_verify!(iot_config::OrgResV2, signature); +impl_msg_verify!(iot_config::OrgListResV2, signature); impl_msg_verify!(iot_config::RouteStreamReqV1, signature); impl_msg_verify!(iot_config::RouteListReqV1, signature); impl_msg_verify!(iot_config::RouteGetReqV1, signature); diff --git a/iot_config/Cargo.toml b/iot_config/Cargo.toml index a16da50c9..480fec034 100644 --- a/iot_config/Cargo.toml +++ b/iot_config/Cargo.toml @@ -47,6 +47,7 @@ triggered = { workspace = true } task-manager = { path = "../task_manager" } humantime-serde = { workspace = true } custom-tracing = { path = "../custom_tracing", 
features = ["grpc"] } +solana = { path = "../solana" } [dev-dependencies] rand = { workspace = true } diff --git a/iot_config/migrations/20241203190903_solana_net_ids.sql b/iot_config/migrations/20241203190903_solana_net_ids.sql new file mode 100644 index 000000000..3ba1d229b --- /dev/null +++ b/iot_config/migrations/20241203190903_solana_net_ids.sql @@ -0,0 +1,9 @@ +-- Migration here solely for testing purposes +-- An instance of account-postgres-sink +-- Will alter this table depending on an on-chain struct +CREATE TABLE IF NOT EXISTS solana_net_ids ( + address TEXT PRIMARY KEY, + id INTEGER NOT NULL, + authority TEXT NOT NULL, + current_addr_offset NUMERIC NOT NULL +); \ No newline at end of file diff --git a/iot_config/migrations/20241203190910_solana_organizations.sql b/iot_config/migrations/20241203190910_solana_organizations.sql new file mode 100644 index 000000000..b63c0a78d --- /dev/null +++ b/iot_config/migrations/20241203190910_solana_organizations.sql @@ -0,0 +1,39 @@ +-- Migration here solely for testing purposes +-- An instance of account-postgres-sink +-- Will alter this table depending on an on-chain struct +CREATE TABLE IF NOT EXISTS solana_organizations ( + address TEXT PRIMARY KEY, + net_id TEXT NOT NULL, + authority TEXT NOT NULL, + oui BIGINT NOT NULL, + escrow_key TEXT NOT NULL, + approved BOOLEAN NOT NULL +); + +CREATE OR REPLACE FUNCTION delete_routes_on_solana_organizations_delete() RETURNS trigger AS $$ +BEGIN + DELETE FROM routes WHERE routes.oui = OLD.oui; + RETURN OLD; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER delete_routes_on_solana_organizations_delete +AFTER DELETE ON solana_organizations +FOR EACH ROW +EXECUTE FUNCTION delete_routes_on_solana_organizations_delete(); + +CREATE OR REPLACE FUNCTION add_lock_record_on_solana_organizations_insert() RETURNS trigger AS $$ +BEGIN + INSERT INTO organization_locks (organization, locked) + SELECT sol_org.address, COALESCE(org.locked, TRUE) + FROM solana_organizations sol_org + LEFT JOIN organizations org ON sol_org.oui = org.oui + WHERE sol_org.address = NEW.address; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER add_lock_record_on_solana_organizations_insert +AFTER INSERT ON solana_organizations +FOR EACH ROW +EXECUTE FUNCTION add_lock_record_on_solana_organizations_insert(); \ No newline at end of file diff --git a/iot_config/migrations/20241203190923_solana_organization_devaddr_constraints.sql b/iot_config/migrations/20241203190923_solana_organization_devaddr_constraints.sql new file mode 100644 index 000000000..8256e12d9 --- /dev/null +++ b/iot_config/migrations/20241203190923_solana_organization_devaddr_constraints.sql @@ -0,0 +1,10 @@ +-- Migration here solely for testing purposes +-- An instance of account-postgres-sink +-- Will alter this table depending on an on-chain struct +CREATE TABLE IF NOT EXISTS solana_organization_devaddr_constraints ( + address TEXT PRIMARY KEY, + net_id TEXT NOT NULL, + organization TEXT NOT NULL, + start_addr NUMERIC NOT NULL, + end_addr NUMERIC NOT NULL +) \ No newline at end of file diff --git a/iot_config/migrations/20241203190936_solana_organization_delegate_keys.sql b/iot_config/migrations/20241203190936_solana_organization_delegate_keys.sql new file mode 100644 index 000000000..5f73cd957 --- /dev/null +++ b/iot_config/migrations/20241203190936_solana_organization_delegate_keys.sql @@ -0,0 +1,8 @@ +-- Migration here solely for testing purposes +-- An instance of account-postgres-sink +-- Will alter this table depending on an on-chain struct +CREATE TABLE IF 
NOT EXISTS solana_organization_delegate_keys ( + address TEXT PRIMARY KEY, + organization TEXT NOT NULL, + delegate TEXT NOT NULL +); \ No newline at end of file diff --git a/iot_config/migrations/20241203190945_update_oui_references.sql b/iot_config/migrations/20241203190945_update_oui_references.sql new file mode 100644 index 000000000..5e22a646d --- /dev/null +++ b/iot_config/migrations/20241203190945_update_oui_references.sql @@ -0,0 +1 @@ +ALTER TABLE routes DROP CONSTRAINT IF EXISTS routes_oui_fkey; \ No newline at end of file diff --git a/iot_config/migrations/20241203193310_add_organization_locks.sql b/iot_config/migrations/20241203193310_add_organization_locks.sql new file mode 100644 index 000000000..eeb415255 --- /dev/null +++ b/iot_config/migrations/20241203193310_add_organization_locks.sql @@ -0,0 +1,22 @@ +create table organization_locks ( + organization TEXT PRIMARY KEY NOT NULL, + locked BOOL DEFAULT false, + inserted_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +select trigger_updated_at('organization_locks'); + +DO $$ +BEGIN + IF EXISTS ( + SELECT FROM information_schema.tables + WHERE table_name = 'solana_organizations' + ) THEN + INSERT INTO organization_locks (organization, locked) + SELECT sol_org.address, org.locked + FROM solana_organizations sol_org + LEFT JOIN organizations org ON sol_org.oui = org.oui; + END IF; +END; +$$; diff --git a/iot_config/pkg/settings-template.toml b/iot_config/pkg/settings-template.toml index 7861c7779..e3c7ae69b 100644 --- a/iot_config/pkg/settings-template.toml +++ b/iot_config/pkg/settings-template.toml @@ -1,5 +1,5 @@ # log settings for the application (RUST_LOG format). Default below -# +# # log = "iot-config=debug,poc_store=info" diff --git a/iot_config/src/client/org_client.rs b/iot_config/src/client/org_client.rs index 00d376d87..9df82a9ec 100644 --- a/iot_config/src/client/org_client.rs +++ b/iot_config/src/client/org_client.rs @@ -6,13 +6,13 @@ use async_trait::async_trait; use chrono::Utc; use file_store::traits::TimestampEncode; use helium_proto::services::iot_config::{ - OrgDisableReqV1, OrgEnableReqV1, OrgGetReqV1, OrgListReqV1, OrgResV1, OrgV1, + OrgDisableReqV1, OrgEnableReqV1, OrgGetReqV2, OrgListReqV2, OrgResV2, OrgV2, }; #[async_trait] pub trait Orgs: Send + Sync + 'static { - async fn get(&mut self, oui: u64) -> Result; - async fn list(&mut self) -> Result, ClientError>; + async fn get(&mut self, oui: u64) -> Result; + async fn list(&mut self) -> Result, ClientError>; async fn enable(&mut self, oui: u64) -> Result<(), ClientError>; async fn disable(&mut self, oui: u64) -> Result<(), ClientError>; } @@ -40,19 +40,19 @@ impl OrgClient { #[async_trait] impl Orgs for OrgClient { - async fn get(&mut self, oui: u64) -> Result { + async fn get(&mut self, oui: u64) -> Result { tracing::debug!(%oui, "retrieving org"); - let req = OrgGetReqV1 { oui }; - let res = call_with_retry!(self.client.get(req.clone()))?.into_inner(); + let req = OrgGetReqV2 { oui }; + let res = call_with_retry!(self.client.get_v2(req.clone()))?.into_inner(); res.verify(&self.config_pubkey)?; Ok(res) } - async fn list(&mut self) -> Result, ClientError> { + async fn list(&mut self) -> Result, ClientError> { tracing::debug!("retrieving org list"); - let res = call_with_retry!(self.client.list(OrgListReqV1 {}))?.into_inner(); + let res = call_with_retry!(self.client.list_v2(OrgListReqV2 {}))?.into_inner(); res.verify(&self.config_pubkey)?; Ok(res.orgs) } diff --git a/iot_config/src/gateway_service.rs 
b/iot_config/src/gateway_service.rs index 78c5f67d5..5b204b4c9 100644 --- a/iot_config/src/gateway_service.rs +++ b/iot_config/src/gateway_service.rs @@ -31,19 +31,23 @@ const CACHE_TTL: Duration = Duration::from_secs(60 * 60 * 3); pub struct GatewayService { auth_cache: AuthCache, gateway_cache: Arc>, + iot_config_db_pool: Pool, metadata_pool: Pool, region_map: RegionMapReader, signing_key: Arc, delegate_cache: watch::Receiver, + delegate_updater: Arc>, } impl GatewayService { pub fn new( settings: &Settings, + iot_config_db_pool: Pool, metadata_pool: Pool, region_map: RegionMapReader, auth_cache: AuthCache, delegate_cache: watch::Receiver, + delegate_updater: Arc>, ) -> Result { let gateway_cache = Arc::new(Cache::new()); let cache_clone = gateway_cache.clone(); @@ -52,10 +56,12 @@ impl GatewayService { Ok(Self { auth_cache, gateway_cache, + iot_config_db_pool, metadata_pool, region_map, signing_key: Arc::new(settings.signing_keypair()?), delegate_cache, + delegate_updater, }) } @@ -75,7 +81,7 @@ impl GatewayService { Ok(()) } - fn verify_location_request(&self, request: &GatewayLocationReqV1) -> Result<(), Status> { + async fn verify_location_request(&self, request: &GatewayLocationReqV1) -> Result<(), Status> { let signature_bytes = request.signer.clone(); let signer_pubkey = verify_public_key(&signature_bytes)?; @@ -87,15 +93,27 @@ impl GatewayService { return Ok(()); } - self.delegate_cache - .borrow() - .contains(&signature_bytes.clone().into()) - .then(|| { + let signature_key: PublicKeyBinary = signature_bytes.clone().into(); + + if self.delegate_cache.borrow().contains(&signature_key) { + return request + .verify(&signer_pubkey) + .map_err(|_| Status::invalid_argument("bad request signature")); + } + + match org::is_delegate_key(&self.iot_config_db_pool, &signature_key).await { + Ok(true) => { + let mut current_cache = self.delegate_cache.borrow().clone(); + current_cache.insert(signature_key.clone()); + self.delegate_updater.send(current_cache).ok(); + request .verify(&signer_pubkey) .map_err(|_| Status::invalid_argument("bad request signature")) - }) - .ok_or_else(|| Status::permission_denied("unauthorized request signature"))? 
+ } + Ok(false) => Err(Status::permission_denied("unauthorized request signature")), + Err(_) => Err(Status::internal("error checking delegate permissions")), + } } async fn resolve_gateway_info(&self, pubkey: &PublicKeyBinary) -> Result { @@ -141,7 +159,7 @@ impl iot_config::Gateway for GatewayService { custom_tracing::record_b58("pub_key", &request.gateway); custom_tracing::record_b58("signer", &request.signer); - self.verify_location_request(&request)?; + self.verify_location_request(&request).await?; let address: &PublicKeyBinary = &request.gateway.into(); diff --git a/iot_config/src/helium_netids.rs b/iot_config/src/helium_netids.rs index 7b753a561..47a8fd996 100644 --- a/iot_config/src/helium_netids.rs +++ b/iot_config/src/helium_netids.rs @@ -1,13 +1,8 @@ -use crate::lora_field::{self, DevAddrConstraint, LoraField, NetIdField}; -use helium_proto::services::iot_config::org_create_helium_req_v1::HeliumNetId as ProtoNetId; -use std::{collections::HashSet, ops::RangeInclusive}; +use crate::lora_field::{LoraField, NetIdField}; const TYPE_0_ID: NetIdField = LoraField(0x00003c); const TYPE_3_ID: NetIdField = LoraField(0x60002d); const TYPE_6_ID: NetIdField = LoraField(0xc00053); -const TYPE_0_RANGE: RangeInclusive = 2_013_265_920..=2_046_820_351; -const TYPE_3_RANGE: RangeInclusive = 3_763_994_624..=3_764_125_695; -const TYPE_6_RANGE: RangeInclusive = 4_227_943_424..=4_227_944_447; #[derive(Clone, Copy)] pub enum HeliumNetId { @@ -16,24 +11,6 @@ pub enum HeliumNetId { Type6_0xc00053, } -impl HeliumNetId { - pub fn id(&self) -> NetIdField { - match *self { - HeliumNetId::Type0_0x00003c => TYPE_0_ID, - HeliumNetId::Type3_0x60002d => TYPE_3_ID, - HeliumNetId::Type6_0xc00053 => TYPE_6_ID, - } - } - - pub fn addr_range(&self) -> RangeInclusive { - match *self { - HeliumNetId::Type0_0x00003c => TYPE_0_RANGE, - HeliumNetId::Type3_0x60002d => TYPE_3_RANGE, - HeliumNetId::Type6_0xc00053 => TYPE_6_RANGE, - } - } -} - impl TryFrom for HeliumNetId { type Error = &'static str; @@ -47,408 +24,3 @@ impl TryFrom for HeliumNetId { Ok(id) } } - -#[async_trait::async_trait] -pub trait AddressStore { - type Error; - - async fn get_used_addrs(&mut self, net_id: HeliumNetId) -> Result, Self::Error>; - async fn claim_addrs( - &mut self, - net_id: HeliumNetId, - new_addrs: &[u32], - ) -> Result<(), Self::Error>; - async fn release_addrs( - &mut self, - net_id: HeliumNetId, - released_addrs: &[u32], - ) -> Result<(), Self::Error>; -} - -#[async_trait::async_trait] -impl AddressStore for sqlx::Transaction<'_, sqlx::Postgres> { - type Error = sqlx::Error; - - async fn get_used_addrs(&mut self, net_id: HeliumNetId) -> Result, Self::Error> { - Ok(sqlx::query_scalar::<_, i32>( - " select devaddr from helium_used_devaddrs where net_id = $1 order by devaddr asc ", - ) - .bind(i32::from(net_id.id())) - .fetch_all(self) - .await? - .into_iter() - .map(|addr| addr as u32) - .collect::>()) - } - - async fn claim_addrs( - &mut self, - net_id: HeliumNetId, - new_addrs: &[u32], - ) -> Result<(), Self::Error> { - let mut query_builder: sqlx::QueryBuilder = - sqlx::QueryBuilder::new(" insert into helium_used_devaddrs (devaddr, net_id) "); - query_builder.push_values(new_addrs, |mut builder, addr| { - builder - .push_bind(*addr as i32) - .push_bind(i32::from(net_id.id())); - }); - Ok(query_builder.build().execute(self).await.map(|_| ())?) 
- } - - async fn release_addrs( - &mut self, - net_id: HeliumNetId, - released_addrs: &[u32], - ) -> Result<(), Self::Error> { - let net_id = i32::from(net_id.id()); - let released_addrs = released_addrs - .iter() - .map(|addr| (*addr, net_id)) - .collect::>(); - let mut query_builder: sqlx::QueryBuilder = sqlx::QueryBuilder::new( - " delete from helium_used_devaddrs where (devaddr, net_id) in ", - ); - query_builder.push_tuples(released_addrs, |mut builder, (addr, id)| { - builder.push_bind(addr as i32).push_bind(id); - }); - Ok(query_builder.build().execute(self).await.map(|_| ())?) - } -} - -pub fn is_helium_netid(net_id: &NetIdField) -> bool { - [TYPE_0_ID, TYPE_3_ID, TYPE_6_ID].contains(net_id) -} - -pub async fn checkout_devaddr_constraints( - addr_store: &mut S, - count: u64, - net_id: HeliumNetId, -) -> Result, DevAddrConstraintsError> -where - S: AddressStore, -{ - let addr_range = net_id.addr_range(); - let used_addrs = addr_store - .get_used_addrs(net_id) - .await - .map_err(DevAddrConstraintsError::AddressStore)?; - - let range_start = *addr_range.start(); - let range_end = *addr_range.end(); - let last_used = used_addrs.last().copied().unwrap_or(range_start); - let used_range = (range_start..=last_used).collect::>(); - let used_addrs = used_addrs.into_iter().collect::>(); - - let mut available_diff = used_range - .difference(&used_addrs) - .copied() - .collect::>(); - available_diff.sort(); - - let mut claimed_addrs = available_diff - .drain(0..(count as usize).min(available_diff.len())) - .collect::>(); - - let mut next_addr = last_used + 1; - while claimed_addrs.len() < count as usize { - if next_addr <= range_end { - claimed_addrs.push(next_addr); - next_addr += 1 - } else { - return Err(DevAddrConstraintsError::NoAvailableAddrs); - } - } - - addr_store - .claim_addrs(net_id, &claimed_addrs) - .await - .map_err(DevAddrConstraintsError::AddressStore)?; - - let new_constraints = constraints_from_addrs(claimed_addrs)?; - Ok(new_constraints) -} - -pub async fn checkout_specified_devaddr_constraint( - addr_store: &mut S, - net_id: HeliumNetId, - requested_constraint: &DevAddrConstraint, -) -> Result<(), DevAddrConstraintsError> -where - S: AddressStore, -{ - let used_addrs = addr_store - .get_used_addrs(net_id) - .await - .map_err(DevAddrConstraintsError::AddressStore)?; - let request_addrs = (requested_constraint.start_addr.into() - ..=requested_constraint.end_addr.into()) - .collect::>(); - if request_addrs.iter().any(|&addr| used_addrs.contains(&addr)) { - return Err(DevAddrConstraintsError::ConstraintAddrInUse(format!( - "{request_addrs:?}" - ))); - }; - addr_store - .claim_addrs(net_id, &request_addrs) - .await - .map_err(DevAddrConstraintsError::AddressStore) -} - -#[derive(thiserror::Error, Debug)] -pub enum DevAddrConstraintsError { - #[error("AddressStore error: {0}")] - AddressStore(AS), - #[error("No devaddrs available for NetId")] - NoAvailableAddrs, - #[error("Error building constraint")] - InvalidConstraint(#[from] ConstraintsBuildError), - #[error("Requested constraint in use {0}")] - ConstraintAddrInUse(String), -} - -fn constraints_from_addrs( - addrs: Vec, -) -> Result, ConstraintsBuildError> { - let mut constraints = Vec::new(); - let mut start_addr: Option = None; - let mut end_addr: Option = None; - for addr in addrs { - match (start_addr, end_addr) { - (None, None) => start_addr = Some(addr), - (Some(_), None) => end_addr = Some(addr), - (Some(prev_addr), Some(next_addr)) => match addr { - addr if addr == next_addr + 1 => end_addr = Some(addr), - addr if 
addr > next_addr + 1 => { - constraints.push(DevAddrConstraint::new(prev_addr.into(), next_addr.into())?); - start_addr = Some(addr); - end_addr = None - } - _ => return Err(ConstraintsBuildError::EndAddr), - }, - _ => return Err(ConstraintsBuildError::StartAddr), - } - } - match (start_addr, end_addr) { - (Some(remaining_start), Some(remaining_end)) => constraints.push(DevAddrConstraint::new( - remaining_start.into(), - remaining_end.into(), - )?), - _ => return Err(ConstraintsBuildError::EndAddr), - } - Ok(constraints) -} - -#[derive(thiserror::Error, Debug)] -pub enum ConstraintsBuildError { - #[error("Constraint missing or invalid start addr")] - StartAddr, - #[error("Constraint missing or invalid end addr")] - EndAddr, - #[error("invalid constraint: {0}")] - InvalidConstraint(#[from] lora_field::DevAddrRangeError), -} - -impl From for HeliumNetId { - fn from(pni: ProtoNetId) -> Self { - match pni { - ProtoNetId::Type00x00003c => Self::Type0_0x00003c, - ProtoNetId::Type30x60002d => Self::Type3_0x60002d, - ProtoNetId::Type60xc00053 => Self::Type6_0xc00053, - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use std::collections::HashMap; - - #[async_trait::async_trait] - impl AddressStore for HashMap> { - type Error = &'static str; - - async fn get_used_addrs(&mut self, net_id: HeliumNetId) -> Result, Self::Error> { - let mut result = self.get(&net_id.id()).cloned().unwrap_or_default(); - result.sort(); - Ok(result) - } - - async fn claim_addrs( - &mut self, - net_id: HeliumNetId, - new_addrs: &[u32], - ) -> Result<(), Self::Error> { - self.entry(net_id.id()) - .and_modify(|addrs| new_addrs.iter().for_each(|addr| addrs.push(*addr))) - .or_insert(new_addrs.to_vec()); - Ok(()) - } - - async fn release_addrs( - &mut self, - net_id: HeliumNetId, - released_addrs: &[u32], - ) -> Result<(), Self::Error> { - self.entry(net_id.id()) - .and_modify(|addrs| addrs.retain(|addr| !released_addrs.contains(addr))); - Ok(()) - } - } - - #[tokio::test] - async fn get_free_addrs_from_used_range() { - let mut addr_store = HashMap::new(); - addr_store.insert( - HeliumNetId::Type0_0x00003c.id(), - vec![ - 2013265920, 2013265921, 2013265922, 2013265923, 2013265928, 2013265929, 2013265930, - 2013265931, 2013265936, 2013265937, - ], - ); - let selected_constraints = - checkout_devaddr_constraints(&mut addr_store, 10, HeliumNetId::Type0_0x00003c) - .await - .expect("constraints selected from available addrs"); - let expected_constraints = vec![ - DevAddrConstraint::new(2013265924.into(), 2013265927.into()).expect("new constraint 1"), - DevAddrConstraint::new(2013265932.into(), 2013265935.into()).expect("new constraint 2"), - DevAddrConstraint::new(2013265938.into(), 2013265939.into()).expect("new constraint 3"), - ]; - assert_eq!(selected_constraints, expected_constraints); - addr_store - .entry(HeliumNetId::Type0_0x00003c.id()) - .and_modify(|addrs| addrs.sort()); - let used_addrs = addr_store - .get(&HeliumNetId::Type0_0x00003c.id()) - .cloned() - .unwrap(); - assert_eq!(used_addrs, (2013265920..=2013265939).collect::>()); - } - - #[tokio::test] - async fn get_free_addrs_from_new_range() { - let mut addr_store = HashMap::new(); - let selected_constraints = - checkout_devaddr_constraints(&mut addr_store, 10, HeliumNetId::Type0_0x00003c) - .await - .expect("constraints selected from available addrs"); - let expected_constraints = - vec![DevAddrConstraint::new(2013265920.into(), 2013265929.into()) - .expect("new constraint")]; - assert_eq!(selected_constraints, expected_constraints); - addr_store - 
.entry(HeliumNetId::Type0_0x00003c.id()) - .and_modify(|addrs| addrs.sort()); - let used_addrs = addr_store - .get(&HeliumNetId::Type0_0x00003c.id()) - .cloned() - .unwrap(); - assert_eq!(used_addrs, (2013265920..=2013265929).collect::>()); - } - - #[tokio::test] - async fn error_when_no_devaddrs_available() { - let mut addr_store = HashMap::new(); - addr_store.insert( - HeliumNetId::Type6_0xc00053.id(), - (4227943424..4227944443).collect::>(), - ); - assert!( - checkout_devaddr_constraints(&mut addr_store, 6, HeliumNetId::Type6_0xc00053) - .await - .is_err() - ); - } - - #[tokio::test] - async fn error_when_odd_number_addrs_requested() { - let mut addr_store = HashMap::new(); - assert!( - checkout_devaddr_constraints(&mut addr_store, 5, HeliumNetId::Type0_0x00003c) - .await - .is_err() - ); - } - - #[tokio::test] - async fn error_when_addrs_uneven() { - let mut addr_store = HashMap::new(); - addr_store.insert( - HeliumNetId::Type3_0x60002d.id(), - vec![ - 3763994627, 3763994628, 3763994629, 3763994630, 3763994631, 3763994632, - ], - ); - assert!( - checkout_devaddr_constraints(&mut addr_store, 8, HeliumNetId::Type3_0x60002d) - .await - .is_err() - ); - } - - #[tokio::test] - async fn allocate_fewer_than_existing_gap() { - let mut addr_store = HashMap::new(); - checkout_devaddr_constraints(&mut addr_store, 8, HeliumNetId::Type0_0x00003c) - .await - .expect("allocate first round"); - checkout_devaddr_constraints(&mut addr_store, 32, HeliumNetId::Type0_0x00003c) - .await - .expect("allocate second round"); - checkout_devaddr_constraints(&mut addr_store, 8, HeliumNetId::Type0_0x00003c) - .await - .expect("allocate third round"); - // round 2 goes out of business, and their devaddrs are released back to the wild - let remove: Vec = addr_store - .get(&HeliumNetId::Type0_0x00003c.id()) - .cloned() - .unwrap() - .into_iter() - .skip(8) - .take(32) - .collect(); - assert_eq!( - Ok(()), - addr_store - .release_addrs(HeliumNetId::Type0_0x00003c, &remove) - .await - ); - assert_eq!( - 8 + 8, - addr_store - .get(&HeliumNetId::Type0_0x00003c.id()) - .unwrap() - .len() - ); - checkout_devaddr_constraints(&mut addr_store, 8, HeliumNetId::Type0_0x00003c) - .await - .expect("allocate fourth round"); - assert_eq!( - 8 + 8 + 8, - addr_store - .get(&HeliumNetId::Type0_0x00003c.id()) - .unwrap() - .len() - ); - } - - #[tokio::test] - async fn allocate_across_net_id() { - let mut addr_store = HashMap::new(); - checkout_devaddr_constraints(&mut addr_store, 8, HeliumNetId::Type6_0xc00053) - .await - .expect("testing allocation"); - checkout_devaddr_constraints(&mut addr_store, 8, HeliumNetId::Type3_0x60002d) - .await - .expect("special request allocation"); - checkout_devaddr_constraints(&mut addr_store, 8, HeliumNetId::Type0_0x00003c) - .await - .expect("average allocation"); - - assert_eq!( - 8 + 8 + 8, - addr_store.values().fold(0, |acc, elem| acc + elem.len()) - ); - } -} diff --git a/iot_config/src/lora_field.rs b/iot_config/src/lora_field.rs index 0a4c8494a..4fdfb8971 100644 --- a/iot_config/src/lora_field.rs +++ b/iot_config/src/lora_field.rs @@ -11,7 +11,7 @@ pub type EuiField = LoraField<16>; pub mod proto { pub use helium_proto::services::iot_config::{ - DevaddrConstraintV1, DevaddrRangeV1, EuiPairV1, OrgV1, SkfV1, + DevaddrConstraintV1, DevaddrRangeV1, EuiPairV1, OrgV2, SkfV1, }; } diff --git a/iot_config/src/main.rs b/iot_config/src/main.rs index 02d6a1d7e..4cae9299d 100644 --- a/iot_config/src/main.rs +++ b/iot_config/src/main.rs @@ -71,15 +71,24 @@ impl Daemon { let (auth_updater, auth_cache) = 
AuthCache::new(settings.admin_pubkey()?, &pool).await?; let (region_updater, region_map) = RegionMapReader::new(&pool).await?; let (delegate_key_updater, delegate_key_cache) = org::delegate_keys_cache(&pool).await?; + let delegate_key_updater = Arc::new(delegate_key_updater); + + org::spawn_delegate_cache_updater( + pool.clone(), + delegate_key_updater.clone(), + Duration::from_secs(60 * 5), + ); let signing_keypair = Arc::new(settings.signing_keypair()?); let gateway_svc = GatewayService::new( settings, + pool.clone(), metadata_pool.clone(), region_map.clone(), auth_cache.clone(), delegate_key_cache, + delegate_key_updater.clone(), )?; let route_svc = @@ -90,7 +99,6 @@ impl Daemon { auth_cache.clone(), pool.clone(), route_svc.clone_update_channel(), - delegate_key_updater, )?; let admin_svc = AdminService::new( diff --git a/iot_config/src/org.rs b/iot_config/src/org.rs index d3c6cd603..0e550e291 100644 --- a/iot_config/src/org.rs +++ b/iot_config/src/org.rs @@ -1,27 +1,28 @@ -use crate::{ - helium_netids::{self, is_helium_netid, AddressStore, HeliumNetId}, - lora_field::{DevAddrConstraint, DevAddrField, NetIdField}, - org_service::UpdateAuthorizer, -}; +use crate::lora_field::{DevAddrConstraint, NetIdField}; use futures::stream::StreamExt; use helium_crypto::{PublicKey, PublicKeyBinary}; +use helium_lib::keypair::to_helium_pubkey; +use rust_decimal::{prelude::ToPrimitive, Decimal}; use serde::Serialize; -use sqlx::{postgres::PgRow, types::Uuid, FromRow, Row}; +use solana::solana_pubkey_to_helium_binary; +use sqlx::{error::Error as SqlxError, postgres::PgRow, types::Uuid, FromRow, Pool, Postgres, Row}; use std::collections::HashSet; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; use tokio::sync::watch; pub mod proto { - pub use helium_proto::services::iot_config::{ - org_update_req_v1::update_v1::Update, org_update_req_v1::UpdateV1, ActionV1, OrgResV1, - OrgV1, - }; + pub use helium_proto::services::iot_config::OrgV2; } #[derive(Clone, Debug, Serialize)] pub struct Org { pub oui: u64, + pub address: PublicKeyBinary, pub owner: PublicKeyBinary, - pub payer: PublicKeyBinary, + pub escrow_key: String, + pub approved: bool, pub locked: bool, pub delegate_keys: Option>, pub constraints: Option>, @@ -29,26 +30,60 @@ pub struct Org { impl FromRow<'_, PgRow> for Org { fn from_row(row: &PgRow) -> sqlx::Result { - let delegate_keys = row - .get::, &str>("delegate_keys") - .into_iter() - .map(Some) - .collect(); - let constraints = row - .get::, &str>("constraints") - .into_iter() - .map(|(start, end)| { - Some(DevAddrConstraint { - start_addr: start.into(), - end_addr: end.into(), + let address_str: String = row.get("address"); + let address = solana_pubkey_to_helium_binary(&address_str)?; + let approved = row.get::("approved"); + let oui = row.try_get::("oui")? 
as u64; + let owner_str: String = row.get("authority"); + let owner = solana_pubkey_to_helium_binary(&owner_str)?; + let escrow_key = row.get::("escrow_key"); + let locked = row.get::("locked"); + let raw_delegate_keys: Option> = row.try_get("delegate_keys")?; + let delegate_keys: Option> = raw_delegate_keys.map(|keys| { + keys.into_iter() + .filter_map(|key| solana_pubkey_to_helium_binary(&key).ok()) + .collect() + }); + let raw_constraints: Option> = row.try_get("constraints")?; + let constraints: Option> = if let Some(constraints_data) = + raw_constraints + { + let constraints_result: Result, SqlxError> = constraints_data + .into_iter() + .map(|(start_decimal, end_decimal)| { + let start_u64 = start_decimal.to_u64().ok_or_else(|| { + SqlxError::Decode(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to convert NUMERIC 'start_addr' to u64", + ))) + })?; + + let end_u64 = end_decimal.to_u64().ok_or_else(|| { + SqlxError::Decode(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to convert NUMERIC 'end_addr' to u64", + ))) + })?; + + Ok(DevAddrConstraint { + start_addr: start_u64.into(), + end_addr: end_u64.into(), + }) }) - }) - .collect(); + .collect(); + + Some(constraints_result?) + } else { + None + }; + Ok(Self { - oui: row.get::("oui") as u64, - owner: row.get("owner_pubkey"), - payer: row.get("payer_pubkey"), - locked: row.get("locked"), + oui, + address, + owner, + escrow_key, + approved, + locked, delegate_keys, constraints, }) @@ -57,422 +92,125 @@ impl FromRow<'_, PgRow> for Org { pub type DelegateCache = HashSet; -pub async fn delegate_keys_cache( +async fn fetch_delegate_keys( db: impl sqlx::PgExecutor<'_>, -) -> Result<(watch::Sender, watch::Receiver), sqlx::Error> { - let key_set = sqlx::query(r#" select delegate_pubkey from organization_delegate_keys "#) - .fetch(db) - .filter_map(|row| async move { row.ok() }) - .map(|row| row.get("delegate_pubkey")) - .collect::>() - .await; - - Ok(watch::channel(key_set)) -} - -pub async fn create_org( - owner: PublicKeyBinary, - payer: PublicKeyBinary, - delegate_keys: Vec, - net_id: NetIdField, - devaddr_ranges: &[DevAddrConstraint], - db: impl sqlx::PgExecutor<'_> + sqlx::Acquire<'_, Database = sqlx::Postgres>, -) -> Result { - let mut txn = db.begin().await?; - - let oui = sqlx::query( - r#" - insert into organizations (owner_pubkey, payer_pubkey) - values ($1, $2) - returning oui - "#, +) -> Result, sqlx::Error> { + let key_set: HashSet = sqlx::query_scalar::<_, String>( + "SELECT delegate FROM solana_organization_delegate_keys WHERE delegate IS NOT NULL", ) - .bind(&owner) - .bind(&payer) - .fetch_one(&mut txn) - .await - .map_err(|_| { - OrgStoreError::SaveOrg(format!("owner: {owner}, payer: {payer}, net_id: {net_id}")) - })? - .get::("oui"); - - if !delegate_keys.is_empty() { - let delegate_keys = delegate_keys - .into_iter() - .map(|key| (key, oui)) - .collect::>(); - let mut query_builder: sqlx::QueryBuilder = sqlx::QueryBuilder::new( - " insert into organization_delegate_keys (delegate_pubkey, oui) ", - ); - query_builder.push_values(delegate_keys, |mut builder, (key, oui)| { - builder.push_bind(key).push_bind(oui); - }); - query_builder - .build() - .execute(&mut txn) - .await - .map_err(|_| { - OrgStoreError::SaveDelegates(format!( - "owner: {owner}, payer: {payer}, net_id: {net_id}" - )) - }) - .map(|_| ())? 
- }; - - if is_helium_netid(&net_id) { - insert_helium_constraints(oui as u64, net_id, devaddr_ranges, &mut txn).await - } else { - let constraint = devaddr_ranges - .first() - .ok_or(OrgStoreError::SaveConstraints( - "no devaddr constraints supplied".to_string(), - ))?; - if check_roamer_constraint_count(net_id, &mut txn).await? == 0 { - insert_roamer_constraint(oui as u64, net_id, constraint, &mut txn).await - } else { - return Err(OrgStoreError::SaveConstraints(format!( - "constraint already in use {constraint:?}" - ))); - } - } - .map_err(|err| OrgStoreError::SaveConstraints(format!("{devaddr_ranges:?}: {err:?}")))?; - - let org = get(oui as u64, &mut txn) - .await? - .ok_or_else(|| OrgStoreError::SaveOrg(format!("{oui}")))?; - - txn.commit().await?; - - Ok(org) -} - -pub async fn update_org( - oui: u64, - authorizer: UpdateAuthorizer, - updates: Vec, - db: impl sqlx::PgExecutor<'_> + sqlx::Acquire<'_, Database = sqlx::Postgres>, - delegate_cache: &watch::Sender, -) -> Result { - let mut txn = db.begin().await?; - - let current_org = get(oui, &mut txn) - .await? - .ok_or_else(|| OrgStoreError::NotFound(format!("{oui}")))?; - let net_id = get_org_netid(oui, &mut txn).await?; - let is_helium_org = is_helium_netid(&net_id); - - for update in updates.iter() { - match update.update { - Some(proto::Update::Owner(ref pubkeybin)) if authorizer == UpdateAuthorizer::Admin => { - let pubkeybin: PublicKeyBinary = pubkeybin.clone().into(); - update_owner(oui, &pubkeybin, &mut txn).await?; - tracing::info!(oui, pubkey = %pubkeybin, "owner pubkey updated"); - } - Some(proto::Update::Payer(ref pubkeybin)) if authorizer == UpdateAuthorizer::Admin => { - let pubkeybin: PublicKeyBinary = pubkeybin.clone().into(); - update_payer(oui, &pubkeybin, &mut txn).await?; - tracing::info!(oui, pubkey = %pubkeybin, "payer pubkey updated"); - } - Some(proto::Update::Devaddrs(addr_count)) - if authorizer == UpdateAuthorizer::Admin && is_helium_org => - { - add_devaddr_slab(oui, net_id, addr_count, &mut txn).await?; - tracing::info!(oui, addrs = addr_count, "new devaddr slab assigned"); - } - Some(proto::Update::Constraint(ref constraint_update)) - if authorizer == UpdateAuthorizer::Admin && is_helium_org => - { - match (constraint_update.action(), &constraint_update.constraint) { - (proto::ActionV1::Add, Some(ref constraint)) => { - let constraint: DevAddrConstraint = constraint.into(); - add_constraint_update(oui, net_id, constraint.clone(), &mut txn).await?; - tracing::info!(oui, %net_id, ?constraint, "devaddr constraint added"); - } - (proto::ActionV1::Remove, Some(ref constraint)) => { - let constraint: DevAddrConstraint = constraint.into(); - remove_constraint_update(oui, net_id, current_org.constraints.as_ref(), constraint.clone(), &mut txn).await?; - tracing::info!(oui, %net_id, ?constraint, "devaddr constraint removed"); - } - _ => return Err(OrgStoreError::InvalidUpdate(format!("invalid action or missing devaddr constraint update: {constraint_update:?}"))) - } - } - Some(proto::Update::DelegateKey(ref delegate_key_update)) => { - match delegate_key_update.action() { - proto::ActionV1::Add => { - let delegate = delegate_key_update.delegate_key.clone().into(); - add_delegate_key(oui, &delegate, &mut txn).await?; - tracing::info!(oui, %delegate, "delegate key authorized"); - } - proto::ActionV1::Remove => { - let delegate = delegate_key_update.delegate_key.clone().into(); - remove_delegate_key(oui, &delegate, &mut txn).await?; - tracing::info!(oui, %delegate, "delegate key de-authorized"); - } - } - } - _ => { 
- return Err(OrgStoreError::InvalidUpdate(format!( - "update: {update:?}, authorizer: {authorizer:?}" - ))) - } - }; - } - - let updated_org = get(oui, &mut txn) - .await? - .ok_or_else(|| OrgStoreError::SaveOrg(format!("{oui}")))?; - - txn.commit().await?; - - for update in updates.iter() { - if let Some(proto::Update::DelegateKey(ref delegate_key_update)) = update.update { - match delegate_key_update.action() { - proto::ActionV1::Add => { - delegate_cache.send_if_modified(|cache| { - cache.insert(delegate_key_update.delegate_key.clone().into()) - }); - } - proto::ActionV1::Remove => { - delegate_cache.send_if_modified(|cache| { - cache.remove(&delegate_key_update.delegate_key.clone().into()) - }); - } - } - } - } + .fetch_all(db) + .await? + .into_iter() + .filter_map(|key| solana_pubkey_to_helium_binary(&key).ok()) + .collect(); - Ok(updated_org) + Ok(key_set) } -pub async fn get_org_netid( - oui: u64, +pub async fn is_delegate_key( db: impl sqlx::PgExecutor<'_>, -) -> Result { - let netid = sqlx::query_scalar::<_, i32>( - " select net_id from organization_devaddr_constraints where oui = $1 limit 1 ", + key: &PublicKeyBinary, +) -> Result { + let count: i64 = sqlx::query_scalar( + "SELECT COUNT(*) FROM solana_organization_delegate_keys WHERE delegate = $1", ) - .bind(oui as i64) + .bind(key.to_string()) .fetch_one(db) .await?; - Ok(netid.into()) -} -async fn update_owner( - oui: u64, - owner_pubkey: &PublicKeyBinary, - db: &mut sqlx::Transaction<'_, sqlx::Postgres>, -) -> Result<(), sqlx::Error> { - sqlx::query(" update organizations set owner_pubkey = $1 where oui = $2 ") - .bind(owner_pubkey) - .bind(oui as i64) - .execute(db) - .await - .map(|_| ()) -} - -async fn update_payer( - oui: u64, - payer_pubkey: &PublicKeyBinary, - db: &mut sqlx::Transaction<'_, sqlx::Postgres>, -) -> Result<(), sqlx::Error> { - sqlx::query(" update organizations set payer_pubkey = $1 where oui = $2 ") - .bind(payer_pubkey) - .bind(oui as i64) - .execute(db) - .await - .map(|_| ()) + Ok(count > 0) } -async fn add_delegate_key( - oui: u64, - delegate_pubkey: &PublicKeyBinary, - db: &mut sqlx::Transaction<'_, sqlx::Postgres>, -) -> Result<(), sqlx::Error> { - sqlx::query(" insert into organization_delegate_keys (delegate_pubkey, oui) values ($1, $2) ") - .bind(delegate_pubkey) - .bind(oui as i64) - .execute(db) - .await - .map(|_| ()) +pub async fn delegate_keys_cache( + db: impl sqlx::PgExecutor<'_>, +) -> Result<(watch::Sender, watch::Receiver), sqlx::Error> { + let key_set = fetch_delegate_keys(db).await?; + Ok(watch::channel(key_set)) } -async fn remove_delegate_key( - oui: u64, - delegate_pubkey: &PublicKeyBinary, - db: &mut sqlx::Transaction<'_, sqlx::Postgres>, +pub async fn refresh_delegate_keys_cache( + db: impl sqlx::PgExecutor<'_>, + cache_sender: &Arc>, ) -> Result<(), sqlx::Error> { - sqlx::query(" delete from organization_delegate_keys where delegate_pubkey = $1 and oui = $2 ") - .bind(delegate_pubkey) - .bind(oui as i64) - .execute(db) - .await - .map(|_| ()) -} - -async fn add_constraint_update( - oui: u64, - net_id: NetIdField, - added_constraint: DevAddrConstraint, - db: &mut sqlx::Transaction<'_, sqlx::Postgres>, -) -> Result<(), OrgStoreError> { - let helium_net_id: HeliumNetId = net_id - .try_into() - .map_err(|err: &'static str| OrgStoreError::InvalidUpdate(err.to_string()))?; - helium_netids::checkout_specified_devaddr_constraint(db, helium_net_id, &added_constraint) - .await - .map_err(|err| OrgStoreError::InvalidUpdate(format!("{err:?}")))?; - insert_helium_constraints(oui, net_id, 
&[added_constraint], db).await?; + let key_set = fetch_delegate_keys(db).await?; + cache_sender.send_replace(key_set); Ok(()) } -async fn remove_constraint_update( - oui: u64, - net_id: NetIdField, - org_constraints: Option<&Vec>, - removed_constraint: DevAddrConstraint, - db: &mut sqlx::Transaction<'_, sqlx::Postgres>, -) -> Result<(), OrgStoreError> { - let helium_net_id: HeliumNetId = net_id - .try_into() - .map_err(|err: &'static str| OrgStoreError::InvalidUpdate(err.to_string()))?; - if let Some(org_constraints) = org_constraints { - if org_constraints.contains(&removed_constraint) && org_constraints.len() > 1 { - let remove_range = (u32::from(removed_constraint.start_addr) - ..=u32::from(removed_constraint.end_addr)) - .collect::>(); - db.release_addrs(helium_net_id, &remove_range).await?; - remove_helium_constraints(oui, &[removed_constraint], db).await?; - Ok(()) - } else if org_constraints.len() == 1 { - return Err(OrgStoreError::InvalidUpdate( - "org must have at least one constraint range".to_string(), - )); - } else { - return Err(OrgStoreError::InvalidUpdate( - "cannot remove constraint leased by other org".to_string(), - )); +pub fn spawn_delegate_cache_updater( + pool: Pool, + delegate_updater: Arc>, + interval: Duration, +) { + tokio::spawn(async move { + let mut interval_timer = tokio::time::interval(interval); + loop { + interval_timer.tick().await; + if let Err(err) = refresh_delegate_keys_cache(&pool, &delegate_updater).await { + tracing::error!(reason = ?err, "Failed to refresh delegate cache"); + } } - } else { - Err(OrgStoreError::InvalidUpdate( - "no org constraints defined".to_string(), - )) - } -} - -async fn add_devaddr_slab( - oui: u64, - net_id: NetIdField, - addr_count: u64, - txn: &mut sqlx::Transaction<'_, sqlx::Postgres>, -) -> Result<(), OrgStoreError> { - let helium_net_id: HeliumNetId = net_id - .try_into() - .map_err(|err: &'static str| OrgStoreError::InvalidUpdate(err.to_string()))?; - let constraints = helium_netids::checkout_devaddr_constraints(txn, addr_count, helium_net_id) - .await - .map_err(|err| OrgStoreError::SaveConstraints(format!("{err:?}")))?; - insert_helium_constraints(oui, net_id, &constraints, txn).await?; - Ok(()) + }); } -async fn insert_helium_constraints( +pub async fn get_org_netid( oui: u64, - net_id: NetIdField, - devaddr_ranges: &[DevAddrConstraint], db: impl sqlx::PgExecutor<'_>, -) -> Result<(), sqlx::Error> { - let mut query_builder: sqlx::QueryBuilder = sqlx::QueryBuilder::new( +) -> Result { + let netid = sqlx::query_scalar::<_, i64>( r#" - insert into organization_devaddr_constraints (oui, net_id, start_addr, end_addr) + SELECT sol_ni.id::bigint + FROM solana_organizations sol_org + JOIN solana_net_ids sol_ni ON sol_ni.address = sol_org.net_id + WHERE sol_org.oui = $1 + LIMIT 1 "#, - ); - query_builder.push_values(devaddr_ranges, |mut builder, range| { - builder - .push_bind(oui as i64) - .push_bind(i32::from(net_id)) - .push_bind(i32::from(range.start_addr)) - .push_bind(i32::from(range.end_addr)); - }); - - query_builder.build().execute(db).await.map(|_| ()) -} - -async fn remove_helium_constraints( - oui: u64, - devaddr_ranges: &[DevAddrConstraint], - db: impl sqlx::PgExecutor<'_>, -) -> Result<(), sqlx::Error> { - let constraints = devaddr_ranges - .iter() - .map(|constraint| (oui, constraint.start_addr, constraint.end_addr)) - .collect::>(); - let mut query_builder: sqlx::QueryBuilder = sqlx::QueryBuilder::new( - "delete from organization_devaddr_constraints where (oui, start_addr, end_addr) in ", - ); - 
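// Sketch (not part of this patch): the delegate cache is published through a tokio watch
// channel. The spawned refresher above calls send_replace with a full snapshot, while the
// Arc-wrapped sender can also push incremental changes; readers just hold the cheap
// watch::Receiver. `add_delegate` and `is_authorized_delegate` are hypothetical helpers
// showing both sides of the channel.
use helium_crypto::PublicKeyBinary;
use std::{collections::HashSet, sync::Arc};
use tokio::sync::watch;

type DelegateCache = HashSet<PublicKeyBinary>;

fn add_delegate(updater: &Arc<watch::Sender<DelegateCache>>, key: PublicKeyBinary) {
    // send_if_modified only wakes receivers when the set actually changed.
    updater.send_if_modified(|cache| cache.insert(key));
}

fn is_authorized_delegate(
    cache: &watch::Receiver<DelegateCache>,
    signer: &PublicKeyBinary,
) -> bool {
    // borrow() yields the latest snapshot published by send_replace / send_if_modified.
    cache.borrow().contains(signer)
}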
query_builder.push_tuples(constraints, |mut builder, (oui, start_addr, end_addr)| { - builder - .push_bind(oui as i64) - .push_bind(i32::from(start_addr)) - .push_bind(i32::from(end_addr)); - }); - - query_builder.build().execute(db).await.map(|_| ()) -} - -async fn check_roamer_constraint_count( - net_id: NetIdField, - db: impl sqlx::PgExecutor<'_>, -) -> Result { - sqlx::query_scalar( - " select count(net_id) from organization_devaddr_constraints where net_id = $1 ", ) - .bind(i32::from(net_id)) + .bind(Decimal::from(oui)) .fetch_one(db) - .await -} + .await?; -async fn insert_roamer_constraint( - oui: u64, - net_id: NetIdField, - devaddr_range: &DevAddrConstraint, - db: impl sqlx::PgExecutor<'_>, -) -> Result<(), sqlx::Error> { - sqlx::query( - r#" - insert into organization_devaddr_constraints (oui, net_id, start_addr, end_addr) - values ($1, $2, $3, $4) - "#, - ) - .bind(oui as i64) - .bind(i32::from(net_id)) - .bind(i32::from(devaddr_range.start_addr)) - .bind(i32::from(devaddr_range.end_addr)) - .execute(db) - .await - .map(|_| ()) + Ok(netid.into()) } const GET_ORG_SQL: &str = r#" - select org.oui, org.owner_pubkey, org.payer_pubkey, org.locked, - array(select (start_addr, end_addr) from organization_devaddr_constraints org_const where org_const.oui = org.oui) as constraints, - array(select delegate_pubkey from organization_delegate_keys org_delegates where org_delegates.oui = org.oui) as delegate_keys - from organizations org - "#; +SELECT + sol_org.oui::bigint, + sol_org.address, + sol_org.authority, + sol_org.escrow_key, + sol_org.approved, + COALESCE(ol.locked, true) AS locked, + ARRAY( + SELECT (start_addr, end_addr) + FROM solana_organization_devaddr_constraints + WHERE organization = sol_org.address + ) AS constraints, + ARRAY( + SELECT delegate + FROM solana_organization_delegate_keys + WHERE organization = sol_org.address + ) AS delegate_keys +FROM solana_organizations sol_org +LEFT JOIN organization_locks ol ON sol_org.address = ol.organization +"#; pub async fn list(db: impl sqlx::PgExecutor<'_>) -> Result, sqlx::Error> { - Ok(sqlx::query_as::<_, Org>(GET_ORG_SQL) + let orgs = sqlx::query_as::<_, Org>(GET_ORG_SQL) .fetch(db) .filter_map(|row| async move { row.ok() }) .collect::>() - .await) + .await; + + Ok(orgs) } pub async fn get(oui: u64, db: impl sqlx::PgExecutor<'_>) -> Result, sqlx::Error> { let mut query: sqlx::QueryBuilder = sqlx::QueryBuilder::new(GET_ORG_SQL); - query.push(" where org.oui = $1 "); + query.push(" where sol_org.oui = $1 "); query .build_query_as::() - .bind(oui as i64) + .bind(Decimal::from(oui)) .fetch_optional(db) .await } @@ -482,23 +220,38 @@ pub async fn get_constraints_by_route( db: impl sqlx::PgExecutor<'_>, ) -> Result, OrgStoreError> { let uuid = Uuid::try_parse(route_id)?; - let constraints = sqlx::query( r#" - select consts.start_addr, consts.end_addr from organization_devaddr_constraints consts - join routes on routes.oui = consts.oui - where routes.id = $1 + SELECT sol_odc.start_addr, sol_odc.end_addr + FROM solana_organization_devaddr_constraints sol_odc + JOIN solana_organizations sol_orgs ON sol_odc.organization = sol_orgs.address + JOIN routes ON routes.oui = sol_orgs.oui + WHERE routes.id = $1 "#, ) .bind(uuid) .fetch_all(db) .await? 
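// Usage sketch (not part of this patch): GET_ORG_SQL drives both list() and get() above; a
// caller in this module reading the decoded Org might look like the following.
async fn log_org(pool: &sqlx::Pool<sqlx::Postgres>, oui: u64) -> Result<(), sqlx::Error> {
    match get(oui, pool).await? {
        Some(org) => {
            tracing::info!(oui, address = %org.address, locked = org.locked, "org found");
            for constraint in org.constraints.unwrap_or_default() {
                tracing::info!(oui, ?constraint, "devaddr constraint");
            }
        }
        None => tracing::warn!(oui, "org not found"),
    }
    Ok(())
}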
.into_iter() - .map(|row| DevAddrConstraint { - start_addr: row.get::("start_addr").into(), - end_addr: row.get::("end_addr").into(), + .map(|row| -> Result { + let start_decimal: Decimal = row.get("start_addr"); + let start_u64 = start_decimal.to_u64().ok_or_else(|| { + OrgStoreError::DecodeNumeric( + "Failed to convert NUMERIC 'start_addr' to u64".to_string(), + ) + })?; + + let end_decimal: Decimal = row.get("end_addr"); + let end_u64 = end_decimal.to_u64().ok_or_else(|| { + OrgStoreError::DecodeNumeric("Failed to convert NUMERIC 'end_addr' to u64".to_string()) + })?; + + Ok(DevAddrConstraint { + start_addr: start_u64.into(), + end_addr: end_u64.into(), + }) }) - .collect(); + .collect::, OrgStoreError>>()?; Ok(constraints) } @@ -511,12 +264,10 @@ pub async fn get_route_ids_by_route( let route_ids = sqlx::query( r#" - select routes.id from routes - where oui = ( - select organizations.oui from organizations - join routes on organizations.oui = routes.oui - where routes.id = $1 - ) + SELECT r2.id + FROM routes r1 + JOIN routes r2 ON r1.oui = r2.oui + WHERE r1.id = $1 "#, ) .bind(uuid) @@ -532,10 +283,13 @@ pub async fn get_route_ids_by_route( pub async fn is_locked(oui: u64, db: impl sqlx::PgExecutor<'_>) -> Result { sqlx::query_scalar::<_, bool>( r#" - select locked from organizations where oui = $1 + SELECT COALESCE(org_lock.locked, true) + FROM solana_organizations sol_org + LEFT JOIN organization_locks org_lock ON sol_org.address = org_lock.organization + WHERE sol_org.oui = $1 "#, ) - .bind(oui as i64) + .bind(Decimal::from(oui)) .fetch_one(db) .await } @@ -543,12 +297,16 @@ pub async fn is_locked(oui: u64, db: impl sqlx::PgExecutor<'_>) -> Result) -> Result<(), sqlx::Error> { sqlx::query( r#" - update organizations - set locked = not locked - where oui = $1 + INSERT INTO organization_locks (organization, locked) + SELECT address, NOT COALESCE(org_lock.locked, false) + FROM solana_organizations sol_org + LEFT JOIN organization_locks org_lock ON sol_org.address = org_lock.organization + WHERE sol_org.oui = $1 + ON CONFLICT (organization) DO UPDATE + SET locked = NOT organization_locks.locked "#, ) - .bind(oui as i64) + .bind(Decimal::from(oui)) .execute(db) .await?; @@ -573,6 +331,8 @@ pub enum OrgStoreError { RouteIdParse(#[from] sqlx::types::uuid::Error), #[error("Invalid update: {0}")] InvalidUpdate(String), + #[error("unable to decode numeric field: {0}")] + DecodeNumeric(String), } pub async fn get_org_pubkeys( @@ -583,11 +343,7 @@ pub async fn get_org_pubkeys( .await? 
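// Sketch (not part of this patch): the lock flag now lives in the organization_locks side
// table, read through COALESCE and flipped with the INSERT ... ON CONFLICT above. A caller
// that only locks a currently unlocked org might look like this; `toggle_locked` is the
// assumed name of the function containing that upsert (its signature sits outside this hunk).
async fn lock_org(pool: &sqlx::Pool<sqlx::Postgres>, oui: u64) -> Result<(), sqlx::Error> {
    if !is_locked(oui, pool).await? {
        toggle_locked(oui, pool).await?;
        tracing::info!(oui, "org locked");
    }
    Ok(())
}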
.ok_or_else(|| OrgStoreError::NotFound(format!("{oui}")))?; - let mut pubkeys: Vec = vec![ - PublicKey::try_from(org.owner)?, - PublicKey::try_from(org.payer)?, - ]; - + let mut pubkeys: Vec = vec![PublicKey::try_from(org.owner)?]; if let Some(ref mut delegate_pubkeys) = org .delegate_keys .map(|keys| { @@ -608,26 +364,36 @@ pub async fn get_org_pubkeys_by_route( db: impl sqlx::PgExecutor<'_>, ) -> Result, OrgStoreError> { let uuid = Uuid::try_parse(route_id)?; - let org = sqlx::query_as::<_, Org>( r#" - select org.oui, org.owner_pubkey, org.payer_pubkey, org.locked, - array(select (start_addr, end_addr) from organization_devaddr_constraints org_const where org_const.oui = org.oui) as constraints, - array(select delegate_pubkey from organization_delegate_keys org_delegates where org_delegates.oui = org.oui) as delegate_keys - from organizations org - join routes on org.oui = routes.oui - where routes.id = $1 + SELECT + sol_org.oui::bigint, + sol_org.address, + sol_org.authority, + sol_org.escrow_key, + sol_org.approved, + COALESCE(ol.locked, true) AS locked, + ARRAY( + SELECT (start_addr, end_addr) + FROM solana_organization_devaddr_constraints + WHERE organization = sol_org.address + ) AS constraints, + ARRAY( + SELECT delegate + FROM solana_organization_delegate_keys + WHERE organization = sol_org.address + ) AS delegate_keys + FROM solana_organizations sol_org + LEFT JOIN organization_locks ol ON sol_org.address = ol.organization + JOIN routes r on sol_org.oui = r.oui + WHERE r.id = $1 "#, ) .bind(uuid) .fetch_one(db) .await?; - let mut pubkeys: Vec = vec![ - PublicKey::try_from(org.owner)?, - PublicKey::try_from(org.payer)?, - ]; - + let mut pubkeys: Vec = vec![PublicKey::try_from(org.owner)?]; if let Some(ref mut delegate_keys) = org .delegate_keys .map(|keys| { @@ -643,12 +409,14 @@ pub async fn get_org_pubkeys_by_route( Ok(pubkeys) } -impl From for proto::OrgV1 { +impl From for proto::OrgV2 { fn from(org: Org) -> Self { Self { oui: org.oui, + address: org.address.into(), owner: org.owner.into(), - payer: org.payer.into(), + escrow_key: org.escrow_key, + approved: org.approved, locked: org.locked, delegate_keys: org.delegate_keys.map_or_else(Vec::new, |keys| { keys.iter().map(|key| key.as_ref().into()).collect() diff --git a/iot_config/src/org_service.rs b/iot_config/src/org_service.rs index a7379811f..5104a383d 100644 --- a/iot_config/src/org_service.rs +++ b/iot_config/src/org_service.rs @@ -1,33 +1,32 @@ use std::sync::Arc; -use crate::{ - admin::{AuthCache, KeyType}, - broadcast_update, helium_netids, lora_field, org, - route::list_routes, - telemetry, verify_public_key, GrpcResult, -}; use anyhow::Result; use chrono::Utc; use file_store::traits::{MsgVerify, TimestampEncode}; use helium_crypto::{Keypair, PublicKey, Sign}; use helium_proto::{ services::iot_config::{ - self, route_stream_res_v1, ActionV1, DevaddrConstraintV1, OrgCreateHeliumReqV1, - OrgCreateRoamerReqV1, OrgDisableReqV1, OrgDisableResV1, OrgEnableReqV1, OrgEnableResV1, - OrgGetReqV1, OrgListReqV1, OrgListResV1, OrgResV1, OrgUpdateReqV1, OrgV1, RouteStreamResV1, + self, route_stream_res_v1, ActionV1, OrgCreateHeliumReqV1, OrgCreateRoamerReqV1, + OrgDisableReqV1, OrgDisableResV1, OrgEnableReqV1, OrgEnableResV1, OrgGetReqV1, OrgGetReqV2, + OrgListReqV1, OrgListReqV2, OrgListResV1, OrgListResV2, OrgResV1, OrgResV2, OrgUpdateReqV1, + OrgV2, RouteStreamResV1, }, Message, }; use sqlx::{Pool, Postgres}; -use tokio::sync::{broadcast, watch}; +use tokio::sync::broadcast; use tonic::{Request, Response, Status}; +use crate::{ 
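// Sketch (assumed caller, not part of this patch): with the payer key dropped,
// get_org_pubkeys returns the owner plus any delegate keys, so request verification can
// simply accept a signature from any of them.
use file_store::traits::MsgVerify;
use helium_crypto::PublicKey;

fn signed_by_org_key<R: MsgVerify>(request: &R, org_pubkeys: &[PublicKey]) -> bool {
    org_pubkeys.iter().any(|key| request.verify(key).is_ok())
}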
+ admin::AuthCache, broadcast_update, org, route::list_routes, telemetry, verify_public_key, + GrpcResult, +}; + pub struct OrgService { auth_cache: AuthCache, pool: Pool, route_update_tx: broadcast::Sender, signing_key: Arc, - delegate_updater: watch::Sender, } #[derive(Clone, Debug, PartialEq)] @@ -42,31 +41,15 @@ impl OrgService { auth_cache: AuthCache, pool: Pool, route_update_tx: broadcast::Sender, - delegate_updater: watch::Sender, ) -> Result { Ok(Self { auth_cache, pool, route_update_tx, signing_key, - delegate_updater, }) } - fn verify_admin_request_signature( - &self, - signer: &PublicKey, - request: &R, - ) -> Result<(), Status> - where - R: MsgVerify, - { - self.auth_cache - .verify_signature_with_type(KeyType::Administrator, signer, request) - .map_err(|_| Status::permission_denied("invalid admin signature"))?; - Ok(()) - } - fn verify_request_signature(&self, signer: &PublicKey, request: &R) -> Result<(), Status> where R: MsgVerify, @@ -77,37 +60,6 @@ impl OrgService { Ok(()) } - async fn verify_update_request_signature( - &self, - signer: &PublicKey, - request: &OrgUpdateReqV1, - ) -> Result { - if self - .auth_cache - .verify_signature_with_type(KeyType::Administrator, signer, request) - .is_ok() - { - tracing::debug!(signer = signer.to_string(), "request authorized by admin"); - return Ok(UpdateAuthorizer::Admin); - } - - let org_owner = org::get(request.oui, &self.pool) - .await - .transpose() - .ok_or_else(|| Status::not_found(format!("oui: {}", request.oui)))? - .map(|org| org.owner) - .map_err(|_| Status::internal("auth verification error"))?; - if org_owner == signer.clone().into() && request.verify(signer).is_ok() { - tracing::debug!( - signer = signer.to_string(), - "request authorized by delegate" - ); - return Ok(UpdateAuthorizer::Org); - } - - Err(Status::permission_denied("unauthorized request signature")) - } - fn sign_response(&self, response: &[u8]) -> Result, Status> { self.signing_key .sign(response) @@ -150,16 +102,28 @@ impl OrgService { #[tonic::async_trait] impl iot_config::Org for OrgService { async fn list(&self, _request: Request) -> GrpcResult { - telemetry::count_request("org", "list"); + telemetry::count_request("org", "list_deprecated_call"); + tracing::warn!( + "Deprecated API endpoint 'org.list' was called. This endpoint is no longer supported." + ); + + Err(Status::failed_precondition( + "This API endpoint (org.list) has been deprecated and is no longer supported. \ + Please use org.list_v2 instead. Refer to API documentation for migration details.", + )) + } + + async fn list_v2(&self, _request: Request) -> GrpcResult { + telemetry::count_request("org", "list_v2"); - let proto_orgs: Vec = org::list(&self.pool) + let proto_orgs: Vec = org::list(&self.pool) .await .map_err(|_| Status::internal("org list failed"))? .into_iter() .map(|org| org.into()) .collect(); - let mut resp = OrgListResV1 { + let mut resp = OrgListResV2 { orgs: proto_orgs, timestamp: Utc::now().encode_timestamp(), signer: self.signing_key.public_key().into(), @@ -170,7 +134,19 @@ impl iot_config::Org for OrgService { Ok(Response::new(resp)) } - async fn get(&self, request: Request) -> GrpcResult { + async fn get(&self, _request: Request) -> GrpcResult { + telemetry::count_request("org", "get_deprecated_call"); + tracing::warn!( + "Deprecated API endpoint 'org.get' was called. This endpoint is no longer supported." + ); + + Err(Status::failed_precondition( + "This API endpoint (org.get) has been deprecated and is no longer supported. \ + Please use org.get_v2 instead. 
Refer to API documentation for migration details.", + )) + } + + async fn get_v2(&self, request: Request) -> GrpcResult { let request = request.into_inner(); telemetry::count_request("org", "get"); custom_tracing::record("oui", request.oui); @@ -182,11 +158,12 @@ impl iot_config::Org for OrgService { Status::internal("org get failed") })? .ok_or_else(|| Status::not_found(format!("oui: {}", request.oui)))?; + let net_id = org::get_org_netid(org.oui, &self.pool) .await .map_err(|err| { tracing::error!(oui = org.oui, reason = ?err, "get org net id failed"); - Status::not_found("invalid org; no valid devaddr constraints") + Status::not_found("invalid org; no net id found") })?; let devaddr_constraints = org @@ -199,7 +176,7 @@ impl iot_config::Org for OrgService { .collect() }); - let mut resp = OrgResV1 { + let mut resp = OrgResV2 { org: Some(org.into()), net_id: net_id.into(), devaddr_constraints, @@ -207,251 +184,42 @@ impl iot_config::Org for OrgService { signer: self.signing_key.public_key().into(), signature: vec![], }; - resp.signature = self.sign_response(&resp.encode_to_vec())?; - - Ok(Response::new(resp)) - } - - async fn create_helium(&self, request: Request) -> GrpcResult { - let request = request.into_inner(); - telemetry::count_request("org", "create-helium"); - custom_tracing::record_b58("pub_key", &request.owner); - custom_tracing::record_b58("signer", &request.signer); - - let signer = verify_public_key(&request.signer)?; - self.verify_admin_request_signature(&signer, &request)?; - - let mut verify_keys: Vec<&[u8]> = vec![request.owner.as_ref(), request.payer.as_ref()]; - let mut verify_delegates: Vec<&[u8]> = request - .delegate_keys - .iter() - .map(|key| key.as_slice()) - .collect(); - verify_keys.append(&mut verify_delegates); - _ = verify_keys - .iter() - .map(|key| { - verify_public_key(key).map_err(|err| { - tracing::error!(reason = ?err, "failed pubkey validation"); - Status::invalid_argument(format!("failed pubkey validation: {err:?}")) - }) - }) - .collect::, Status>>()?; - - tracing::info!(?request, "create helium org"); - - let net_id = request.net_id(); - let requested_addrs = if request.devaddrs >= 8 && request.devaddrs % 2 == 0 { - request.devaddrs - } else { - return Err(Status::invalid_argument(format!( - "{} devaddrs requested; minimum 8, even number required", - request.devaddrs - ))); - }; - - let mut txn = self - .pool - .begin() - .await - .map_err(|_| Status::internal("error saving org record"))?; - let devaddr_constraints = helium_netids::checkout_devaddr_constraints(&mut txn, requested_addrs, net_id.into()) - .await - .map_err(|err| { - tracing::error!(?net_id, count = %requested_addrs, reason = ?err, "failed to retrieve available helium devaddrs"); - Status::failed_precondition("helium addresses unavailable") - })?; - tracing::info!(constraints = ?devaddr_constraints, "devaddr constraints issued"); - let helium_netid_field = helium_netids::HeliumNetId::from(net_id).id(); - - let org = org::create_org( - request.owner.into(), - request.payer.into(), - request - .delegate_keys - .into_iter() - .map(|key| key.into()) - .collect(), - helium_netid_field, - &devaddr_constraints, - &mut txn, - ) - .await - .map_err(|err| { - tracing::error!(reason = ?err, "org save failed"); - Status::internal(format!("org save failed: {err:?}")) - })?; - - txn.commit() - .await - .map_err(|_| Status::internal("error saving org record"))?; - - org.delegate_keys.as_ref().map(|keys| { - self.delegate_updater.send_if_modified(|cache| { - keys.iter().fold(false, |acc, key| { - 
if cache.insert(key.clone()) { - tracing::info!(%key, "delegate key authorized"); - true - } else { - acc - } - }) - }) - }); - let devaddr_constraints = org - .constraints - .clone() - .unwrap_or_default() - .into_iter() - .map(DevaddrConstraintV1::from) - .collect(); - let mut resp = OrgResV1 { - org: Some(org.into()), - net_id: helium_netid_field.into(), - devaddr_constraints, - timestamp: Utc::now().encode_timestamp(), - signer: self.signing_key.public_key().into(), - signature: vec![], - }; resp.signature = self.sign_response(&resp.encode_to_vec())?; - Ok(Response::new(resp)) } - async fn create_roamer(&self, request: Request) -> GrpcResult { - let request = request.into_inner(); - telemetry::count_request("org", "create-roamer"); - custom_tracing::record_b58("pub_key", &request.owner); - custom_tracing::record_b58("signer", &request.signer); + async fn create_helium(&self, _request: Request) -> GrpcResult { + telemetry::count_request("org", "create_helium"); + tracing::warn!( + "Deprecated API endpoint 'org.create_helium' was called. This endpoint is no longer supported." + ); - let signer = verify_public_key(&request.signer)?; - self.verify_admin_request_signature(&signer, &request)?; - - let mut verify_keys: Vec<&[u8]> = vec![request.owner.as_ref(), request.payer.as_ref()]; - let mut verify_delegates: Vec<&[u8]> = request - .delegate_keys - .iter() - .map(|key| key.as_slice()) - .collect(); - verify_keys.append(&mut verify_delegates); - _ = verify_keys - .iter() - .map(|key| { - verify_public_key(key).map_err(|err| { - Status::invalid_argument(format!("failed pubkey validation: {err:?}")) - }) - }) - .collect::, Status>>()?; - - tracing::info!(?request, "create roamer org"); - - let net_id = lora_field::net_id(request.net_id); - let devaddr_range = net_id - .full_range() - .map_err(|_| Status::invalid_argument("invalid net_id"))?; - tracing::info!(constraints = ?devaddr_range, "roaming devaddr range"); - - let org = org::create_org( - request.owner.into(), - request.payer.into(), - request - .delegate_keys - .into_iter() - .map(|key| key.into()) - .collect(), - net_id, - &[devaddr_range], - &self.pool, - ) - .await - .map_err(|err| { - tracing::error!(reason = ?err, "failed to create org"); - Status::internal(format!("org save failed: {err:?}")) - })?; - - org.delegate_keys.as_ref().map(|keys| { - self.delegate_updater.send_if_modified(|cache| { - keys.iter().fold(false, |acc, key| { - if cache.insert(key.clone()) { - tracing::info!(?key, "delegate key authorized"); - true - } else { - acc - } - }) - }) - }); + Err(Status::failed_precondition( + "This API endpoint (org.create_helium) has been deprecated and is no longer supported.", + )) + } - let devaddr_constraints = org - .constraints - .clone() - .unwrap_or_default() - .into_iter() - .map(DevaddrConstraintV1::from) - .collect(); - let mut resp = OrgResV1 { - org: Some(org.into()), - net_id: net_id.into(), - devaddr_constraints, - timestamp: Utc::now().encode_timestamp(), - signer: self.signing_key.public_key().into(), - signature: vec![], - }; - resp.signature = self.sign_response(&resp.encode_to_vec())?; + async fn create_roamer(&self, _request: Request) -> GrpcResult { + telemetry::count_request("org", "create_roamer"); + tracing::warn!( + "Deprecated API endpoint 'org.create_roamer' was called. This endpoint is no longer supported." 
+ ); - Ok(Response::new(resp)) + Err(Status::failed_precondition( + "This API endpoint (org.create_roamer) has been deprecated and is no longer supported.", + )) } - async fn update(&self, request: Request) -> GrpcResult { - let request = request.into_inner(); + async fn update(&self, _request: Request) -> GrpcResult { telemetry::count_request("org", "update"); - custom_tracing::record("oui", request.oui); - custom_tracing::record_b58("signer", &request.signer); - - let signer = verify_public_key(&request.signer)?; - let authorizer = self - .verify_update_request_signature(&signer, &request) - .await?; - - let org = org::update_org( - request.oui, - authorizer, - request.updates, - &self.pool, - &self.delegate_updater, - ) - .await - .map_err(|err| { - tracing::error!(reason = ?err, "org update failed"); - Status::internal(format!("org update failed: {err:?}")) - })?; - - let net_id = org::get_org_netid(org.oui, &self.pool) - .await - .map_err(|err| { - tracing::error!(oui = org.oui, reason = ?err, "get org net id failed"); - Status::not_found("invalid org; no valid devaddr constraints") - })?; - - let devaddr_constraints = org - .constraints - .clone() - .unwrap_or_default() - .into_iter() - .map(DevaddrConstraintV1::from) - .collect(); - let mut resp = OrgResV1 { - org: Some(org.into()), - net_id: net_id.into(), - devaddr_constraints, - timestamp: Utc::now().encode_timestamp(), - signer: self.signing_key.public_key().into(), - signature: vec![], - }; - resp.signature = self.sign_response(&resp.encode_to_vec())?; + tracing::warn!( + "Deprecated API endpoint 'org.update' was called. This endpoint is no longer supported." + ); - Ok(Response::new(resp)) + Err(Status::failed_precondition( + "This API endpoint (org.update) has been deprecated and is no longer supported.", + )) } async fn disable(&self, request: Request) -> GrpcResult { diff --git a/iot_config/src/route.rs b/iot_config/src/route.rs index f050f1324..05e0d6740 100644 --- a/iot_config/src/route.rs +++ b/iot_config/src/route.rs @@ -132,13 +132,13 @@ pub async fn create_route( .await?; let route_id = row.get::("id").to_string(); - let new_route = get_route(&route_id, &mut transaction).await?; transaction.commit().await?; let timestamp = Utc::now().encode_timestamp(); let signer = signing_key.public_key().into(); + let mut update = proto::RouteStreamResV1 { action: proto::ActionV1::Add.into(), data: Some(proto::route_stream_res_v1::Data::Route( @@ -148,6 +148,7 @@ pub async fn create_route( signer, signature: vec![], }; + _ = futures::future::ready(signing_key.sign(&update.encode_to_vec())) .map_err(|err| { tracing::error!(error = ?err, "error signing route create"); @@ -462,26 +463,42 @@ pub async fn update_devaddr_ranges( pub async fn list_routes(oui: u64, db: impl sqlx::PgExecutor<'_>) -> anyhow::Result> { Ok(sqlx::query_as::<_, StorageRoute>( r#" - select r.id, r.oui, r.net_id, r.max_copies, r.server_host, r.server_port, r.server_protocol_opts, r.active, r.ignore_empty_skf, o.locked - from routes r - join organizations o on r.oui = o.oui - where o.oui = $1 and r.deleted = false - group by r.id, o.locked + SELECT + r.id, + r.oui, + r.net_id, + r.max_copies, + r.server_host, + r.server_port, + r.server_protocol_opts, + r.active, + r.ignore_empty_skf, + COALESCE(ol.locked, true) AS locked + FROM routes r + JOIN solana_organizations o ON r.oui = o.oui + LEFT JOIN organization_locks ol ON ol.organization = o.address + WHERE o.oui = $1 AND r.deleted = false "#, ) .bind(oui as i64) .fetch(db) .map_err(RouteStorageError::from) - 
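// Client-side sketch (not part of this patch): callers of the deprecated org endpoints now
// receive FAILED_PRECONDITION and should move to the *_v2 RPCs. The endpoint URL below is a
// placeholder.
use helium_proto::services::iot_config::{config_org_client::OrgClient, OrgGetReqV2};

async fn fetch_org(oui: u64) -> anyhow::Result<()> {
    let mut client = OrgClient::connect("http://127.0.0.1:8080").await?;
    let res = client.get_v2(OrgGetReqV2 { oui }).await?.into_inner();
    println!("org: {:?}, net_id: {}", res.org, res.net_id);
    Ok(())
}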
.and_then(|route| async move { Ok(Route { + .and_then(|route| async move { + Ok(Route { id: route.id.to_string(), net_id: route.net_id.into(), oui: route.oui as u64, - server: RouteServer::new(route.server_host, route.server_port as u32, serde_json::from_value(route.server_protocol_opts)?), + server: RouteServer::new( + route.server_host, + route.server_port as u32, + serde_json::from_value(route.server_protocol_opts)?, + ), max_copies: route.max_copies as u32, active: route.active, locked: route.locked, ignore_empty_skf: route.ignore_empty_skf, - })}) + }) + }) .filter_map(|route| async move { route.ok() }) .collect::>() .await) @@ -527,27 +544,47 @@ pub fn route_stream<'a>( ) -> impl Stream + 'a { sqlx::query( r#" - select r.id, r.oui, r.net_id, r.max_copies, r.server_host, r.server_port, r.server_protocol_opts, r.active, r.ignore_empty_skf, o.locked, r.deleted - from routes r - join organizations o on r.oui = o.oui - where r.updated_at >= $1 - group by r.id, o.locked + SELECT + r.id, + r.oui, + r.net_id, + r.max_copies, + r.server_host, + r.server_port, + r.server_protocol_opts, + r.active, + r.ignore_empty_skf, + COALESCE(ol.locked, true) AS locked, + r.deleted + FROM routes r + JOIN solana_organizations o ON r.oui = o.oui + LEFT JOIN organization_locks ol ON ol.organization = o.address + WHERE r.updated_at >= $1 "#, ) .bind(since) .fetch(db) .and_then(|row| async move { StorageRoute::from_row(&row).map(|sr| (sr, row.get("deleted"))) }) .map_err(RouteStorageError::from) - .and_then(|(route, deleted)| async move { Ok((Route { - id: route.id.to_string(), - net_id: route.net_id.into(), - oui: route.oui as u64, - server: RouteServer::new(route.server_host, route.server_port as u32, serde_json::from_value(route.server_protocol_opts)?), - max_copies: route.max_copies as u32, - active: route.active, - locked: route.locked, - ignore_empty_skf: route.ignore_empty_skf, - }, deleted))}) + .and_then(|(route, deleted)| async move { + Ok(( + Route { + id: route.id.to_string(), + net_id: route.net_id.into(), + oui: route.oui as u64, + server: RouteServer::new( + route.server_host, + route.server_port as u32, + serde_json::from_value(route.server_protocol_opts)?, + ), + max_copies: route.max_copies as u32, + active: route.active, + locked: route.locked, + ignore_empty_skf: route.ignore_empty_skf, + }, + deleted, + )) + }) .filter_map(|result| async move { result.ok() }) .boxed() } @@ -615,11 +652,21 @@ pub async fn get_route(id: &str, db: impl sqlx::PgExecutor<'_>) -> anyhow::Resul let uuid = Uuid::try_parse(id)?; let route = sqlx::query_as::<_, StorageRoute>( r#" - select r.id, r.oui, r.net_id, r.max_copies, r.server_host, r.server_port, r.server_protocol_opts, r.active, r.ignore_empty_skf, o.locked - from routes r - join organizations o on r.oui = o.oui - where r.id = $1 and r.deleted = false - group by r.id, o.locked + SELECT + r.id, + r.oui, + r.net_id, + r.max_copies, + r.server_host, + r.server_port, + r.server_protocol_opts, + r.active, + r.ignore_empty_skf, + COALESCE(ol.locked, true) AS locked + FROM routes r + JOIN solana_organizations o ON r.oui = o.oui + LEFT JOIN organization_locks ol ON ol.organization = o.address + WHERE r.id = $1 AND r.deleted = false "#, ) .bind(uuid) diff --git a/iot_config/tests/fixtures.rs b/iot_config/tests/fixtures.rs new file mode 100644 index 000000000..f6bfbd06e --- /dev/null +++ b/iot_config/tests/fixtures.rs @@ -0,0 +1,154 @@ +use backon::{ExponentialBuilder, Retryable}; +use helium_proto::services::iot_config::{self as proto, config_org_client::OrgClient}; 
+use solana_sdk::pubkey::Pubkey; +use sqlx::{Pool, Postgres}; +use std::net::SocketAddr; + +pub async fn create_solana_org( + pool: &Pool, + authority: &String, + escrow_key: &String, + net_id: &String, + oui: Option, +) -> anyhow::Result<(String, u64)> { + let address = Pubkey::new_unique().to_string(); + let oui = oui.unwrap_or(1); + + sqlx::query( + r#" + INSERT INTO solana_organizations ( + address, + net_id, + authority, + oui, + escrow_key, + approved + ) + VALUES ($1, $2, $3, $4, $5, $6) + "#, + ) + .bind(address.clone()) + .bind(net_id) + .bind(authority) + .bind(oui) + .bind(escrow_key) + .bind(true) + .execute(pool) + .await?; + + Ok((address, oui as u64)) +} + +pub async fn create_solana_org_devaddr_constraint( + pool: &Pool, + net_id: &String, + organization: &String, + current_addr_offset: Option, + num_blocks: i64, +) -> anyhow::Result { + let address = Pubkey::new_unique().to_string(); + let end_addr = current_addr_offset.unwrap_or(0) + num_blocks * 8; + + sqlx::query( + r#" + INSERT INTO solana_organization_devaddr_constraints ( + address, + net_id, + organization, + start_addr, + end_addr + ) + VALUES ($1, $2, $3, $4, $5) + "#, + ) + .bind(address.clone()) + .bind(net_id) + .bind(organization) + .bind(current_addr_offset.unwrap_or(0)) + .bind(end_addr) + .execute(pool) + .await?; + + Ok(address) +} + +pub async fn create_solana_org_delegate_key( + pool: &Pool, + organization: &String, + delegate: &String, +) -> anyhow::Result { + let address = Pubkey::new_unique().to_string(); + + sqlx::query( + r#" + INSERT INTO solana_organization_delegate_keys ( + address, + organization, + delegate + ) + VALUES ($1, $2, $3) + "#, + ) + .bind(address.clone()) + .bind(organization) + .bind(delegate) + .execute(pool) + .await?; + + Ok(address) +} + +pub async fn create_solana_net_id( + pool: &Pool, + authority: &String, + id: Option, + current_addr_offset: Option, +) -> anyhow::Result { + let address = Pubkey::new_unique().to_string(); + + sqlx::query( + r#" + INSERT INTO solana_net_ids ( + address, + id, + authority, + current_addr_offset + ) + VALUES ($1, $2, $3, $4) + "#, + ) + .bind(address.clone()) + .bind(id.unwrap_or(6)) + .bind(authority) + .bind(current_addr_offset.unwrap_or(0)) + .execute(pool) + .await?; + + Ok(address) +} + +pub async fn create_org(socket_addr: SocketAddr, pool: &Pool) -> proto::OrgResV2 { + let mut client = (|| OrgClient::connect(format!("http://{socket_addr}"))) + .retry(&ExponentialBuilder::default()) + .await + .expect("org client"); + + let payer = Pubkey::new_unique().to_string(); + let net_id_res = create_solana_net_id(pool, &payer, None, None).await; + let net_id = net_id_res.unwrap(); + + let org_res = create_solana_org(pool, &payer, &payer, &net_id, None).await; + let (org_id, oui) = org_res.unwrap(); + + let devaddr_res = create_solana_org_devaddr_constraint(pool, &net_id, &org_id, None, 8).await; + let _devaddr = devaddr_res.unwrap(); + + let response = match client.get_v2(proto::OrgGetReqV2 { oui }).await { + Ok(resp) => resp, + Err(e) => { + panic!("Failed to get the org: {:?}", e); + } + }; + + response.into_inner() +} diff --git a/iot_config/tests/route_service.rs b/iot_config/tests/route_service.rs index 3c10e51e4..bac1d0f0b 100644 --- a/iot_config/tests/route_service.rs +++ b/iot_config/tests/route_service.rs @@ -8,12 +8,11 @@ use chrono::Utc; use futures::{Future, StreamExt, TryFutureExt}; use helium_crypto::{KeyTag, Keypair, PublicKey, Sign}; use helium_proto::services::iot_config::{ - self as proto, config_org_client::OrgClient, 
config_route_client::RouteClient, RouteGetReqV1, - RouteListReqV1, RouteStreamReqV1, + self as proto, config_route_client::RouteClient, RouteGetReqV1, RouteListReqV1, + RouteStreamReqV1, }; use iot_config::{ admin::{AuthCache, KeyType}, - org::{self}, OrgService, RouteService, }; use prost::Message; @@ -25,6 +24,8 @@ use tonic::{ Streaming, }; +mod fixtures; + #[sqlx::test] async fn packet_router_can_access_route_list(pool: Pool) { let signing_keypair = Arc::new(generate_keypair()); @@ -43,7 +44,7 @@ async fn packet_router_can_access_route_list(pool: Pool) { let _handle = start_server(socket_addr, signing_keypair, auth_cache, pool.clone()).await; let mut client = connect_client(socket_addr).await; - let org = create_org(socket_addr, &admin_keypair).await; + let org = fixtures::create_org(socket_addr, &pool).await; let route = create_route(&mut client, &org.org.unwrap(), &admin_keypair).await; // List Routes for OUI @@ -85,7 +86,7 @@ async fn stream_sends_all_data_when_since_is_0(pool: Pool) { let _handle = start_server(socket_addr, signing_keypair, auth_cache, pool.clone()).await; let mut client = connect_client(socket_addr).await; - let org = create_org(socket_addr, &admin_keypair).await; + let org = fixtures::create_org(socket_addr, &pool).await; let route = create_route(&mut client, &org.org.unwrap(), &admin_keypair).await; create_euis( @@ -187,17 +188,16 @@ async fn stream_only_sends_data_modified_since(pool: Pool) { let _handle = start_server(socket_addr, signing_keypair, auth_cache, pool.clone()).await; let mut client = connect_client(socket_addr).await; - let org_res_v1 = create_org(socket_addr, &admin_keypair).await; - - let proto::OrgResV1 { org: Some(org), .. } = org_res_v1 else { - panic!("invalid OrgResV1") + let org_res_v2 = fixtures::create_org(socket_addr, &pool).await; + let proto::OrgResV2 { org: Some(org), .. } = org_res_v2 else { + panic!("invalid OrgResV2") }; let route1 = create_route(&mut client, &org, &admin_keypair).await; create_euis(&mut client, &route1, vec![(200, 201)], &admin_keypair).await; - let constraint = org_res_v1.devaddr_constraints.first().unwrap(); + let constraint = org_res_v2.devaddr_constraints.first().unwrap(); create_devaddr_ranges( &mut client, &route1, @@ -293,10 +293,10 @@ async fn stream_updates_with_deactivate_reactivate(pool: Pool) { let _handle = start_server(socket_addr, signing_keypair, auth_cache, pool.clone()).await; let mut client = connect_client(socket_addr).await; - let org_res_v1 = create_org(socket_addr, &admin_keypair).await; + let org_res_v2 = fixtures::create_org(socket_addr, &pool).await; - let proto::OrgResV1 { org: Some(org), .. } = org_res_v1 else { - panic!("invalid OrgResV1") + let proto::OrgResV2 { org: Some(org), .. } = org_res_v2 else { + panic!("invalid OrgResV2") }; let route = create_route(&mut client, &org, &admin_keypair).await; @@ -498,10 +498,6 @@ async fn start_server( auth_cache: AuthCache, pool: Pool, ) -> JoinHandle> { - let (delegate_key_updater, _delegate_key_cache) = org::delegate_keys_cache(&pool) - .await - .expect("delete keys cache"); - let route_service = RouteService::new(signing_keypair.clone(), auth_cache.clone(), pool.clone()); @@ -510,7 +506,6 @@ async fn start_server( auth_cache.clone(), pool.clone(), route_service.clone_update_channel(), - delegate_key_updater, ) .expect("org service"); @@ -532,43 +527,9 @@ fn get_socket_addr() -> anyhow::Result { Ok(listener.local_addr()?) 
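// Sketch of a test built on the new fixtures (illustrative only; the fixture names come from
// fixtures.rs above and `mod fixtures;` is assumed to be declared in the test crate, but this
// test itself is not part of the change set).
#[sqlx::test]
async fn get_returns_seeded_solana_org(pool: sqlx::Pool<sqlx::Postgres>) {
    let authority = solana_sdk::pubkey::Pubkey::new_unique().to_string();
    let net_id = fixtures::create_solana_net_id(&pool, &authority, None, None)
        .await
        .expect("net id");
    let (org_address, oui) =
        fixtures::create_solana_org(&pool, &authority, &authority, &net_id, None)
            .await
            .expect("org");
    fixtures::create_solana_org_devaddr_constraint(&pool, &net_id, &org_address, None, 8)
        .await
        .expect("constraint");

    let org = iot_config::org::get(oui, &pool)
        .await
        .expect("query")
        .expect("org exists");
    assert_eq!(org.oui, oui);
    assert!(org.approved);
}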
} -async fn create_org(socket_addr: SocketAddr, admin_keypair: &Keypair) -> proto::OrgResV1 { - let mut client = (|| OrgClient::connect(format!("http://{socket_addr}"))) - .retry(&ExponentialBuilder::default()) - .await - .expect("org client"); - - let mut request = proto::OrgCreateHeliumReqV1 { - owner: generate_keypair().public_key().to_vec(), - payer: generate_keypair().public_key().to_vec(), - devaddrs: 8, - timestamp: Utc::now().timestamp() as u64, - signature: vec![], - delegate_keys: vec![], - signer: admin_keypair.public_key().into(), - net_id: 6, - }; - - request.signature = admin_keypair - .sign(&request.encode_to_vec()) - .expect("sign create org"); - - let response = client.create_helium(request).await; - - let proto::OrgResV1 { org: Some(org), .. } = response.unwrap().into_inner() else { - panic!("org response is incorrect") - }; - - let Ok(response) = client.get(proto::OrgGetReqV1 { oui: org.oui }).await else { - panic!("could not get the org") - }; - - response.into_inner() -} - async fn create_route( client: &mut RouteClient, - org: &proto::OrgV1, + org: &proto::OrgV2, signing_keypair: &Keypair, ) -> proto::RouteV1 { let mut request = proto::RouteCreateReqV1 { @@ -640,6 +601,7 @@ async fn create_euis( }) .collect::>(); + println!("Logging Route {:?}", route); let Ok(_) = client.update_euis(futures::stream::iter(requests)).await else { panic!("unable to create eui pairs") }; diff --git a/iot_packet_verifier/migrations/0007_rename_payer_to_escrow_key.sql b/iot_packet_verifier/migrations/0007_rename_payer_to_escrow_key.sql new file mode 100644 index 000000000..a12c5567e --- /dev/null +++ b/iot_packet_verifier/migrations/0007_rename_payer_to_escrow_key.sql @@ -0,0 +1,2 @@ +ALTER TABLE pending_burns RENAME COLUMN payer TO escrow_key; +ALTER TABLE pending_txns RENAME COLUMN payer TO escrow_key; \ No newline at end of file diff --git a/iot_packet_verifier/src/balances.rs b/iot_packet_verifier/src/balances.rs index 51deee5b2..d381b5716 100644 --- a/iot_packet_verifier/src/balances.rs +++ b/iot_packet_verifier/src/balances.rs @@ -2,7 +2,6 @@ use crate::{ pending::{Burn, PendingTables}, verifier::Debiter, }; -use helium_crypto::PublicKeyBinary; use solana::{burn::SolanaNetwork, SolanaRpcError}; use std::{ collections::{hash_map::Entry, HashMap}, @@ -14,11 +13,11 @@ use tokio::sync::Mutex; /// packet verifier. #[derive(Clone)] pub struct BalanceCache { - payer_accounts: BalanceStore, + escrow_accounts: BalanceStore, solana: S, } -pub type BalanceStore = Arc>>; +pub type BalanceStore = Arc>>; impl BalanceCache where @@ -30,15 +29,15 @@ where let mut balances = HashMap::new(); for Burn { - payer, + escrow_key, amount: burn_amount, } in pending_tables.fetch_all_pending_burns().await? 
{ - // Look up the current balance of the payer - let balance = solana.payer_balance(&payer).await?; + // Look up the current balance of the escrow_account + let balance = solana.escrow_balance(&escrow_key).await?; balances.insert( - payer, - PayerAccount { + escrow_key, + EscrowAccount { burned: burn_amount, balance, }, @@ -46,7 +45,7 @@ where } Ok(Self { - payer_accounts: Arc::new(Mutex::new(balances)), + escrow_accounts: Arc::new(Mutex::new(balances)), solana, }) } @@ -54,11 +53,11 @@ where impl BalanceCache { pub fn balances(&self) -> BalanceStore { - self.payer_accounts.clone() + self.escrow_accounts.clone() } - pub async fn get_payer_balance(&self, payer: &PublicKeyBinary) -> Option { - self.payer_accounts.lock().await.get(payer).cloned() + pub async fn get_escrow_balance(&self, escrow_key: &String) -> Option { + self.escrow_accounts.lock().await.get(escrow_key).cloned() } } @@ -71,33 +70,34 @@ where /// option if there was enough and none otherwise. async fn debit_if_sufficient( &self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, trigger_balance_check_threshold: u64, ) -> Result, SolanaRpcError> { - let mut payer_accounts = self.payer_accounts.lock().await; + let mut escrow_accounts = self.escrow_accounts.lock().await; - // Fetch the balance if we haven't seen the payer before - if let Entry::Vacant(payer_account) = payer_accounts.entry(payer.clone()) { - let payer_account = - payer_account.insert(PayerAccount::new(self.solana.payer_balance(payer).await?)); - return Ok((payer_account.balance >= amount).then(|| { - payer_account.burned += amount; - payer_account.balance - amount + // Fetch the balance if we haven't seen the escrow_account before + if let Entry::Vacant(escrow_account) = escrow_accounts.entry(escrow_key.clone()) { + let escrow_account = escrow_account.insert(EscrowAccount::new( + self.solana.escrow_balance(escrow_key).await?, + )); + return Ok((escrow_account.balance >= amount).then(|| { + escrow_account.burned += amount; + escrow_account.balance - amount })); } - let payer_account = payer_accounts.get_mut(payer).unwrap(); - match payer_account + let escrow_account = escrow_accounts.get_mut(escrow_key).unwrap(); + match escrow_account .balance - .checked_sub(amount + payer_account.burned) + .checked_sub(amount + escrow_account.burned) { Some(remaining_balance) => { if remaining_balance < trigger_balance_check_threshold { - payer_account.balance = self.solana.payer_balance(payer).await?; + escrow_account.balance = self.solana.escrow_balance(escrow_key).await?; } - payer_account.burned += amount; - Ok(Some(payer_account.balance - payer_account.burned)) + escrow_account.burned += amount; + Ok(Some(escrow_account.balance - escrow_account.burned)) } None => Ok(None), } @@ -105,12 +105,12 @@ where } #[derive(Copy, Clone, Debug, Default)] -pub struct PayerAccount { +pub struct EscrowAccount { pub balance: u64, pub burned: u64, } -impl PayerAccount { +impl EscrowAccount { pub fn new(balance: u64) -> Self { Self { balance, burned: 0 } } diff --git a/iot_packet_verifier/src/burner.rs b/iot_packet_verifier/src/burner.rs index a42ce70aa..0649b9da0 100644 --- a/iot_packet_verifier/src/burner.rs +++ b/iot_packet_verifier/src/burner.rs @@ -102,23 +102,23 @@ where return Err(BurnError::ExistingPendingTransactions(pending_txns.len())); } - // Fetch the next payer and amount that should be burn. If no such burn + // Fetch the next escrow and amount that should be burn. If no such burn // exists, perform no action. 
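// Illustrative check (not part of this patch) of the EscrowAccount bookkeeping behind
// debit_if_sufficient: `burned` accumulates DC that have been debited but not yet burned
// on chain, so the spendable amount at any point is balance - burned.
#[test]
fn escrow_account_spendable_amount() {
    let mut account = EscrowAccount::new(1_000);
    // A 300 DC debit is allowed: 1_000 - 0 >= 300.
    assert!(account.balance - account.burned >= 300);
    account.burned += 300;
    // A further 800 DC debit must be refused: only 700 DC remain spendable.
    assert!(account.balance - account.burned < 800);
}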
- let Some(Burn { payer, amount }) = self.pending_tables.fetch_next_burn().await? else { + let Some(Burn { escrow_key, amount }) = self.pending_tables.fetch_next_burn().await? else { tracing::info!("no pending burns"); return Ok(()); }; - tracing::info!(%amount, %payer, "Burning DC"); + tracing::info!(%amount, %escrow_key, "Burning DC"); // Create a burn transaction and execute it: let txn = self .solana - .make_burn_transaction(&payer, amount) + .make_burn_transaction(&escrow_key, amount) .await .map_err(BurnError::SolanaError)?; self.pending_tables - .add_pending_transaction(&payer, amount, txn.get_signature()) + .add_pending_transaction(&escrow_key, amount, txn.get_signature()) .await?; self.solana .submit_transaction(&txn) @@ -133,18 +133,18 @@ where .remove_pending_transaction(txn.get_signature()) .await?; pending_tables_txn - .subtract_burned_amount(&payer, amount) + .subtract_burned_amount(&escrow_key, amount) .await?; pending_tables_txn.commit().await?; let mut balance_lock = self.balances.lock().await; - let payer_account = balance_lock.get_mut(&payer).unwrap(); - // Reduce the pending burn amount and the payer's balance by the amount + let escrow_account = balance_lock.get_mut(&escrow_key).unwrap(); + // Reduce the pending burn amount and the escrow_accounts's balance by the amount // we've burned. - payer_account.burned = payer_account.burned.saturating_sub(amount); - payer_account.balance = payer_account.balance.saturating_sub(amount); + escrow_account.burned = escrow_account.burned.saturating_sub(amount); + escrow_account.balance = escrow_account.balance.saturating_sub(amount); - metrics::counter!("burned", "payer" => payer.to_string()).increment(amount); + metrics::counter!("burned", "escrow_key" => escrow_key.to_string()).increment(amount); Ok(()) } diff --git a/iot_packet_verifier/src/daemon.rs b/iot_packet_verifier/src/daemon.rs index 5fe4621c2..baffeb03b 100644 --- a/iot_packet_verifier/src/daemon.rs +++ b/iot_packet_verifier/src/daemon.rs @@ -3,7 +3,7 @@ use crate::{ burner::Burner, pending::confirm_pending_txns, settings::Settings, - verifier::{CachedOrgClient, ConfigServer, Verifier}, + verifier::{CachedOrgClient, ConfigServer, ConfigServerError, Verifier}, }; use anyhow::{bail, Result}; use file_store::{ @@ -37,6 +37,7 @@ struct Daemon { impl ManagedTask for Daemon where O: Orgs, + ConfigServerError: From<::Error>, { fn start_task( self: Box, @@ -49,6 +50,7 @@ where impl Daemon where O: Orgs, + ConfigServerError: From<::Error>, { pub async fn run(mut self, shutdown: triggered::Listener) -> Result<()> { tracing::info!("Starting verifier daemon"); diff --git a/iot_packet_verifier/src/pending.rs b/iot_packet_verifier/src/pending.rs index 83a0418ef..653f5f06a 100644 --- a/iot_packet_verifier/src/pending.rs +++ b/iot_packet_verifier/src/pending.rs @@ -1,6 +1,5 @@ use async_trait::async_trait; use chrono::{DateTime, Duration, Utc}; -use helium_crypto::PublicKeyBinary; use solana::{burn::SolanaNetwork, SolanaRpcError}; use solana_sdk::signature::Signature; use sqlx::{postgres::PgRow, FromRow, PgPool, Postgres, Row, Transaction}; @@ -10,14 +9,14 @@ use tokio::sync::Mutex; use crate::balances::BalanceStore; /// To avoid excessive burn transaction (which cost us money), we institute a minimum -/// amount of Data Credits accounted for before we burn from a payer: +/// amount of Data Credits accounted for before we burn from a escrow account: pub const BURN_THRESHOLD: i64 = 10_000; #[async_trait] pub trait AddPendingBurn { async fn add_burned_amount( &mut self, - payer: 
&PublicKeyBinary, + escrow_key: &String, amount: u64, ) -> Result<(), sqlx::Error>; } @@ -36,17 +35,17 @@ pub trait PendingTables { async fn add_pending_transaction( &self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, signature: &Signature, ) -> Result<(), sqlx::Error> { - self.do_add_pending_transaction(payer, amount, signature, Utc::now()) + self.do_add_pending_transaction(escrow_key, amount, signature, Utc::now()) .await } async fn do_add_pending_transaction( &self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, signature: &Signature, time_of_submission: DateTime, @@ -94,12 +93,12 @@ where .await .map_err(ConfirmPendingError::SolanaError)? { - txn.subtract_burned_amount(&pending.payer, pending.amount) + txn.subtract_burned_amount(&pending.escrow_key, pending.amount) .await?; let mut balance_lock = balances.lock().await; - let payer_account = balance_lock.get_mut(&pending.payer).unwrap(); - payer_account.burned = payer_account.burned.saturating_sub(pending.amount); - payer_account.balance = payer_account.balance.saturating_sub(pending.amount); + let escrow_account = balance_lock.get_mut(&pending.escrow_key).unwrap(); + escrow_account.burned = escrow_account.burned.saturating_sub(pending.amount); + escrow_account.balance = escrow_account.balance.saturating_sub(pending.amount); } // Commit our work: txn.commit().await?; @@ -117,7 +116,7 @@ pub trait PendingTablesTransaction<'a> { async fn subtract_burned_amount( &mut self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, ) -> Result<(), sqlx::Error>; @@ -155,19 +154,19 @@ impl PendingTables for PgPool { async fn do_add_pending_transaction( &self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, signature: &Signature, time_of_submission: DateTime, ) -> Result<(), sqlx::Error> { sqlx::query( r#" - INSERT INTO pending_txns (signature, payer, amount, time_of_submission) + INSERT INTO pending_txns (signature, escrow_key, amount, time_of_submission) VALUES ($1, $2, $3, $4) "#, ) .bind(signature.to_string()) - .bind(payer) + .bind(escrow_key) .bind(amount as i64) .bind(time_of_submission) .execute(self) @@ -180,18 +179,18 @@ impl PendingTables for PgPool { impl AddPendingBurn for Transaction<'_, Postgres> { async fn add_burned_amount( &mut self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, ) -> Result<(), sqlx::Error> { sqlx::query( r#" - INSERT INTO pending_burns (payer, amount, last_burn) + INSERT INTO pending_burns (escrow_key, amount, last_burn) VALUES ($1, $2, $3) - ON CONFLICT (payer) DO UPDATE SET + ON CONFLICT (escrow_key) DO UPDATE SET amount = pending_burns.amount + $2 "#, ) - .bind(payer) + .bind(escrow_key) .bind(amount as i64) .bind(Utc::now()) .execute(&mut **self) @@ -215,7 +214,7 @@ impl<'a> PendingTablesTransaction<'a> for Transaction<'a, Postgres> { async fn subtract_burned_amount( &mut self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, ) -> Result<(), sqlx::Error> { sqlx::query( @@ -224,12 +223,12 @@ impl<'a> PendingTablesTransaction<'a> for Transaction<'a, Postgres> { amount = amount - $1, last_burn = $2 WHERE - payer = $3 + escrow_key = $3 "#, ) .bind(amount as i64) .bind(Utc::now()) - .bind(payer) + .bind(escrow_key) .execute(&mut *self) .await?; Ok(()) @@ -242,14 +241,14 @@ impl<'a> PendingTablesTransaction<'a> for Transaction<'a, Postgres> { #[derive(Debug)] pub struct Burn { - pub payer: PublicKeyBinary, + pub escrow_key: String, pub amount: u64, } impl FromRow<'_, PgRow> for Burn { fn from_row(row: &PgRow) -> sqlx::Result { 
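// Sketch (hypothetical helper, not part of this patch): after add_burned_amount upserts into
// pending_burns with ON CONFLICT (escrow_key), the accumulated amount for a single escrow key
// can be read back like this.
async fn pending_burn_amount(pool: &sqlx::PgPool, escrow_key: &str) -> Result<u64, sqlx::Error> {
    let amount: Option<i64> =
        sqlx::query_scalar("SELECT amount FROM pending_burns WHERE escrow_key = $1")
            .bind(escrow_key)
            .fetch_optional(pool)
            .await?;
    Ok(amount.unwrap_or(0) as u64)
}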
Ok(Self { - payer: row.try_get("payer")?, + escrow_key: row.try_get("escrow_key")?, amount: row.try_get::("amount")? as u64, }) } @@ -257,7 +256,7 @@ impl FromRow<'_, PgRow> for Burn { pub struct PendingTxn { pub signature: Signature, - pub payer: PublicKeyBinary, + pub escrow_key: String, pub amount: u64, pub time_of_submission: DateTime, } @@ -265,7 +264,7 @@ pub struct PendingTxn { impl FromRow<'_, PgRow> for PendingTxn { fn from_row(row: &PgRow) -> sqlx::Result { Ok(Self { - payer: row.try_get("payer")?, + escrow_key: row.try_get("escrow_key")?, amount: row.try_get::("amount")? as u64, time_of_submission: row.try_get("time_of_submission")?, signature: row @@ -280,14 +279,14 @@ impl FromRow<'_, PgRow> for PendingTxn { } #[async_trait] -impl AddPendingBurn for Arc>> { +impl AddPendingBurn for Arc>> { async fn add_burned_amount( &mut self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, ) -> Result<(), sqlx::Error> { let mut map = self.lock().await; - *map.entry(payer.clone()).or_default() += amount; + *map.entry(escrow_key.clone()).or_default() += amount; Ok(()) } } diff --git a/iot_packet_verifier/src/verifier.rs b/iot_packet_verifier/src/verifier.rs index f0e6f10f0..a28edc0e0 100644 --- a/iot_packet_verifier/src/verifier.rs +++ b/iot_packet_verifier/src/verifier.rs @@ -6,7 +6,6 @@ use file_store::{ traits::{MsgBytes, MsgTimestamp}, }; use futures::{Stream, StreamExt}; -use helium_crypto::PublicKeyBinary; use helium_proto::services::{ packet_verifier::{InvalidPacket, InvalidPacketReason, ValidPacket}, router::packet_router_packet_report_v1::PacketType, @@ -57,7 +56,7 @@ where valid_packets: &mut impl PacketWriter, invalid_packets: &mut impl PacketWriter, ) -> Result<(), VerificationError> { - let mut org_cache = HashMap::::new(); + let mut org_cache = HashMap::::new(); tokio::pin!(reports); @@ -72,18 +71,18 @@ where payload_size_to_dc(report.payload_size as u64) }; - let payer = self + let escrow_key = self .config_server .fetch_org(report.oui, &mut org_cache) .await?; if let Some(remaining_balance) = self .debiter - .debit_if_sufficient(&payer, debit_amount, minimum_allowed_balance) + .debit_if_sufficient(&escrow_key, debit_amount, minimum_allowed_balance) .await? { pending_burns - .add_burned_amount(&payer, debit_amount) + .add_burned_amount(&escrow_key, debit_amount) .await?; valid_packets @@ -132,7 +131,7 @@ pub trait Debiter { /// return the remaining amount. 
async fn debit_if_sufficient( &self, - payer: &PublicKeyBinary, + escrow_key: &String, amount: u64, trigger_balance_check_threshold: u64, ) -> Result, SolanaRpcError>; @@ -142,7 +141,7 @@ pub trait Debiter { pub struct Org { pub oui: u64, - pub payer: PublicKeyBinary, + pub escrow_key: String, pub locked: bool, } @@ -151,8 +150,8 @@ pub trait ConfigServer: Sized + Send + Sync + 'static { async fn fetch_org( &self, oui: u64, - cache: &mut HashMap, - ) -> Result; + cache: &mut HashMap, + ) -> Result; async fn disable_org(&self, oui: u64) -> Result<(), ConfigServerError>; @@ -176,11 +175,16 @@ pub trait ConfigServer: Sized + Send + Sync + 'static { loop { tracing::info!("Checking if any orgs need to be re-enabled"); - for Org { locked, payer, oui } in self.list_orgs().await?.into_iter() { + for Org { + locked, + escrow_key, + oui, + } in self.list_orgs().await?.into_iter() + { if locked { - let balance = solana.payer_balance(&payer).await?; + let balance = solana.escrow_balance(&escrow_key).await?; if balance >= minimum_allowed_balance { - balances.set_balance(&payer, balance).await; + balances.set_balance(&escrow_key, balance).await; self.enable_org(oui).await?; } } @@ -202,21 +206,25 @@ pub trait ConfigServer: Sized + Send + Sync + 'static { #[async_trait] pub trait BalanceStore: Send + Sync + 'static { - async fn set_balance(&self, payer: &PublicKeyBinary, balance: u64); + async fn set_balance(&self, escrow_key: &String, balance: u64); } #[async_trait] impl BalanceStore for crate::balances::BalanceStore { - async fn set_balance(&self, payer: &PublicKeyBinary, balance: u64) { - self.lock().await.entry(payer.clone()).or_default().balance = balance; + async fn set_balance(&self, escrow_key: &String, balance: u64) { + self.lock() + .await + .entry(escrow_key.clone()) + .or_default() + .balance = balance; } } #[async_trait] // differs from the BalanceStore in the value stored in the contained HashMap; a u64 here instead of a Balance {} struct -impl BalanceStore for Arc>> { - async fn set_balance(&self, payer: &PublicKeyBinary, balance: u64) { - *self.lock().await.entry(payer.clone()).or_default() = balance; +impl BalanceStore for Arc>> { + async fn set_balance(&self, escrow_key: &String, balance: u64) { + *self.lock().await.entry(escrow_key.clone()).or_default() = balance; } } @@ -256,24 +264,25 @@ impl CachedOrgClient { impl ConfigServer for Arc>> where O: Orgs, + ConfigServerError: From<::Error>, { async fn fetch_org( &self, oui: u64, - oui_cache: &mut HashMap, - ) -> Result { + oui_cache: &mut HashMap, + ) -> Result { if let Entry::Vacant(e) = oui_cache.entry(oui) { - let pubkey = PublicKeyBinary::from( - self.lock() - .await - .orgs - .get(oui) - .await? - .org - .ok_or(ConfigServerError::NotFound(oui))? - .payer, - ); - e.insert(pubkey); + let escrow_key = self + .lock() + .await + .orgs + .get(oui) + .await? + .org + .ok_or(ConfigServerError::NotFound(oui))? 
+ .escrow_key; + + e.insert(escrow_key); } Ok(oui_cache.get(&oui).unwrap().clone()) } @@ -306,7 +315,7 @@ where .into_iter() .map(|org| Org { oui: org.oui, - payer: PublicKeyBinary::from(org.payer), + escrow_key: org.escrow_key, locked: org.locked, }) .collect()) diff --git a/iot_packet_verifier/tests/integration_tests.rs b/iot_packet_verifier/tests/integration_tests.rs index ec867fae3..5e463580a 100644 --- a/iot_packet_verifier/tests/integration_tests.rs +++ b/iot_packet_verifier/tests/integration_tests.rs @@ -11,7 +11,7 @@ use helium_proto::{ DataRate, Region, }; use iot_packet_verifier::{ - balances::{BalanceCache, PayerAccount}, + balances::{BalanceCache, EscrowAccount}, burner::{BurnError, Burner}, pending::{confirm_pending_txns, AddPendingBurn, Burn, PendingTables, BURN_THRESHOLD}, verifier::{payload_size_to_dc, ConfigServer, ConfigServerError, Org, Verifier, BYTES_PER_DC}, @@ -25,21 +25,21 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use tokio::sync::Mutex; struct MockConfig { - payer: PublicKeyBinary, + escrow_key: String, enabled: bool, } #[derive(Default, Clone)] struct MockConfigServer { - payers: Arc>>, + escrow_keys: Arc>>, } impl MockConfigServer { - async fn insert(&self, oui: u64, payer: PublicKeyBinary) { - self.payers.lock().await.insert( + async fn insert(&self, oui: u64, escrow_key: String) { + self.escrow_keys.lock().await.insert( oui, MockConfig { - payer, + escrow_key, enabled: true, }, ); @@ -51,30 +51,37 @@ impl ConfigServer for MockConfigServer { async fn fetch_org( &self, oui: u64, - _cache: &mut HashMap, - ) -> Result { - Ok(self.payers.lock().await.get(&oui).unwrap().payer.clone()) + _cache: &mut HashMap, + ) -> Result { + Ok(self + .escrow_keys + .lock() + .await + .get(&oui) + .unwrap() + .escrow_key + .clone()) } async fn disable_org(&self, oui: u64) -> Result<(), ConfigServerError> { - self.payers.lock().await.get_mut(&oui).unwrap().enabled = false; + self.escrow_keys.lock().await.get_mut(&oui).unwrap().enabled = false; Ok(()) } async fn enable_org(&self, oui: u64) -> Result<(), ConfigServerError> { - self.payers.lock().await.get_mut(&oui).unwrap().enabled = true; + self.escrow_keys.lock().await.get_mut(&oui).unwrap().enabled = true; Ok(()) } async fn list_orgs(&self) -> Result, ConfigServerError> { Ok(self - .payers + .escrow_keys .lock() .await .iter() .map(|(oui, config)| Org { oui: *oui, - payer: config.payer.clone(), + escrow_key: config.escrow_key.clone(), locked: !config.enabled, }) .collect()) @@ -158,18 +165,18 @@ fn invalid_packet(payload_size: u32, payload_hash: Vec) -> InvalidPacket { #[sqlx::test] async fn test_config_unlocking(pool: PgPool) -> anyhow::Result<()> { - let payer = PublicKeyBinary::from(vec![0]); + let escrow_key = format!("OUI_{}", 0); // Set up orgs: let orgs = MockConfigServer::default(); - orgs.insert(0_u64, payer.clone()).await; + orgs.insert(0_u64, escrow_key.clone()).await; // Set up balances: let solana_network = TestSolanaClientMap::default(); - solana_network.insert(&payer, 3).await; + solana_network.insert(&escrow_key, 3).await; // Set up cache: let mut txn = pool.begin().await?; - txn.add_burned_amount(&payer, 3).await?; + txn.add_burned_amount(&escrow_key, 3).await?; txn.commit().await?; let balances = BalanceCache::new(&pool, solana_network.clone()).await?; @@ -197,10 +204,10 @@ async fn test_config_unlocking(pool: PgPool) -> anyhow::Result<()> { .unwrap(); pending_burn_txn.commit().await?; - assert!(!orgs.payers.lock().await.get(&0).unwrap().enabled); + 
assert!(!orgs.escrow_keys.lock().await.get(&0).unwrap().enabled); // Update the solana network: - solana_network.insert(&payer, 50).await; + solana_network.insert(&escrow_key, 50).await; let (trigger, listener) = triggered::trigger(); @@ -224,13 +231,13 @@ async fn test_config_unlocking(pool: PgPool) -> anyhow::Result<()> { tokio::time::sleep(Duration::from_secs(1)).await; // We should be re-enabled - assert!(orgs.payers.lock().await.get(&0).unwrap().enabled); + assert!(orgs.escrow_keys.lock().await.get(&0).unwrap().enabled); assert_eq!( balances .balances() .lock() .await - .get(&payer) + .get(&escrow_key) .unwrap() .balance, 50 @@ -256,18 +263,18 @@ async fn test_config_unlocking(pool: PgPool) -> anyhow::Result<()> { pending_burn_txn.commit().await?; // Still enabled: - assert!(orgs.payers.lock().await.get(&0).unwrap().enabled); + assert!(orgs.escrow_keys.lock().await.get(&0).unwrap().enabled); - let payer_account = balances - .get_payer_balance(&payer) + let escrow_account = balances + .get_escrow_balance(&escrow_key) .await - .expect("known payer"); + .expect("known escrow"); assert_eq!( - payer_account.balance, 50, + escrow_account.balance, 50, "balance has not been deducted because no solana burn has been sent" ); assert_eq!( - payer_account.burned, 7, + escrow_account.burned, 7, "burned is previous amount plus new sufficient amount" ); @@ -283,15 +290,15 @@ async fn test_verifier_free_packets(pool: PgPool) -> anyhow::Result<()> { packet_report(0, 2, 1, vec![6], true), ]; - let org_pubkey = PublicKeyBinary::from(vec![0]); + let org_escrow_key = format!("OUI_{}", 0); // Set up orgs: let orgs = MockConfigServer::default(); - orgs.insert(0_u64, org_pubkey.clone()).await; + orgs.insert(0_u64, org_escrow_key.clone()).await; // Set up balances: let solana_network = TestSolanaClientMap::default(); - solana_network.insert(&org_pubkey, 5).await; + solana_network.insert(&org_escrow_key, 5).await; let balances = BalanceCache::new(&pool, solana_network).await?; // Set up output: @@ -330,15 +337,15 @@ async fn test_verifier_free_packets(pool: PgPool) -> anyhow::Result<()> { assert!(invalid_packets.is_empty()); - let payers = verifier.config_server.payers.lock().await; - assert!(payers.get(&0).unwrap().enabled); + let escrow_keys = verifier.config_server.escrow_keys.lock().await; + assert!(escrow_keys.get(&0).unwrap().enabled); - let payer_balance = balances - .get_payer_balance(&org_pubkey) + let escrow_balance = balances + .get_escrow_balance(&org_escrow_key) .await .expect("known payer"); - assert_eq!(payer_balance.balance, 5, "balance should not have changed"); - assert_eq!(payer_balance.burned, 0, "nothing should be burned"); + assert_eq!(escrow_balance.balance, 5, "balance should not have changed"); + assert_eq!(escrow_balance.burned, 0, "nothing should be burned"); Ok(()) } @@ -362,20 +369,14 @@ async fn test_verifier(pool: PgPool) -> anyhow::Result<()> { ]; // Set up orgs: let orgs = MockConfigServer::default(); - orgs.insert(0_u64, PublicKeyBinary::from(vec![0])).await; - orgs.insert(1_u64, PublicKeyBinary::from(vec![1])).await; - orgs.insert(2_u64, PublicKeyBinary::from(vec![2])).await; + orgs.insert(0_u64, format!("OUI_{}", 0)).await; + orgs.insert(1_u64, format!("OUI_{}", 1)).await; + orgs.insert(2_u64, format!("OUI_{}", 2)).await; // Set up balances: let solana_network = TestSolanaClientMap::default(); - solana_network - .insert(&PublicKeyBinary::from(vec![0]), 3) - .await; - solana_network - .insert(&PublicKeyBinary::from(vec![1]), 5) - .await; - solana_network - 
.insert(&PublicKeyBinary::from(vec![2]), 2) - .await; + solana_network.insert(&format!("OUI_{}", 0), 3).await; + solana_network.insert(&format!("OUI_{}", 1), 5).await; + solana_network.insert(&format!("OUI_{}", 2), 2).await; let balances = BalanceCache::new(&pool, solana_network.clone()).await?; // Set up output: @@ -420,17 +421,17 @@ async fn test_verifier(pool: PgPool) -> anyhow::Result<()> { assert_eq!(invalid_packets, vec![invalid_packet(1, vec![3]),]); // Verify that only org #0 is disabled: - let payers = verifier.config_server.payers.lock().await; - assert!(!payers.get(&0).unwrap().enabled); - assert!(payers.get(&1).unwrap().enabled); - assert!(payers.get(&2).unwrap().enabled); + let escrow_keys = verifier.config_server.escrow_keys.lock().await; + assert!(!escrow_keys.get(&0).unwrap().enabled); + assert!(escrow_keys.get(&1).unwrap().enabled); + assert!(escrow_keys.get(&2).unwrap().enabled); Ok(()) } #[sqlx::test] async fn test_end_to_end(pool: PgPool) -> anyhow::Result<()> { - let payer = PublicKeyBinary::from(vec![0]); + let escrow_key = format!("OUI_{}", 0); // Our balance and packet size has to surpass BURN_THRESHOLD // for burning to consider the verified packets. @@ -439,7 +440,7 @@ async fn test_end_to_end(pool: PgPool) -> anyhow::Result<()> { // Solana network: let solana_network = TestSolanaClientMap::default(); - solana_network.insert(&payer, STARTING_BALANCE).await; // Start with 3 data credits + solana_network.insert(&escrow_key, STARTING_BALANCE).await; // Start with 3 data credits // Balance cache: let balance_cache = BalanceCache::new(&pool, solana_network.clone()).await?; @@ -454,7 +455,7 @@ async fn test_end_to_end(pool: PgPool) -> anyhow::Result<()> { // Orgs: let orgs = MockConfigServer::default(); - orgs.insert(0_u64, payer.clone()).await; + orgs.insert(0_u64, escrow_key.clone()).await; // Packet output: let mut valid_packets = Vec::new(); @@ -489,7 +490,7 @@ async fn test_end_to_end(pool: PgPool) -> anyhow::Result<()> { assert!( !verifier .config_server - .payers + .escrow_keys .lock() .await .get(&0) @@ -516,7 +517,7 @@ async fn test_end_to_end(pool: PgPool) -> anyhow::Result<()> { // Check that 3x the BURN_THRESHOLD DC are pending to be burned: let balance = verifier .debiter - .get_payer_balance(&payer) + .get_escrow_balance(&escrow_key) .await .expect("known payer"); assert_eq!(balance.balance, STARTING_BALANCE); @@ -528,21 +529,21 @@ async fn test_end_to_end(pool: PgPool) -> anyhow::Result<()> { // Now that we've burn, the balances and burn amount should be reset: let balance = verifier .debiter - .get_payer_balance(&payer) + .get_escrow_balance(&escrow_key) .await - .expect("known payer"); + .expect("known escrow"); assert_eq!(balance.balance, 0); assert_eq!(balance.burned, 0); // Pending burns should be empty as well: - let payer_balance = balance_cache - .get_payer_balance(&payer) + let escrow_balance = balance_cache + .get_escrow_balance(&escrow_key) .await .expect("known payer"); - assert_eq!(payer_balance.burned, 0, "pending was burned"); + assert_eq!(escrow_balance.burned, 0, "pending was burned"); // Additionally, the balance on the solana network should be zero: - let solana_balance = solana_network.get_payer_balance(&payer).await; + let solana_balance = solana_network.get_escrow_balance(&escrow_key).await; assert_eq!(solana_balance, 0, "solana balance"); // Attempting to validate one packet should fail now: @@ -576,32 +577,30 @@ async fn test_end_to_end(pool: PgPool) -> anyhow::Result<()> { async fn test_pending_txns(pool: PgPool) -> 
anyhow::Result<()> { const CONFIRMED_BURN_AMOUNT: u64 = 7; const UNCONFIRMED_BURN_AMOUNT: u64 = 11; - let payer: PublicKeyBinary = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6" - .parse() - .unwrap(); + let escrow_key = format!("112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6"); let mut ledger = HashMap::new(); ledger.insert( - payer.clone(), + escrow_key.clone(), CONFIRMED_BURN_AMOUNT + UNCONFIRMED_BURN_AMOUNT, ); let mut cache = HashMap::new(); cache.insert( - payer.clone(), - PayerAccount { + escrow_key.clone(), + EscrowAccount { balance: CONFIRMED_BURN_AMOUNT + UNCONFIRMED_BURN_AMOUNT, burned: CONFIRMED_BURN_AMOUNT + UNCONFIRMED_BURN_AMOUNT, }, ); let solana_network = TestSolanaClientMap::default(); solana_network - .insert(&payer, CONFIRMED_BURN_AMOUNT + UNCONFIRMED_BURN_AMOUNT) + .insert(&escrow_key, CONFIRMED_BURN_AMOUNT + UNCONFIRMED_BURN_AMOUNT) .await; // Add both the burn amounts to the pending burns table { let mut transaction = pool.begin().await.unwrap(); transaction - .add_burned_amount(&payer, CONFIRMED_BURN_AMOUNT + UNCONFIRMED_BURN_AMOUNT) + .add_burned_amount(&escrow_key, CONFIRMED_BURN_AMOUNT + UNCONFIRMED_BURN_AMOUNT) .await .unwrap(); transaction.commit().await.unwrap(); @@ -610,12 +609,12 @@ async fn test_pending_txns(pool: PgPool) -> anyhow::Result<()> { // First transaction is confirmed { let txn = solana_network - .make_burn_transaction(&payer, CONFIRMED_BURN_AMOUNT) + .make_burn_transaction(&escrow_key, CONFIRMED_BURN_AMOUNT) .await .unwrap(); pool.do_add_pending_transaction( - &payer, + &escrow_key, CONFIRMED_BURN_AMOUNT, txn.get_signature(), Utc::now() - chrono::Duration::minutes(2), @@ -630,11 +629,11 @@ async fn test_pending_txns(pool: PgPool) -> anyhow::Result<()> { // Make submission time in past to bypass confirm txn sleep { let txn = solana_network - .make_burn_transaction(&payer, UNCONFIRMED_BURN_AMOUNT) + .make_burn_transaction(&escrow_key, UNCONFIRMED_BURN_AMOUNT) .await .unwrap(); pool.do_add_pending_transaction( - &payer, + &escrow_key, UNCONFIRMED_BURN_AMOUNT, txn.get_signature(), Utc::now() - chrono::Duration::minutes(3), @@ -661,8 +660,8 @@ async fn test_pending_txns(pool: PgPool) -> anyhow::Result<()> { #[sqlx::test] async fn test_burn_with_pending_txn_triggers_confirmation(pool: PgPool) -> anyhow::Result<()> { - let org_one = PublicKeyBinary::from(vec![0]); - let org_two = PublicKeyBinary::from(vec![1]); + let org_one = format!("OUI_{}", 0); + let org_two = format!("OUI_{}", 1); let solana_network = TestSolanaClientMap::default(); solana_network.insert(&org_one, 50_000).await; @@ -731,20 +730,17 @@ async fn test_burn_with_pending_txn_triggers_confirmation(pool: PgPool) -> anyho Ok(()) } -async fn assert_pending_burns( - pool: &PgPool, - expected: &[(&PublicKeyBinary, u64)], -) -> anyhow::Result<()> { +async fn assert_pending_burns(pool: &PgPool, expected: &[(&String, u64)]) -> anyhow::Result<()> { let burns = sqlx::query_as("SELECT * from pending_burns") .fetch_all(pool) .await? 
.into_iter() - .map(|burn: Burn| (burn.payer, burn.amount)) + .map(|burn: Burn| (burn.escrow_key, burn.amount)) .collect::>(); for (key, expected_amount) in expected { let amount = burns - .get(key) + .get(key.as_str()) .unwrap_or_else(|| panic!("{key:?} does not exist in pending burns")); assert_eq!(amount, expected_amount); } diff --git a/mobile_packet_verifier/src/burner.rs b/mobile_packet_verifier/src/burner.rs index a325a2a37..203a9eca6 100644 --- a/mobile_packet_verifier/src/burner.rs +++ b/mobile_packet_verifier/src/burner.rs @@ -92,8 +92,7 @@ where let total_dcs = payer_pending_burn.total_dcs; let payer = payer_pending_burn.payer; let sessions = payer_pending_burn.sessions; - - let payer_balance = self.solana.payer_balance(&payer).await?; + let payer_balance = self.solana.escrow_balance(&payer.to_string()).await?; if payer_balance < total_dcs { tracing::warn!( @@ -106,7 +105,10 @@ where } tracing::info!(%total_dcs, %payer, "Burning DC"); - let txn = self.solana.make_burn_transaction(&payer, total_dcs).await?; + let txn = self + .solana + .make_burn_transaction(&payer.to_string(), total_dcs) + .await?; pending_txns::add_pending_txn(pool, &payer, total_dcs, txn.get_signature()) .await .context("adding pending txns and moving sessions")?; diff --git a/mobile_packet_verifier/tests/integrations/burner.rs b/mobile_packet_verifier/tests/integrations/burner.rs index a31971d84..267457477 100644 --- a/mobile_packet_verifier/tests/integrations/burner.rs +++ b/mobile_packet_verifier/tests/integrations/burner.rs @@ -19,10 +19,10 @@ fn burn_checks_for_sufficient_balance(pool: PgPool) -> anyhow::Result<()> { // Initialize payers with balances let solana_network = TestSolanaClientMap::default(); solana_network - .insert(&payer_insufficent, ORIGINAL_BALANCE) + .insert(&payer_insufficent.to_string(), ORIGINAL_BALANCE) .await; solana_network - .insert(&payer_sufficient, ORIGINAL_BALANCE) + .insert(&payer_sufficient.to_string(), ORIGINAL_BALANCE) .await; // Add Data Transfer Session for both payers @@ -65,12 +65,17 @@ fn burn_checks_for_sufficient_balance(pool: PgPool) -> anyhow::Result<()> { // Ensure balance for payers through solana mock assert_eq!( - solana_network.get_payer_balance(&payer_insufficent).await, + solana_network + .get_escrow_balance(&payer_insufficent.to_string()) + .await, ORIGINAL_BALANCE, "original balance" ); assert!( - solana_network.get_payer_balance(&payer_sufficient).await < ORIGINAL_BALANCE, + solana_network + .get_escrow_balance(&payer_sufficient.to_string()) + .await + < ORIGINAL_BALANCE, "reduced balance" ); @@ -87,7 +92,7 @@ async fn test_confirm_pending_txns(pool: PgPool) -> anyhow::Result<()> { let payer_two = PublicKeyBinary::from(vec![2]); let solana_network = TestSolanaClientMap::default(); - solana_network.insert(&payer_one, 10_000).await; + solana_network.insert(&payer_one.to_string(), 10_000).await; save_data_transfer_sessions( &pool, @@ -345,7 +350,7 @@ fn will_not_burn_when_pending_txns(pool: PgPool) -> anyhow::Result<()> { // Burn does nothing because of pending transactions let solana_network = TestSolanaClientMap::default(); - solana_network.insert(&payer, 10_000).await; + solana_network.insert(&payer.to_string(), 10_000).await; let (valid_sessions_tx, mut valid_sessions_rx) = tokio::sync::mpsc::channel(10); let valid_sessions = FileSinkClient::new(valid_sessions_tx, "test"); diff --git a/solana/src/burn.rs b/solana/src/burn.rs index 0d4b993bd..ba424fe30 100644 --- a/solana/src/burn.rs +++ b/solana/src/burn.rs @@ -7,7 +7,6 @@ use helium_anchor_gen::{ 
     data_credits::{self, accounts, instruction},
     helium_sub_daos::{self, DaoV0, SubDaoV0},
 };
-use helium_crypto::PublicKeyBinary;
 use itertools::Itertools;
 use serde::Deserialize;
 use sha2::{Digest, Sha256};
@@ -23,10 +22,7 @@ use solana_sdk::{
     signer::Signer,
     transaction::Transaction,
 };
-use std::{
-    collections::{HashMap, HashSet},
-    str::FromStr,
-};
+use std::collections::{HashMap, HashSet};
 use std::{sync::Arc, time::SystemTime};
 use tokio::sync::Mutex;
@@ -34,11 +30,12 @@
 pub trait SolanaNetwork: Clone + Send + Sync + 'static {
     type Transaction: GetSignature + Send + Sync + 'static;

-    async fn payer_balance(&self, payer: &PublicKeyBinary) -> Result<u64, SolanaRpcError>;
+    async fn escrow_balance(&self, escrow_key: &String) -> Result<u64, SolanaRpcError>;

+    #[allow(clippy::ptr_arg)]
     async fn make_burn_transaction(
         &self,
-        payer: &PublicKeyBinary,
+        escrow_key: &String,
         amount: u64,
     ) -> Result<Self::Transaction, SolanaRpcError>;
@@ -58,7 +55,7 @@ pub struct Settings {
     dc_mint: String,
     dnt_mint: String,
     #[serde(default)]
-    payers_to_monitor: Vec<String>,
+    escrow_keys_to_monitor: Vec<String>,
     #[serde(default = "min_priority_fee")]
     min_priority_fee: u64,
 }
@@ -68,12 +65,8 @@ fn min_priority_fee() -> u64 {
 }

 impl Settings {
-    pub fn payers_to_monitor(&self) -> Result<Vec<PublicKeyBinary>, SolanaRpcError> {
-        self.payers_to_monitor
-            .iter()
-            .map(|payer| PublicKeyBinary::from_str(payer))
-            .collect::<Result<_, _>>()
-            .map_err(SolanaRpcError::from)
+    pub fn escrow_keys_to_monitor(&self) -> Vec<String> {
+        self.escrow_keys_to_monitor.clone()
     }
 }
@@ -83,7 +76,7 @@ pub struct SolanaRpc {
     program_cache: BurnProgramCache,
     cluster: String,
     keypair: [u8; 64],
-    payers_to_monitor: Vec<PublicKeyBinary>,
+    escrow_keys_to_monitor: Vec<String>,
     priority_fee: PriorityFee,
     min_priority_fee: u64,
 }
@@ -108,7 +101,7 @@ impl SolanaRpc {
             provider: Arc::new(provider),
             program_cache,
             keypair: keypair.to_bytes(),
-            payers_to_monitor: settings.payers_to_monitor()?,
+            escrow_keys_to_monitor: settings.escrow_keys_to_monitor(),
             priority_fee: PriorityFee::default(),
             min_priority_fee: settings.min_priority_fee,
         }))
@@ -119,8 +112,8 @@
 impl SolanaNetwork for SolanaRpc {
     type Transaction = Transaction;

-    async fn payer_balance(&self, payer: &PublicKeyBinary) -> Result<u64, SolanaRpcError> {
-        let ddc_key = delegated_data_credits(&self.program_cache.sub_dao, payer);
+    async fn escrow_balance(&self, escrow_key: &String) -> Result<u64, SolanaRpcError> {
+        let ddc_key = delegated_data_credits(&self.program_cache.sub_dao, escrow_key);
         let (escrow_account, _) = Pubkey::find_program_address(
             &["escrow_dc_account".as_bytes(), &ddc_key.to_bytes()],
             &data_credits::ID,
@@ -131,7 +124,7 @@ impl SolanaNetwork for SolanaRpc {
             .await?
         {
             Response { value: None, .. } => {
-                tracing::info!(%payer, "Account not found, therefore no balance");
+                tracing::info!(%escrow_key, "Account not found, therefore no balance");
                 return Ok(0);
             }
             Response {
@@ -141,10 +134,10 @@
         };

         let account_layout = spl_token::state::Account::unpack(account_data.as_slice())?;
-        if self.payers_to_monitor.contains(payer) {
+        if self.escrow_keys_to_monitor.contains(escrow_key) {
             metrics::gauge!(
                 "balance",
-                "payer" => payer.to_string()
+                "escrow_key" => escrow_key.to_string()
             )
             .set(account_layout.amount as f64);
         }
@@ -154,7 +147,7 @@
     async fn make_burn_transaction(
         &self,
-        payer: &PublicKeyBinary,
+        escrow_key: &String,
         amount: u64,
     ) -> Result<Self::Transaction, SolanaRpcError> {
         // Fetch the sub dao epoch info:
@@ -173,7 +166,7 @@
         );

         // Fetch escrow account
-        let ddc_key = delegated_data_credits(&self.program_cache.sub_dao, payer);
+        let ddc_key = delegated_data_credits(&self.program_cache.sub_dao, escrow_key);
         let (escrow_account, _) = Pubkey::find_program_address(
             &["escrow_dc_account".as_bytes(), &ddc_key.to_bytes()],
             &data_credits::ID,
@@ -185,7 +178,7 @@
             sub_dao: self.program_cache.sub_dao,
             account_payer: self.program_cache.account_payer,
             data_credits: self.program_cache.data_credits,
-            delegated_data_credits: delegated_data_credits(&self.program_cache.sub_dao, payer),
+            delegated_data_credits: delegated_data_credits(&self.program_cache.sub_dao, escrow_key),
             token_program: spl_token::id(),
             helium_sub_daos_program: helium_sub_daos::id(),
             system_program: solana_program::system_program::id(),
@@ -432,9 +425,9 @@ impl GetSignature for PossibleTransaction {
 impl SolanaNetwork for Option<Arc<SolanaRpc>> {
     type Transaction = PossibleTransaction;

-    async fn payer_balance(&self, payer: &PublicKeyBinary) -> Result<u64, SolanaRpcError> {
+    async fn escrow_balance(&self, escrow_key: &String) -> Result<u64, SolanaRpcError> {
         if let Some(ref rpc) = self {
-            rpc.payer_balance(payer).await
+            rpc.escrow_balance(escrow_key).await
         } else {
             Ok(u64::MAX)
         }
@@ -442,12 +435,12 @@
     async fn make_burn_transaction(
         &self,
-        payer: &PublicKeyBinary,
+        escrow_key: &String,
         amount: u64,
     ) -> Result<Self::Transaction, SolanaRpcError> {
         if let Some(ref rpc) = self {
             Ok(PossibleTransaction::Transaction(
-                rpc.make_burn_transaction(payer, amount).await?,
+                rpc.make_burn_transaction(escrow_key, amount).await?,
             ))
         } else {
             Ok(PossibleTransaction::NoTransaction(Signature::new_unique()))
         }
@@ -479,7 +472,7 @@ impl SolanaNetwork for Option<Arc<SolanaRpc>> {
 pub struct MockTransaction {
     pub signature: Signature,
-    pub payer: PublicKeyBinary,
+    pub escrow_key: String,
     pub amount: u64,
 }
@@ -491,7 +484,7 @@ impl GetSignature for MockTransaction {
 #[derive(Clone)]
 pub struct TestSolanaClientMap {
-    payer_balances: Arc<Mutex<HashMap<PublicKeyBinary, u64>>>,
+    escrow_balances: Arc<Mutex<HashMap<String, u64>>>,
     confirm_all_txns: Arc<Mutex<bool>>,
     confirmed_txns: Arc<Mutex<HashSet<Signature>>>,
 }
@@ -499,7 +492,7 @@ pub struct TestSolanaClientMap {
 impl Default for TestSolanaClientMap {
     fn default() -> Self {
         Self {
-            payer_balances: Default::default(),
+            escrow_balances: Default::default(),
             confirm_all_txns: Arc::new(Mutex::new(true)),
             confirmed_txns: Default::default(),
         }
@@ -507,18 +500,18 @@
     }
 }

 impl TestSolanaClientMap {
-    pub async fn insert(&self, payer: &PublicKeyBinary, amount: u64) {
-        self.payer_balances
+    pub async fn insert(&self, escrow_key: &String, amount: u64) {
+        self.escrow_balances
             .lock()
             .await
-            .insert(payer.clone(), amount);
+            .insert(escrow_key.clone(), amount);
     }

-    pub async fn get_payer_balance(&self, payer: &PublicKeyBinary) -> u64 {
-        self.payer_balances
+    pub async fn get_escrow_balance(&self, escrow_key: &String) -> u64 {
+        self.escrow_balances
             .lock()
             .await
-            .get(payer)
+            .get(escrow_key)
             .cloned()
             .unwrap_or_default()
     }
@@ -533,28 +526,28 @@ impl TestSolanaClientMap {
 impl SolanaNetwork for TestSolanaClientMap {
     type Transaction = MockTransaction;

-    async fn payer_balance(&self, payer: &PublicKeyBinary) -> Result<u64, SolanaRpcError> {
-        Ok(*self.payer_balances.lock().await.get(payer).unwrap())
+    async fn escrow_balance(&self, escrow_key: &String) -> Result<u64, SolanaRpcError> {
+        Ok(*self.escrow_balances.lock().await.get(escrow_key).unwrap())
     }

     async fn make_burn_transaction(
         &self,
-        payer: &PublicKeyBinary,
+        escrow_key: &String,
         amount: u64,
     ) -> Result<Self::Transaction, SolanaRpcError> {
         Ok(MockTransaction {
             signature: Signature::new_unique(),
-            payer: payer.clone(),
+            escrow_key: escrow_key.clone(),
             amount,
         })
     }

     async fn submit_transaction(&self, txn: &MockTransaction) -> Result<(), SolanaRpcError> {
         *self
-            .payer_balances
+            .escrow_balances
             .lock()
             .await
-            .get_mut(&txn.payer)
+            .get_mut(&txn.escrow_key)
             .unwrap() -= txn.amount;
         Ok(())
     }
@@ -568,10 +561,10 @@ impl SolanaNetwork for TestSolanaClientMap {
     }
 }

-/// Returns the PDA for the Delegated Data Credits of the given `payer`.
-pub fn delegated_data_credits(sub_dao: &Pubkey, payer: &PublicKeyBinary) -> Pubkey {
+/// Returns the PDA for the Delegated Data Credits of the given `escrow_key`.
+pub fn delegated_data_credits(sub_dao: &Pubkey, escrow_key: &String) -> Pubkey {
     let mut hasher = Sha256::new();
-    hasher.update(payer.to_string());
+    hasher.update(escrow_key);
     let sha_digest = hasher.finalize();
     let (ddc_key, _) = Pubkey::find_program_address(
         &[
diff --git a/solana/src/lib.rs b/solana/src/lib.rs
index 35964bb99..73d157ac1 100644
--- a/solana/src/lib.rs
+++ b/solana/src/lib.rs
@@ -1,5 +1,6 @@
 use solana_client::client_error::ClientError;
 use solana_sdk::pubkey::ParsePubkeyError;
+use solana_sdk::pubkey::Pubkey;
 use solana_sdk::transaction::Transaction;
 use std::time::SystemTimeError;
@@ -82,3 +83,9 @@ impl GetSignature for Signature {
         self
     }
 }
+
+pub fn solana_pubkey_to_helium_binary(pubkey_str: &str) -> Result<PublicKeyBinary, SqlxError> {
+    let pubkey = Pubkey::from_str(pubkey_str).map_err(|e| SqlxError::Decode(Box::new(e)))?;
+    let helium_pubkey = to_helium_pubkey(&pubkey).map_err(|e| SqlxError::Decode(Box::new(e)))?;
+    Ok(PublicKeyBinary::from(helium_pubkey))
+}
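
Reviewer note: pending-burn bookkeeping is now keyed by the escrow-key string everywhere, including the in-memory Arc<Mutex<HashMap<String, u64>>> implementation of AddPendingBurn shown in iot_packet_verifier/src/pending.rs. A minimal sketch of how a test can exercise that implementation follows; the key value and the #[tokio::test] wrapper are assumptions for illustration, not part of this diff.

use std::{collections::HashMap, sync::Arc};

use iot_packet_verifier::pending::AddPendingBurn;
use tokio::sync::Mutex;

#[tokio::test]
async fn tracks_burns_per_escrow_key() -> Result<(), sqlx::Error> {
    // In-memory stand-in for the pending_burns table, keyed by escrow_key.
    let mut pending: Arc<Mutex<HashMap<String, u64>>> = Default::default();
    let escrow_key = String::from("OUI_0"); // placeholder key

    pending.add_burned_amount(&escrow_key, 25).await?;
    pending.add_burned_amount(&escrow_key, 10).await?;

    // Amounts accumulate under the same escrow key, mirroring the
    // ON CONFLICT (escrow_key) upsert used against Postgres.
    assert_eq!(*pending.lock().await.get(&escrow_key).unwrap(), 35);
    Ok(())
}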
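The SolanaNetwork trait now resolves balances and burns by escrow-key string rather than by PublicKeyBinary. A rough usage sketch against the TestSolanaClientMap mock is below; the crate/module paths, the tokio entry point, and the key value are assumptions for illustration only.

use solana::burn::{SolanaNetwork, TestSolanaClientMap};

#[tokio::main]
async fn main() {
    let escrow_key = String::from("OUI_0"); // placeholder escrow key

    let solana = TestSolanaClientMap::default();
    solana.insert(&escrow_key, 10_000).await;

    // escrow_balance replaces the old payer_balance lookup.
    let balance = solana.escrow_balance(&escrow_key).await.unwrap();
    assert_eq!(balance, 10_000);

    // Burning debits the mock ledger keyed by the same string.
    let txn = solana.make_burn_transaction(&escrow_key, 1_000).await.unwrap();
    solana.submit_transaction(&txn).await.unwrap();
    assert_eq!(solana.get_escrow_balance(&escrow_key).await, 9_000);
}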
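One behavioral detail worth calling out: delegated_data_credits previously hashed payer.to_string() and now hashes the escrow-key string directly, so an org whose escrow_key equals the string the old payer key rendered to derives the same PDA and escrow token account as before. A small sketch of just the seed derivation, under that assumption:

use sha2::{Digest, Sha256};

// The PDA seed is the SHA-256 of the escrow-key string; feeding it the old
// payer key's base58 string form produces byte-identical seeds, so existing
// delegated-data-credits accounts remain addressable.
fn ddc_seed(escrow_key: &str) -> Vec<u8> {
    let mut hasher = Sha256::new();
    hasher.update(escrow_key);
    hasher.finalize().to_vec()
}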
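The new solana_pubkey_to_helium_binary helper in solana/src/lib.rs bridges in the other direction: it parses a Solana pubkey string and converts it to a helium_crypto::PublicKeyBinary, surfacing failures as decode errors. A hedged usage sketch, assuming the helper is exported from the crate root, that escrow keys can carry a Solana pubkey in base58, and that SqlxError is the crate's alias for sqlx::Error:

use helium_crypto::PublicKeyBinary;

// Hypothetical caller: recover a PublicKeyBinary from an escrow-key string so
// code paths that still key data-transfer sessions by payer (for example the
// mobile_packet_verifier burner) can bridge between the two representations.
fn payer_from_escrow_key(escrow_key: &str) -> Result<PublicKeyBinary, sqlx::Error> {
    solana::solana_pubkey_to_helium_binary(escrow_key)
}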