diff --git a/Cargo.lock b/Cargo.lock index 7af64d47795..55f4da974a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -166,7 +166,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -408,7 +408,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -476,7 +476,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -493,7 +493,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -561,7 +561,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -726,7 +726,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.98", + "syn 2.0.101", "which", ] @@ -747,7 +747,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -794,7 +794,7 @@ checksum = "adc0846593a56638b74e136a45610f9934c052e14761bebca6b092d5522599e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -958,7 +958,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oxnet", - "progenitor 0.9.1", + "progenitor 0.10.0", "regress", "reqwest", "schemars", @@ -1095,7 +1095,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "thiserror 1.0.69", @@ -1109,7 +1109,7 @@ checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "thiserror 2.0.12", @@ -1379,7 +1379,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -1435,7 +1435,7 @@ dependencies = [ "clickhouse-admin-types", "omicron-uuid-kinds", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -1450,7 +1450,7 @@ dependencies = [ "clickhouse-admin-types", "omicron-uuid-kinds", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -1465,7 +1465,7 @@ dependencies = [ "clickhouse-admin-types", "omicron-uuid-kinds", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -1581,7 +1581,7 @@ dependencies = [ "chrono", "omicron-uuid-kinds", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -1952,13 +1952,13 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=da3cf198a0e000bb89efc3a1c77d7ba09340a600#da3cf198a0e000bb89efc3a1c77d7ba09340a600" +source = "git+https://github.com/oxidecomputer/crucible?rev=e164393a88e7b62598897dc4f53315f083e25333#e164393a88e7b62598897dc4f53315f083e25333" dependencies = [ "anyhow", "chrono", "crucible-workspace-hack", "percent-encoding", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -1981,7 +1981,7 @@ dependencies = [ [[package]] name = "crucible-common" version = "0.0.1" -source = 
"git+https://github.com/oxidecomputer/crucible?rev=da3cf198a0e000bb89efc3a1c77d7ba09340a600#da3cf198a0e000bb89efc3a1c77d7ba09340a600" +source = "git+https://github.com/oxidecomputer/crucible?rev=e164393a88e7b62598897dc4f53315f083e25333#e164393a88e7b62598897dc4f53315f083e25333" dependencies = [ "anyhow", "atty", @@ -2010,13 +2010,13 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=da3cf198a0e000bb89efc3a1c77d7ba09340a600#da3cf198a0e000bb89efc3a1c77d7ba09340a600" +source = "git+https://github.com/oxidecomputer/crucible?rev=e164393a88e7b62598897dc4f53315f083e25333#e164393a88e7b62598897dc4f53315f083e25333" dependencies = [ "anyhow", "chrono", "crucible-workspace-hack", "percent-encoding", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -2027,7 +2027,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=da3cf198a0e000bb89efc3a1c77d7ba09340a600#da3cf198a0e000bb89efc3a1c77d7ba09340a600" +source = "git+https://github.com/oxidecomputer/crucible?rev=e164393a88e7b62598897dc4f53315f083e25333#e164393a88e7b62598897dc4f53315f083e25333" dependencies = [ "crucible-workspace-hack", "libc", @@ -2136,7 +2136,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2160,7 +2160,7 @@ checksum = "1c3ea205baff86c8c0fc59f725aa55acfe159aaad7b1a99d84bcd203e4103245" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2184,7 +2184,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2195,7 +2195,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2238,7 +2238,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2282,7 +2282,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2315,7 +2315,7 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2336,7 +2336,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2347,7 +2347,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2368,7 +2368,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2378,7 +2378,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2391,7 +2391,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2488,7 +2488,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2497,7 +2497,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2559,7 +2559,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2600,7 +2600,10 @@ dependencies = [ "openapi-lint", "openapiv3", "pretty-hex 0.4.1", + "progenitor 0.10.0", + "reqwest", "schemars", + "semver 1.0.26", "serde", "serde_json", "sled", @@ -2626,7 +2629,7 @@ dependencies = [ "omicron-workspace-hack", "openapi-manager-types", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", ] @@ -2639,7 +2642,7 @@ dependencies = [ "http", "internal-dns-types", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -2720,7 +2723,7 @@ dependencies = [ "http-body-util", "hyper", "hyper-util", - "indexmap 2.7.1", + "indexmap 2.9.0", "multer", "openapiv3", "paste", @@ -2768,7 +2771,7 @@ dependencies = [ "http-body-util", "hyper", "hyper-util", - "indexmap 2.7.1", + "indexmap 2.9.0", "multer", "openapiv3", "paste", @@ -2777,7 +2780,7 @@ dependencies = [ "rustls-pemfile 2.2.0", "schemars", "scopeguard", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "serde_path_to_error", @@ -2809,7 +2812,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2821,10 +2824,10 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -2838,7 +2841,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -3026,7 +3029,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -3074,7 +3077,7 @@ dependencies = [ "omicron-workspace-hack", "openapi-manager-types", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "thiserror 1.0.69", @@ -3091,7 +3094,7 @@ dependencies = [ "http", "omicron-uuid-kinds", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -3323,7 +3326,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -3448,7 +3451,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -3543,7 +3546,7 @@ dependencies = [ "daft", "gateway-messages", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "rand 0.8.5", "reqwest", "schemars", @@ -3792,13 +3795,13 @@ dependencies = [ "debug-ignore", "fixedbitset 0.5.7", "guppy-workspace-hack", - "indexmap 2.7.1", + "indexmap 2.9.0", "itertools 0.14.0", "nested", "once_cell", "pathdiff", "petgraph 0.7.1", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "smallvec 1.14.0", @@ -3824,7 +3827,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -4130,9 +4133,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" 
dependencies = [ "bytes", "fnv", @@ -4540,7 +4543,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -4672,9 +4675,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", "hashbrown 0.15.1", @@ -4730,7 +4733,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -4760,7 +4763,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -4837,7 +4840,7 @@ dependencies = [ "installinator-common", "omicron-common", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "regress", "reqwest", "schemars", @@ -4911,10 +4914,10 @@ dependencies = [ "omicron-test-utils", "omicron-uuid-kinds", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "qorb", "reqwest", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "sled", @@ -5096,7 +5099,7 @@ checksum = "43ce13c40ec6956157a3635d97a1ee2df323b263f09ea14165131289cb0f5c19" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -5163,7 +5166,7 @@ version = "0.1.0" source = "git+https://github.com/oxidecomputer/opte?rev=26d949762112189b7196d2b8fdf0eeb6d9d5a28f#26d949762112189b7196d2b8fdf0eeb6d9d5a28f" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -5476,7 +5479,7 @@ version = "0.1.0" dependencies = [ "omicron-workspace-hack", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -5515,9 +5518,9 @@ checksum = "9374ef4228402d4b7e403e5838cb880d9ee663314b0a900d5a6aabf0c213552e" [[package]] name = "log" -version = "0.4.22" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" dependencies = [ "value-bag", ] @@ -5908,7 +5911,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oxnet", - "progenitor 0.9.1", + "progenitor 0.10.0", "regress", "reqwest", "schemars", @@ -6027,7 +6030,7 @@ dependencies = [ "rand 0.8.5", "ref-cast", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "sled-agent-client", @@ -6106,7 +6109,7 @@ dependencies = [ "regex", "rustls 0.22.4", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "serde_with", @@ -6219,7 +6222,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -6407,7 +6410,7 @@ dependencies = [ "gateway-client", "id-map", "illumos-utils", - "indexmap 2.7.1", + "indexmap 2.9.0", "internal-dns-resolver", "ipnet", "itertools 0.14.0", @@ -6484,7 +6487,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "indexmap 2.7.1", + "indexmap 2.9.0", "nexus-inventory", "nexus-reconfigurator-planning", "nexus-types", @@ -6621,7 +6624,7 @@ version = "0.1.0" dependencies = [ "omicron-workspace-hack", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -6661,7 +6664,7 @@ dependencies = [ 
"parse-display", "proptest", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "serde_with", @@ -6829,7 +6832,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -7083,14 +7086,14 @@ dependencies = [ "omicron-workspace-hack", "oxnet", "parse-display", - "progenitor-client 0.9.1", + "progenitor-client 0.10.0", "proptest", "protocol", "rand 0.8.5", "regress", "reqwest", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_human_bytes", "serde_json", @@ -7116,7 +7119,7 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "oxnet", - "progenitor-client 0.9.1", + "progenitor-client 0.10.0", "reqwest", "serde", "sled-hardware-types", @@ -7372,7 +7375,7 @@ dependencies = [ "petgraph 0.7.1", "pq-sys", "pretty_assertions", - "progenitor-client 0.9.1", + "progenitor-client 0.10.0", "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=e5c85d84b0a51803caffb335a1063612edb02f6d)", "qorb", "rand 0.8.5", @@ -7386,7 +7389,7 @@ dependencies = [ "rustls-pemfile 2.2.0", "samael", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "serde_urlencoded", @@ -7515,7 +7518,7 @@ dependencies = [ "rayon", "reqwest", "ring", - "semver 1.0.25", + "semver 1.0.26", "serde", "shell-words", "sled-hardware", @@ -7609,7 +7612,7 @@ dependencies = [ "omicron-workspace-hack", "omicron-zone-package", "reqwest", - "semver 1.0.25", + "semver 1.0.26", "serde", "sha2", "shell-words", @@ -7894,7 +7897,7 @@ dependencies = [ "hyper-rustls 0.27.3", "hyper-util", "idna", - "indexmap 2.7.1", + "indexmap 2.9.0", "indicatif", "inout", "ipnetwork", @@ -7939,7 +7942,7 @@ dependencies = [ "rustls-webpki 0.102.8", "schemars", "scopeguard", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "sha1", @@ -7952,7 +7955,7 @@ dependencies = [ "strum", "subtle", "syn 1.0.109", - "syn 2.0.98", + "syn 2.0.101", "time", "time-macros", "tokio", @@ -7995,7 +7998,7 @@ dependencies = [ "futures-util", "hex", "reqwest", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_derive", "serde_json", @@ -8033,7 +8036,7 @@ version = "0.4.0" source = "git+https://github.com/oxidecomputer/openapi-lint?branch=main#ef442ee4343e97b6d9c217d3e7533962fe7d7236" dependencies = [ "heck 0.4.1", - "indexmap 2.7.1", + "indexmap 2.9.0", "lazy_static", "openapiv3", "regex", @@ -8071,7 +8074,7 @@ dependencies = [ "owo-colors", "oximeter-api", "repo-depot-api", - "semver 1.0.25", + "semver 1.0.26", "serde_json", "sha2", "similar", @@ -8091,7 +8094,7 @@ dependencies = [ "camino", "omicron-workspace-hack", "paste", - "semver 1.0.25", + "semver 1.0.26", ] [[package]] @@ -8100,7 +8103,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc02deea53ffe807708244e5914f6b099ad7015a207ee24317c22112e17d9c5c" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.9.0", "serde", "serde_json", ] @@ -8128,7 +8131,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -8238,8 +8241,8 @@ dependencies = [ "http", "hyper", "omicron-workspace-hack", - "progenitor 0.9.1", - "progenitor-client 0.9.1", + "progenitor 0.10.0", + "progenitor-client 0.10.0", "rand 0.8.5", "regress", "reqwest", @@ -8279,7 +8282,7 @@ dependencies = [ "oximeter-timeseries-macro 0.1.0", "oximeter-types 0.1.0", "prettyplease", - "syn 2.0.98", + "syn 
2.0.101", "toml 0.8.20", "uuid", ] @@ -8298,7 +8301,7 @@ dependencies = [ "oximeter-timeseries-macro 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "oximeter-types 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "prettyplease", - "syn 2.0.98", + "syn 2.0.101", "toml 0.8.20", "uuid", ] @@ -8324,7 +8327,7 @@ dependencies = [ "futures", "omicron-common", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "serde", "slog", @@ -8361,7 +8364,7 @@ dependencies = [ "rand 0.8.5", "reqwest", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "slog", @@ -8404,7 +8407,7 @@ dependencies = [ "gethostname", "highway", "iana-time-zone", - "indexmap 2.7.1", + "indexmap 2.9.0", "itertools 0.14.0", "libc", "nom", @@ -8476,7 +8479,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -8487,7 +8490,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -8532,7 +8535,7 @@ dependencies = [ "schemars", "serde", "slog-error-chain", - "syn 2.0.98", + "syn 2.0.101", "toml 0.8.20", ] @@ -8553,7 +8556,7 @@ dependencies = [ "schemars", "serde", "slog-error-chain", - "syn 2.0.98", + "syn 2.0.101", "toml 0.8.20", ] @@ -8582,7 +8585,7 @@ dependencies = [ "oximeter-types 0.1.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -8595,7 +8598,7 @@ dependencies = [ "oximeter-types 0.1.0 (git+https://github.com/oxidecomputer/omicron?branch=main)", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -8843,7 +8846,7 @@ dependencies = [ "regex", "regex-syntax 0.8.5", "structmeta", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -9030,7 +9033,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -9051,7 +9054,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset 0.4.2", - "indexmap 2.7.1", + "indexmap 2.9.0", "serde", "serde_derive", ] @@ -9063,7 +9066,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset 0.5.7", - "indexmap 2.7.1", + "indexmap 2.9.0", ] [[package]] @@ -9134,7 +9137,7 @@ checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -9445,7 +9448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1ccf34da56fc294e7d4ccf69a85992b7dfb826b7cf57bac6a70bba3494cc08a" dependencies = [ "proc-macro2", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -9519,14 +9522,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -9553,6 +9556,17 @@ dependencies = [ "progenitor-macro 0.9.1", ] +[[package]] +name = "progenitor" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ced2eadb9776a201d0585b4b072fd44d7d2104e0f3452d967b5a78966f4855cf" +dependencies = [ + "progenitor-client 0.10.0", + "progenitor-impl 0.10.0", + "progenitor-macro 0.10.0", +] + [[package]] name = "progenitor-client" version = "0.8.0" @@ -9583,6 +9597,21 @@ dependencies = [ "serde_urlencoded", ] +[[package]] +name = "progenitor-client" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "296003fd74e64c77aeb2c10eae850eb543211a8557dd3b3de6f4230b5071e44b" +dependencies = [ + "bytes", + "futures-core", + "percent-encoding", + "reqwest", + "serde", + "serde_json", + "serde_urlencoded", +] + [[package]] name = "progenitor-impl" version = "0.8.0" @@ -9591,7 +9620,7 @@ checksum = "d85934a440963a69f9f04f48507ff6e7aa2952a5b2d8f96cc37fa3dd5c270f66" dependencies = [ "heck 0.5.0", "http", - "indexmap 2.7.1", + "indexmap 2.9.0", "openapiv3", "proc-macro2", "quote", @@ -9599,7 +9628,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "syn 2.0.98", + "syn 2.0.101", "thiserror 1.0.69", "typify 0.2.0", "unicode-ident", @@ -9613,7 +9642,7 @@ checksum = "37adc80a94c9cae890e82deeeecc9d8f2a5cb153256caaf1bf0f03611e537214" dependencies = [ "heck 0.5.0", "http", - "indexmap 2.7.1", + "indexmap 2.9.0", "openapiv3", "proc-macro2", "quote", @@ -9621,12 +9650,34 @@ dependencies = [ "schemars", "serde", "serde_json", - "syn 2.0.98", + "syn 2.0.101", "thiserror 2.0.12", "typify 0.3.0", "unicode-ident", ] +[[package]] +name = "progenitor-impl" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b17e5363daa50bf1cccfade6b0fb970d2278758fd5cfa9ab69f25028e4b1afa3" +dependencies = [ + "heck 0.5.0", + "http", + "indexmap 2.9.0", + "openapiv3", + "proc-macro2", + "quote", + "regex", + "schemars", + "serde", + "serde_json", + "syn 2.0.101", + "thiserror 2.0.12", + "typify 0.4.1", + "unicode-ident", +] + [[package]] name = "progenitor-macro" version = "0.8.0" @@ -9642,7 +9693,7 @@ dependencies = [ "serde_json", "serde_tokenstream", "serde_yaml", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -9660,7 +9711,25 @@ dependencies = [ "serde_json", "serde_tokenstream", "serde_yaml", - "syn 2.0.98", + "syn 2.0.101", +] + +[[package]] +name = "progenitor-macro" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4972aec926d1e06d6abc11ab3f063d2f7063be3dd46fd2839442c14d8e48f3ed" +dependencies = [ + "openapiv3", + "proc-macro2", + "progenitor-impl 0.10.0", + "quote", + "schemars", + "serde", + "serde_json", + "serde_tokenstream", + "serde_yaml", + "syn 2.0.101", ] [[package]] @@ -9907,9 +9976,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] @@ -10131,7 +10200,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "pq-sys", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "slog", @@ -10235,7 +10304,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -10305,7 +10374,7 @@ name = "repo-depot-client" version = "0.1.0" dependencies = [ "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "reqwest", "schemars", "serde", @@ -10482,7 +10551,7 @@ 
dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.98", + "syn 2.0.101", "unicode-ident", ] @@ -10644,7 +10713,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.25", + "semver 1.0.26", ] [[package]] @@ -10922,7 +10991,7 @@ dependencies = [ "chrono", "dyn-clone", "schemars_derive", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "uuid", @@ -10937,7 +11006,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -10963,7 +11032,7 @@ checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11041,9 +11110,9 @@ checksum = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac" [[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" dependencies = [ "serde", ] @@ -11085,7 +11154,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11096,7 +11165,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11157,7 +11226,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11178,7 +11247,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11203,7 +11272,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.7.1", + "indexmap 2.9.0", "serde", "serde_derive", "serde_json", @@ -11220,7 +11289,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11229,7 +11298,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.9.0", "itoa", "ryu", "serde", @@ -11430,7 +11499,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oxnet", - "progenitor 0.9.1", + "progenitor 0.10.0", "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=e5c85d84b0a51803caffb335a1063612edb02f6d)", "regress", "reqwest", @@ -11640,7 +11709,7 @@ source = "git+https://github.com/oxidecomputer/slog-error-chain?branch=main#15f6 dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11778,7 +11847,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -11891,7 +11960,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12033,7 +12102,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12044,7 +12113,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12079,7 +12148,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12092,7 +12161,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12139,9 +12208,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" dependencies = [ "proc-macro2", "quote", @@ -12171,7 +12240,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12239,7 +12308,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12380,7 +12449,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12421,7 +12490,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12432,7 +12501,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12570,7 +12639,7 @@ checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12640,7 +12709,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -12800,7 +12869,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", @@ -12813,7 +12882,7 @@ version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", @@ -12908,7 +12977,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -13082,7 +13151,7 @@ dependencies = [ "clap", "console", "humantime", - "semver 1.0.25", + "semver 1.0.26", "slog", "slog-async", "slog-envlogger", @@ -13101,7 +13170,7 @@ dependencies = [ "hex", "proptest", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_human_bytes", "strum", @@ -13114,7 +13183,7 @@ name = "tufaceous-brand-metadata" version = "0.1.0" source = "git+https://github.com/oxidecomputer/tufaceous?branch=main#04681f26ba09e144e5467dd6bd22c4887692a670" dependencies = [ - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "tar", @@ -13146,7 +13215,7 @@ dependencies = [ "itertools 0.13.0", "parse-size", "rand 0.8.5", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "serde_path_to_error", @@ -13263,6 +13332,16 @@ dependencies = [ "typify-macro 0.3.0", ] +[[package]] +name = "typify" +version = "0.4.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc5bec3cdff70fd542e579aa2e52967833e543a25fae0d14579043d2e868a50" +dependencies = [ + "typify-impl 0.4.1", + "typify-macro 0.4.1", +] + [[package]] name = "typify-impl" version = "0.2.0" @@ -13275,10 +13354,10 @@ dependencies = [ "quote", "regress", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", - "syn 2.0.98", + "syn 2.0.101", "thiserror 1.0.69", "unicode-ident", ] @@ -13295,10 +13374,30 @@ dependencies = [ "quote", "regress", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", - "syn 2.0.98", + "syn 2.0.101", + "thiserror 2.0.12", + "unicode-ident", +] + +[[package]] +name = "typify-impl" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b52a67305054e1da6f3d99ad94875dcd0c7c49adbd17b4b64f0eefb7ae5bf8ab" +dependencies = [ + "heck 0.5.0", + "log", + "proc-macro2", + "quote", + "regress", + "schemars", + "semver 1.0.26", + "serde", + "serde_json", + "syn 2.0.101", "thiserror 2.0.12", "unicode-ident", ] @@ -13312,11 +13411,11 @@ dependencies = [ "proc-macro2", "quote", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", "typify-impl 0.2.0", ] @@ -13329,14 +13428,31 @@ dependencies = [ "proc-macro2", "quote", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", "typify-impl 0.3.0", ] +[[package]] +name = "typify-macro" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ff5799be156e4f635c348c6051d165e1c59997827155133351a8c4d333d9841" +dependencies = [ + "proc-macro2", + "quote", + "schemars", + "semver 1.0.26", + "serde", + "serde_json", + "serde_tokenstream", + "syn 2.0.101", + "typify-impl 0.4.1", +] + [[package]] name = "ucd-trie" version = "0.1.7" @@ -13366,9 +13482,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-linebreak" @@ -13482,7 +13598,7 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "rand 0.8.5", - "semver 1.0.25", + "semver 1.0.26", "sha2", "slog", "tar", @@ -13513,7 +13629,7 @@ dependencies = [ "either", "futures", "indent_write", - "indexmap 2.7.1", + "indexmap 2.9.0", "indicatif", "indoc 2.0.6", "libsw", @@ -13572,7 +13688,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", "usdt-impl", ] @@ -13590,7 +13706,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.98", + "syn 2.0.101", "thiserror 1.0.69", "thread-id", "version_check", @@ -13606,7 +13722,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream", - "syn 2.0.98", + "syn 2.0.101", "usdt-impl", ] @@ -13812,7 +13928,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", "wasm-bindgen-shared", ] @@ -13846,7 +13962,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -13937,7 +14053,7 @@ dependencies = [ "futures", "hex", "humantime", - 
"indexmap 2.7.1", + "indexmap 2.9.0", "indicatif", "itertools 0.14.0", "maplit", @@ -13949,7 +14065,7 @@ dependencies = [ "ratatui", "reqwest", "rpassword", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "shell-words", @@ -14077,7 +14193,7 @@ dependencies = [ "rand 0.8.5", "reqwest", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "sha2", @@ -14117,7 +14233,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "sled-hardware-types", "slog", @@ -14134,11 +14250,11 @@ dependencies = [ "omicron-common", "omicron-uuid-kinds", "omicron-workspace-hack", - "progenitor 0.9.1", + "progenitor 0.10.0", "regress", "reqwest", "schemars", - "semver 1.0.25", + "semver 1.0.26", "serde", "serde_json", "sled-agent-types", @@ -14226,7 +14342,7 @@ checksum = "83577b051e2f49a058c308f17f273b570a6a758386fc291b5f6a934dd84e48c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -14237,7 +14353,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -14632,7 +14748,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", "synstructure", ] @@ -14673,7 +14789,7 @@ checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -14684,7 +14800,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -14695,7 +14811,7 @@ checksum = "593e7c96176495043fcb9e87cf7659f4d18679b5bab6b92bdef359c76a7795dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -14715,7 +14831,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", "synstructure", ] @@ -14736,7 +14852,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -14758,7 +14874,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.101", ] [[package]] @@ -14785,7 +14901,7 @@ dependencies = [ "crc32fast", "crossbeam-utils", "flate2", - "indexmap 2.7.1", + "indexmap 2.9.0", "memchr", "zopfli", ] diff --git a/Cargo.toml b/Cargo.toml index a9262efafc4..718b551e360 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -387,10 +387,10 @@ crossterm = { version = "0.28.1", features = ["event-stream"] } # NOTE: if you change the pinned revision of the `crucible` dependencies, you # must also update the references in package-manifest.toml to match the new # revision. 
-crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "da3cf198a0e000bb89efc3a1c77d7ba09340a600" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "da3cf198a0e000bb89efc3a1c77d7ba09340a600" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "da3cf198a0e000bb89efc3a1c77d7ba09340a600" } -crucible-common = { git = "https://github.com/oxidecomputer/crucible", rev = "da3cf198a0e000bb89efc3a1c77d7ba09340a600" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "e164393a88e7b62598897dc4f53315f083e25333" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "e164393a88e7b62598897dc4f53315f083e25333" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "e164393a88e7b62598897dc4f53315f083e25333" } +crucible-common = { git = "https://github.com/oxidecomputer/crucible", rev = "e164393a88e7b62598897dc4f53315f083e25333" } # NOTE: See above! csv = "1.3.1" curve25519-dalek = "4" @@ -598,8 +598,8 @@ pretty_assertions = "1.4.1" pretty-hex = "0.4.1" prettyplease = { version = "0.2.30", features = ["verbatim"] } proc-macro2 = "1.0" -progenitor = "0.9.1" -progenitor-client = "0.9.1" +progenitor = "0.10.0" +progenitor-client = "0.10.0" # NOTE: if you change the pinned revision of the `bhyve_api` and propolis # dependencies, you must also update the references in package-manifest.toml to # match the new revision. diff --git a/clients/dns-service-client/src/lib.rs b/clients/dns-service-client/src/lib.rs index 2b083accc2b..c901776de29 100644 --- a/clients/dns-service-client/src/lib.rs +++ b/clients/dns-service-client/src/lib.rs @@ -31,6 +31,8 @@ pub type DnsError = crate::Error; pub const ERROR_CODE_UPDATE_IN_PROGRESS: &'static str = "UpdateInProgress"; pub const ERROR_CODE_BAD_UPDATE_GENERATION: &'static str = "BadUpdateGeneration"; +pub const ERROR_CODE_UPDATE_DEFINES_SOA_RECORD: &'static str = + "UpdateDefinesSoaRecord"; /// Returns whether an error from this client should be retried pub fn is_retryable(error: &DnsError) -> bool { diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 7301ff9680d..0de35d236dc 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -748,6 +748,10 @@ impl Generation { ); Generation(next_gen) } + + pub const fn as_u64(self) -> u64 { + self.0 + } } impl<'de> Deserialize<'de> for Generation { diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 822fa5fdfa6..00c9fcbcc62 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -6629,7 +6629,10 @@ fn print_name( if records.len() == 1 { match &records[0] { DnsRecord::Srv(_) => (), - DnsRecord::Aaaa(_) | DnsRecord::A(_) => { + DnsRecord::Aaaa(_) + | DnsRecord::A(_) + | DnsRecord::Ns(_) + | DnsRecord::Soa(_) => { println!( "{} {:50} {}", prefix, @@ -6654,6 +6657,10 @@ fn format_record(record: &DnsRecord) -> impl Display { DnsRecord::Srv(Srv { port, target, .. 
}) => { format!("SRV port {:5} {}", port, target) } + DnsRecord::Ns(ns) => format!("NS {}", ns), + DnsRecord::Soa(soa) => { + format!("SOA {}, serial {}", soa.mname, soa.serial) + } } } diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 9da078737f8..f0c3ff0500a 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -28,8 +28,10 @@ DNS zone: oxide-dev.test (External) requested version: 2 (created at ) version created by Nexus: ..................... version created because: create silo: "test-suite-silo" -changes: names added: 1, names removed: 0 +changes: names added: 3, names removed: 0 ++ @ NS ns1.oxide-dev.test ++ ns1 AAAA ::1 + test-suite-silo.sys A 127.0.0.1 --------------------------------------------- stderr: @@ -42,6 +44,8 @@ termination: Exited(0) stdout: External zone: oxide-dev.test NAME RECORDS + @ NS ns1.oxide-dev.test + ns1 AAAA ::1 test-suite-silo.sys A 127.0.0.1 --------------------------------------------- stderr: diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-set-mgs-updates-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-set-mgs-updates-stdout index 24b28919cbc..923359cbe43 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-set-mgs-updates-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-set-mgs-updates-stdout @@ -665,10 +665,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.2 A 192.0.2.3 A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 @@ -965,10 +975,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.2 A 192.0.2.3 A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 @@ -1457,10 +1477,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.2 A 192.0.2.3 A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 @@ -1758,10 +1788,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.2 A 192.0.2.3 A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 @@ -2253,10 +2293,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.2 A 192.0.2.3 A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 @@ -2742,10 +2792,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.2 A 192.0.2.3 A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 
+ name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-set-zone-images-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-set-zone-images-stdout index 202a36b5927..922c0d08dae 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-set-zone-images-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-set-zone-images-stdout @@ -379,10 +379,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.4 A 192.0.2.2 A 192.0.2.3 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.3 + name: ns3 (records: 1) + A 198.51.100.2 @@ -662,10 +672,20 @@ internal DNS: external DNS: DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example name: example-silo.sys (records: 3) A 192.0.2.4 A 192.0.2.2 A 192.0.2.3 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.3 + name: ns3 (records: 1) + A 198.51.100.2 diff --git a/dns-server-api/src/lib.rs b/dns-server-api/src/lib.rs index da55d545e0e..99892f08c7e 100644 --- a/dns-server-api/src/lib.rs +++ b/dns-server-api/src/lib.rs @@ -90,7 +90,6 @@ //! we'll need to stop queueing them. So why bother at all? use dropshot::{HttpError, HttpResponseOk, RequestContext}; -use internal_dns_types::config::{DnsConfig, DnsConfigParams}; use openapi_manager_types::{ SupportedVersion, SupportedVersions, api_versions, }; @@ -107,6 +106,7 @@ api_versions!([ // | example for the next person. // v // (next_int, IDENT), + (2, SOA_AND_NS), (1, INITIAL), ]); @@ -129,17 +129,52 @@ pub trait DnsServerApi { #[endpoint( method = GET, path = "/config", + operation_id = "dns_config_get", + versions = "1.0.0".."2.0.0" )] - async fn dns_config_get( + async fn dns_config_get_v1( rqctx: RequestContext, - ) -> Result, HttpError>; + ) -> Result< + HttpResponseOk, + HttpError, + >; + + #[endpoint( + method = GET, + path = "/config", + operation_id = "dns_config_get", + versions = "2.0.0".. + )] + async fn dns_config_get_v2( + rqctx: RequestContext, + ) -> Result< + HttpResponseOk, + HttpError, + >; + + #[endpoint( + method = PUT, + path = "/config", + operation_id = "dns_config_put", + versions = "1.0.0".."2.0.0", + )] + async fn dns_config_put_v1( + rqctx: RequestContext, + rq: dropshot::TypedBody< + internal_dns_types::v1::config::DnsConfigParams, + >, + ) -> Result; #[endpoint( method = PUT, path = "/config", + operation_id = "dns_config_put", + versions = "2.0.0".. 
)] - async fn dns_config_put( + async fn dns_config_put_v2( rqctx: RequestContext, - rq: dropshot::TypedBody, + rq: dropshot::TypedBody< + internal_dns_types::v2::config::DnsConfigParams, + >, ) -> Result; } diff --git a/dns-server/Cargo.toml b/dns-server/Cargo.toml index c0f25700f6a..4b2721e9a32 100644 --- a/dns-server/Cargo.toml +++ b/dns-server/Cargo.toml @@ -24,6 +24,7 @@ internal-dns-types.workspace = true omicron-common.workspace = true pretty-hex.workspace = true schemars.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true sled.workspace = true @@ -44,5 +45,7 @@ expectorate.workspace = true omicron-test-utils.workspace = true openapiv3.workspace = true openapi-lint.workspace = true +progenitor.workspace = true +reqwest.workspace = true serde_json.workspace = true subprocess.workspace = true diff --git a/dns-server/src/bin/dnsadm.rs b/dns-server/src/bin/dnsadm.rs index b285cf1e38a..5aa30eaa9a7 100644 --- a/dns-server/src/bin/dnsadm.rs +++ b/dns-server/src/bin/dnsadm.rs @@ -153,6 +153,40 @@ async fn main() -> Result<()> { srv.weight ); } + DnsRecord::Ns(name) => { + println!(" NS: {:?}", name); + } + DnsRecord::Soa(soa) => { + println!(" SOA"); + println!( + " mname {}", + soa.mname + ); + println!( + " rname {}", + soa.rname + ); + println!( + " serial {}", + soa.serial + ); + println!( + " refresh {}", + soa.refresh + ); + println!( + " retry {}", + soa.retry + ); + println!( + " expire {}", + soa.expire + ); + println!( + " minimum {}", + soa.minimum + ); + } } } } diff --git a/dns-server/src/dns_server.rs b/dns-server/src/dns_server.rs index 9f8e848089c..e34d0096fea 100644 --- a/dns-server/src/dns_server.rs +++ b/dns-server/src/dns_server.rs @@ -26,6 +26,7 @@ use hickory_server::authority::MessageRequest; use hickory_server::authority::MessageResponse; use hickory_server::authority::MessageResponseBuilder; use internal_dns_types::config::DnsRecord; +use internal_dns_types::config::Soa; use internal_dns_types::config::Srv; use pretty_hex::*; use serde::Deserialize; @@ -254,6 +255,56 @@ fn dns_record_to_record( .set_data(Some(RData::SRV(SRV::new(prio, weight, port, tgt)))); Ok(srv) } + + DnsRecord::Ns(nsdname) => { + let nsdname = Name::from_str(&nsdname).map_err(|error| { + RequestError::ServFail(anyhow!( + "serialization failed due to bad NS dname {:?}: {:#}", + &nsdname, + error + )) + })?; + let mut ns = Record::new(); + use hickory_proto::rr::rdata::NS; + ns.set_name(name.clone()) + .set_rr_type(RecordType::NS) + .set_data(Some(RData::NS(NS(nsdname)))); + Ok(ns) + } + + DnsRecord::Soa(Soa { + mname, + rname, + serial, + refresh, + retry, + expire, + minimum, + }) => { + let mname = Name::from_str(&mname).map_err(|error| { + RequestError::ServFail(anyhow!( + "serialization failed due to bad SOA mname {:?}: {:#}", + &mname, + error + )) + })?; + let rname = Name::from_str(&rname).map_err(|error| { + RequestError::ServFail(anyhow!( + "serialization failed due to bad SOA rname {:?}: {:#}", + &rname, + error + )) + })?; + let mut record = Record::new(); + use hickory_proto::rr::rdata::SOA; + record + .set_name(name.clone()) + .set_rr_type(RecordType::SOA) + .set_data(Some(RData::SOA(SOA::new( + mname, rname, serial, refresh, retry, expire, minimum, + )))); + Ok(record) + } } } @@ -284,30 +335,38 @@ async fn handle_dns_message( (RecordType::A, DnsRecord::A(_)) => true, (RecordType::AAAA, DnsRecord::Aaaa(_)) => true, (RecordType::SRV, DnsRecord::Srv(_)) => true, + (RecordType::NS, DnsRecord::Ns(_)) => true, + (RecordType::SOA, 
DnsRecord::Soa(_)) => true, _ => false, } }) .map(|record| { let record = dns_record_to_record(&name, record)?; - // DNS allows for the server to return additional records - // that weren't explicitly asked for by the client but that - // the server expects the client will want. The records - // corresponding to a lookup on a SRV target is one such case. - // We opportunistically attempt to resolve the target here - // and if successful return those additional records in the - // response. - // NOTE: we only do this one-layer deep. - if let Some(RData::SRV(srv)) = record.data() { - let target_records = - store.query_name(srv.target()).map(|records| { - records - .into_iter() - .map(|record| { - dns_record_to_record(srv.target(), record) - }) - .collect::<Result<Vec<_>, _>>() - }); + // DNS allows for the server to return additional records that + // weren't explicitly asked for by the client but that the server + // expects the client will want. SRV and NS records both use names + // for their referents (rather than IP addresses directly). If + // someone has queried for one of those kinds of records, they'll + // almost certainly be needing the IP addresses that go with them as + // well. We opportunistically attempt to resolve the target here and + // if successful return those additional records in the response. + // + // NOTE: we only do this one-layer deep. If the target of a SRV or + // NS is a CNAME instead of A/AAAA directly, it will be lost here. + let additionals_target = match record.data() { + Some(RData::SRV(srv)) => Some(srv.target()), + Some(RData::NS(ns)) => Some(&ns.0), + _ => None, + }; + + if let Some(target) = additionals_target { + let target_records = store.query_name(target).map(|records| { + records + .into_iter() + .map(|record| dns_record_to_record(target, record)) + .collect::<Result<Vec<_>, _>>() + }); match target_records { Ok(Ok(target_records)) => { additional_records.extend(target_records); @@ -321,7 +380,7 @@ async fn handle_dns_message( &log, "SRV target lookup failed"; "original_mr" => #?mr, - "target" => ?srv.target(), + "target" => ?target, "error" => ?error, ); } @@ -330,7 +389,7 @@ async fn handle_dns_message( &log, "SRV target unexpected response"; "original_mr" => #?mr, - "target" => ?srv.target(), + "target" => ?target, "error" => ?error, ); } diff --git a/dns-server/src/http_server.rs b/dns-server/src/http_server.rs index 17e3167232c..8652034fc17 100644 --- a/dns-server/src/http_server.rs +++ b/dns-server/src/http_server.rs @@ -7,10 +7,11 @@ use crate::storage::{self, UpdateError}; use dns_server_api::DnsServerApi; use dns_service_client::{ - ERROR_CODE_BAD_UPDATE_GENERATION, ERROR_CODE_UPDATE_IN_PROGRESS, + ERROR_CODE_BAD_UPDATE_GENERATION, ERROR_CODE_UPDATE_DEFINES_SOA_RECORD, + ERROR_CODE_UPDATE_IN_PROGRESS, }; use dropshot::RequestContext; -use internal_dns_types::config::{DnsConfig, DnsConfigParams}; +use internal_dns_types::{v1, v2}; pub struct Context { store: storage::Store, @@ -32,9 +33,54 @@ enum DnsServerApiImpl {} impl DnsServerApi for DnsServerApiImpl { type Context = Context; + async fn dns_config_get_v1( + rqctx: RequestContext, + ) -> Result< + dropshot::HttpResponseOk, + dropshot::HttpError, + > { + Self::dns_config_get(rqctx) + .await + .map(|ok| dropshot::HttpResponseOk(ok.0.as_v1())) + } + + async fn dns_config_get_v2( + rqctx: RequestContext, + ) -> Result< + dropshot::HttpResponseOk, + dropshot::HttpError, + > { + Self::dns_config_get(rqctx).await + } + + async fn dns_config_put_v1( + rqctx: RequestContext, + rq: dropshot::TypedBody, + ) -> Result + { +
Self::dns_config_put( + rqctx, + rq.into_inner().into() + ) + .await + } + + async fn dns_config_put_v2( + rqctx: RequestContext, + rq: dropshot::TypedBody, + ) -> Result + { + Self::dns_config_put(rqctx, rq.into_inner()).await + } +} + +impl DnsServerApiImpl { async fn dns_config_get( rqctx: RequestContext, - ) -> Result, dropshot::HttpError> { + ) -> Result< + dropshot::HttpResponseOk, + dropshot::HttpError, + > { let apictx = rqctx.context(); let config = apictx.store.dns_config().await.map_err(|e| { dropshot::HttpError::for_internal_error(format!( @@ -47,14 +93,11 @@ impl DnsServerApi for DnsServerApiImpl { async fn dns_config_put( rqctx: RequestContext, - rq: dropshot::TypedBody, + params: v2::config::DnsConfigParams, ) -> Result { let apictx = rqctx.context(); - apictx - .store - .dns_config_update(&rq.into_inner(), &rqctx.request_id) - .await?; + apictx.store.dns_config_update(¶ms, &rqctx.request_id).await?; Ok(dropshot::HttpResponseUpdatedNoContent()) } } @@ -81,6 +124,16 @@ impl From for dropshot::HttpError { headers: None, }, + UpdateError::UpdateDefinesSoaRecord => dropshot::HttpError { + status_code: dropshot::ErrorStatusCode::BAD_REQUEST, + error_code: Some(String::from( + ERROR_CODE_UPDATE_DEFINES_SOA_RECORD, + )), + external_message: message.clone(), + internal_message: message, + headers: None, + }, + UpdateError::InternalError(_) => { dropshot::HttpError::for_internal_error(message) } diff --git a/dns-server/src/lib.rs b/dns-server/src/lib.rs index 0e54ec73b47..3ec1f25486c 100644 --- a/dns-server/src/lib.rs +++ b/dns-server/src/lib.rs @@ -86,6 +86,14 @@ pub async fn start_servers( log.new(o!("component" => "http")), ) .config(dropshot_config.clone()) + .version_policy(dropshot::VersionPolicy::Dynamic(Box::new( + dropshot::ClientSpecifiesVersionInHeader::new( + "api-version" + .parse::() + .expect("api-version is a valid header name"), + semver::Version::new(2, 0, 0), + ), + ))) .start() .map_err(|error| anyhow!("setting up HTTP server: {:#}", error))? }; diff --git a/dns-server/src/storage.rs b/dns-server/src/storage.rs index 01ade2d383f..47a22010acd 100644 --- a/dns-server/src/storage.rs +++ b/dns-server/src/storage.rs @@ -96,8 +96,9 @@ use anyhow::{Context, anyhow}; use camino::Utf8PathBuf; use hickory_proto::rr::LowerName; use hickory_resolver::Name; -use internal_dns_types::config::{ - DnsConfig, DnsConfigParams, DnsConfigZone, DnsRecord, +use internal_dns_types::{ + config::{DnsConfig, DnsConfigParams, DnsConfigZone, DnsRecord}, + names::ZONE_APEX_NAME, }; use omicron_common::api::external::Generation; use serde::{Deserialize, Serialize}; @@ -161,6 +162,12 @@ pub enum UpdateError { req_id: String, }, + #[error( + "update declares at least one SOA record, but updates \ + may not provide SOA records" + )] + UpdateDefinesSoaRecord, + #[error("internal error")] InternalError(#[from] anyhow::Error), } @@ -351,6 +358,22 @@ impl Store { "new_generation" => u64::from(config.generation), )); + // We disallow updates that provide SOA records because the only SOA + // records we should have are ones we define when we're told we're + // authoritative for zones. SOA records are in the API types here + // because they are included when reporting this server's records (such + // as via `dns_config_get()`). 
+ for zone in config.zones.iter() { + if let Some(apex_records) = zone.records.get(ZONE_APEX_NAME) { + if apex_records + .iter() + .any(|record| matches!(record, DnsRecord::Soa(_))) + { + return Err(UpdateError::UpdateDefinesSoaRecord); + } + } + } + // Lock out concurrent updates. We must not return until we've released // the "updating" lock. let update = self.begin_update(req_id, config.generation).await?; @@ -399,6 +422,12 @@ impl Store { // For each zone in the config, create the corresponding tree. Populate // it with the data from the config. + // + // We are authoritative for zones whose records we serve, so we also + // create an SOA record at this point. This record is not provided by + // the control plane for simplicity; we can determine the serial from + // the generation we are updating to. + // // TODO-performance This would probably be a lot faster with a batch // operation. for zone_config in &config.zones { @@ -411,6 +440,12 @@ impl Store { .with_context(|| format!("creating tree {:?}", &tree_name))?; for (name, records) in &zone_config.records { + if name == ZONE_APEX_NAME { + // If any records are present on the zone itself, we'll + // handle those separately. + continue; + } + if records.is_empty() { // There's no distinction between in DNS between a name that // doesn't exist at all and one with no records associated @@ -418,6 +453,7 @@ impl Store { // the name. continue; } + let records_json = serde_json::to_vec(&records).with_context(|| { format!( @@ -433,6 +469,63 @@ impl Store { })?; } + // We've gone through all non-apex names for this zone. Now process + // records for the zone apex. We should have NS records here, and + // we'll want to add an SOA record here as well. + let records: Option> = + zone_config.records.get(ZONE_APEX_NAME).map(|x| x.clone()); + if let Some(mut apex_records) = records { + // Sort for a stable ordering of NS records. We'll pick the first + // NS record we see for the SOA record we'll create later. It + // really doesn't matter *which* NS record comes first, just that + // all DNS servers see the same NS record first. + apex_records.sort(); + + let mut representative_ns = None; + for record in &apex_records { + if let DnsRecord::Ns(nsdname) = record { + representative_ns = Some(nsdname.clone()); + break; + } + } + + if let Some(nsdname) = representative_ns { + // The SOA serial number field is a 32-bit field, but we + // want to use the 63-bit generation number here. The only + // issue with wrapping is, of course, that the serial number + // will decrease to 0 when the generation gets high enough. + // This will likely result in other systems consuming the + // rack's SOA records needing to reset their expected serial + // numbers, perhaps by dropping caches or other manual + // intervention. + // + // Assuming one generation bump every minute, this overflow + // would affect operations after 8,171 years. + let soa_serial = config.generation.as_u64() as u32; + apex_records.push(DnsRecord::Soa( + internal_dns_types::config::Soa::new( + nsdname, soa_serial, + ), + )); + } + + let records_json = serde_json::to_vec(&apex_records) + .with_context(|| { + format!( + "serializing records for zone {:?} apex", + zone_name, + ) + })?; + tree.insert(ZONE_APEX_NAME, records_json).with_context( + || { + format!( + "inserting records for zone {:?} apex", + zone_name, + ) + }, + )?; + } + // Flush this tree. We do this here to make sure the tree is fully // written before we update the config in the main tree below. 
// Otherwise, if Sled reorders writes between flush points, it's @@ -649,7 +742,8 @@ impl Store { name_only.set_fqdn(false); let key = name_only.to_string().to_lowercase(); assert!(!key.ends_with('.')); - key + + if key.is_empty() { ZONE_APEX_NAME.to_string() } else { key } }; debug!(&self.log, "query key"; "key" => &key); @@ -793,6 +887,7 @@ mod test { use internal_dns_types::config::DnsConfigParams; use internal_dns_types::config::DnsConfigZone; use internal_dns_types::config::DnsRecord; + use internal_dns_types::names::ZONE_APEX_NAME; use omicron_common::api::external::Generation; use omicron_test_utils::dev::test_setup_log; use std::collections::BTreeSet; @@ -851,6 +946,7 @@ mod test { enum Expect<'a> { NoZone, NoName, + Only(&'a DnsRecord), Record(&'a DnsRecord), } @@ -868,11 +964,12 @@ mod test { match (expect, result) { (Expect::NoZone, Err(QueryError::NoZone(n))) if n == name => (), (Expect::NoName, Err(QueryError::NoName(n))) if n == name => (), - (Expect::Record(r), Ok(records)) + (Expect::Only(r), Ok(records)) if records.len() == 1 && records[0] == *r => { () } + (Expect::Record(r), Ok(records)) if records.contains(r) => (), _ => panic!("did not get what we expected from DNS query"), } } @@ -926,22 +1023,22 @@ mod test { expect( &tc.store, "gen1_name.zone1.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); expect( &tc.store, "gen1_name.ZONE1.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); expect( &tc.store, "Gen1_name.zone1.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); expect( &tc.store, "shared_name.zone1.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); expect(&tc.store, "enoent.zone1.internal", Expect::NoName); expect(&tc.store, "gen2_name.zone2.internal", Expect::NoZone); @@ -980,12 +1077,12 @@ mod test { expect( &tc.store, "shared_name.zone1.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); expect( &tc.store, "gen2_name.zone2.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); expect(&tc.store, "gen8_name.zone8.internal", Expect::NoZone); @@ -1016,7 +1113,7 @@ mod test { expect( &tc.store, "gen8_name.zone8.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); // Updating to generation 8 again should be a no-op. It should succeed @@ -1036,7 +1133,7 @@ mod test { expect( &tc.store, "gen8_name.zone8.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); // Failure: try a backwards update. @@ -1151,7 +1248,7 @@ mod test { expect( &tc.store, "gen1_name.zone1.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); expect(&tc.store, "gen2_name.zone2.internal", Expect::NoZone); @@ -1166,7 +1263,7 @@ mod test { expect( &tc.store, "gen2_name.zone2.internal", - Expect::Record(&dummy_record), + Expect::Only(&dummy_record), ); // At this point, we want to drop the Store, but we need to keep around @@ -1203,11 +1300,7 @@ mod test { generations_with_trees(&store) ); // The rest of the behavior ought to be like generation 1. - expect( - &store, - "gen1_name.zone1.internal", - Expect::Record(&dummy_record), - ); + expect(&store, "gen1_name.zone1.internal", Expect::Only(&dummy_record)); expect(&store, "gen2_name.zone2.internal", Expect::NoZone); // Now we can do another update to generation 2. 
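One arithmetic detail from the storage change above is easy to miss: the SOA serial is the 64-bit DNS generation truncated to 32 bits (`config.generation.as_u64() as u32`), so it wraps to 0 once the generation passes `u32::MAX`. A small illustration of that truncation; the helper name is ours, for illustration only:

```rust
/// Illustrative only: the serial the store derives for a given generation,
/// matching `config.generation.as_u64() as u32` in the update path above.
fn soa_serial_for(generation: u64) -> u32 {
    generation as u32
}

#[test]
fn soa_serial_wraps_past_u32_max() {
    assert_eq!(soa_serial_for(1), 1);
    assert_eq!(soa_serial_for(u64::from(u32::MAX)), u32::MAX);
    // One generation past u32::MAX wraps back to 0 -- the "serial goes
    // backwards" case the storage comment warns consumers about.
    assert_eq!(soa_serial_for(u64::from(u32::MAX) + 1), 0);
}
```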
@@ -1219,11 +1312,7 @@ mod test { let gen2_config = store.read_config().unwrap(); assert_eq!(Generation::from_u32(2), gen2_config.generation); expect(&store, "gen1_name.zone1.internal", Expect::NoZone); - expect( - &store, - "gen2_name.zone2.internal", - Expect::Record(&dummy_record), - ); + expect(&store, "gen2_name.zone2.internal", Expect::Only(&dummy_record)); let tc = TestContext { logctx, tmpdir, store, db }; tc.cleanup_successful(); @@ -1297,4 +1386,118 @@ mod test { tc.cleanup_successful(); } + + #[tokio::test] + async fn test_zone_gets_soa_record() { + let tc = TestContext::new("test_zone_gets_soa_record"); + + let ns1_a = DnsRecord::Aaaa(Ipv6Addr::LOCALHOST); + let ns1_ns = DnsRecord::Ns("ns1.zone1.internal".to_string()); + let update = DnsConfigParams { + time_created: chrono::Utc::now(), + generation: Generation::from_u32(1), + zones: vec![DnsConfigZone { + zone_name: "zone1.internal".to_string(), + records: HashMap::from([ + ("ns1".to_string(), vec![ns1_a.clone()]), + (ZONE_APEX_NAME.to_string(), vec![ns1_ns.clone()]), + ]), + }], + }; + + tc.store + .dns_config_update(&update, "my request id") + .await + .expect("can apply update"); + + // These two records are ones we provided, they ought to be there. + expect(&tc.store, "ns1.zone1.internal", Expect::Only(&ns1_a)); + + expect( + &tc.store, + "zone1.internal", + Expect::Record(&DnsRecord::Ns("ns1.zone1.internal".to_string())), + ); + + let zone_soa = DnsRecord::Soa(internal_dns_types::config::Soa::new( + "ns1.zone1.internal".to_string(), + update.generation.as_u64() as u32, + )); + + // The SOA record is created when the server is told it is serving + // records for a zone. + expect(&tc.store, "zone1.internal", Expect::Record(&zone_soa)); + + // The use of `@` as a name for labels + expect(&tc.store, "zone1.internal", Expect::Record(&zone_soa)); + + // We can update DNS to a configuration without NS records and the + // server will survive the encounter. We won't have an SOA record + // without a nameserver to indicate as the primary source for this zone, + // though. + + let update2 = DnsConfigParams { + time_created: chrono::Utc::now(), + generation: Generation::from_u32(2), + zones: vec![DnsConfigZone { + zone_name: "zone1.internal".to_string(), + records: HashMap::from([( + "ns1".to_string(), + vec![ns1_a.clone()], + )]), + }], + }; + + tc.store + .dns_config_update(&update2, "my request id") + .await + .expect("can apply update"); + + // At this point we have a zone `zone1.internal`, but no records on the + // zone itself. + expect(&tc.store, "zone1.internal", Expect::NoName); + + let ns2_a = DnsRecord::Aaaa(Ipv6Addr::LOCALHOST); + let ns2_ns = DnsRecord::Ns("ns2.zone1.internal".to_string()); + + // Finally, even if the NS records are ordered in a strange way, we'll + // consistently reorder records in the update so that the + // lowest-numbered NS record is first and used as the SOA mname. 
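`update3` below exercises that ordering guarantee. As a standalone sketch of the selection rule, assuming only the derived `Ord` on `DnsRecord` for a stable sort (this mirrors, but is not, the store's implementation):

```rust
use internal_dns_types::config::DnsRecord;

/// Sketch of the rule applied to apex records: sort them into a stable order,
/// then use the first NS record's target as the SOA mname. Returns None when
/// the apex has no NS records, in which case no SOA is synthesized.
fn soa_mname(apex_records: &[DnsRecord]) -> Option<String> {
    let mut sorted = apex_records.to_vec();
    sorted.sort();
    sorted.into_iter().find_map(|record| match record {
        DnsRecord::Ns(nsdname) => Some(nsdname),
        _ => None,
    })
}
```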
+ let update3 = DnsConfigParams { + time_created: chrono::Utc::now(), + generation: Generation::from_u32(3), + zones: vec![DnsConfigZone { + zone_name: "zone1.internal".to_string(), + records: HashMap::from([ + ("ns2".to_string(), vec![ns2_a.clone()]), + ("ns1".to_string(), vec![ns1_a.clone()]), + ( + ZONE_APEX_NAME.to_string(), + vec![ns1_ns.clone(), ns2_ns.clone()], + ), + ]), + }], + }; + + tc.store + .dns_config_update(&update3, "my request id") + .await + .expect("can apply update"); + + let zone_soa = DnsRecord::Soa(internal_dns_types::config::Soa::new( + "ns1.zone1.internal".to_string(), + update3.generation.as_u64() as u32, + )); + + // The SOA record is created when the server is told it is serving + // records for a zone. + expect(&tc.store, "zone1.internal", Expect::Record(&zone_soa)); + + // And both NS records *are* present at the zone apex. + expect(&tc.store, "zone1.internal", Expect::Record(&ns1_ns)); + + expect(&tc.store, "zone1.internal", Expect::Record(&ns2_ns)); + + tc.cleanup_successful(); + } } diff --git a/dns-server/tests/basic_test.rs b/dns-server/tests/basic_test.rs index 5ca515320e2..db9f0faa680 100644 --- a/dns-server/tests/basic_test.rs +++ b/dns-server/tests/basic_test.rs @@ -9,20 +9,23 @@ use dropshot::{HandlerTaskMode, test_util::LogContext}; use hickory_client::{ client::{AsyncClient, ClientHandle}, error::ClientError, + rr::RData, udp::UdpClientStream, }; use hickory_resolver::TokioAsyncResolver; use hickory_resolver::error::ResolveErrorKind; use hickory_resolver::{ config::{NameServerConfig, Protocol, ResolverConfig, ResolverOpts}, + error::ResolveError, proto::{ op::ResponseCode, - rr::{DNSClass, Name, RecordType}, + rr::{DNSClass, Name, RecordType, rdata::AAAA}, xfer::DnsResponse, }, }; -use internal_dns_types::config::{ - DnsConfigParams, DnsConfigZone, DnsRecord, Srv, +use internal_dns_types::{ + config::{DnsConfigParams, DnsConfigZone, DnsRecord, Soa, Srv}, + names::ZONE_APEX_NAME, }; use omicron_test_utils::dev::test_setup_log; use slog::o; @@ -261,33 +264,49 @@ pub async fn multi_record_crud() -> Result<(), anyhow::Error> { } async fn lookup_ip_expect_nxdomain(resolver: &TokioAsyncResolver, name: &str) { + lookup_ip_expect_error_code(resolver, name, ResponseCode::NXDomain).await; +} + +async fn lookup_ip_expect_error_code( + resolver: &TokioAsyncResolver, + name: &str, + expected_code: ResponseCode, +) { match resolver.lookup_ip(name).await { Ok(unexpected) => { - panic!("Expected NXDOMAIN, got record {:?}", unexpected); + panic!("Expected {expected_code}, got record {unexpected:?}"); } - Err(e) => match e.kind() { - ResolveErrorKind::NoRecordsFound { - response_code, - query: _, - soa: _, - negative_ttl: _, - trusted: _, - } => match response_code { - ResponseCode::NXDomain => {} - unexpected => { - panic!( - "Expected NXDOMAIN, got response code {:?}", - unexpected - ); - } - }, - unexpected => { - panic!("Expected NXDOMAIN, got error {:?}", unexpected); - } - }, + Err(e) => expect_no_records_error_code(&e, expected_code), }; } +fn expect_no_records_error_code( + err: &ResolveError, + expected_code: ResponseCode, +) { + match err.kind() { + ResolveErrorKind::NoRecordsFound { + response_code, + query: _, + soa: _, + negative_ttl: _, + trusted: _, + } => { + if response_code == &expected_code { + // Error matches on all the conditions we're checking. No + // issues. 
+ } else { + panic!( + "Expected {expected_code}, got response code {response_code:?}" + ); + } + } + unexpected => { + panic!("Expected {expected_code}, got error {unexpected:?}"); + } + } +} + // Verify that the part of a name that's under the zone name can contain the // zone name itself. For example, you can say that "emy.oxide.internal" exists // under "oxide.internal", meaning that the server would provide @@ -349,6 +368,143 @@ pub async fn empty_record() -> Result<(), anyhow::Error> { Ok(()) } +#[tokio::test] +pub async fn soa() -> Result<(), anyhow::Error> { + let test_ctx = init_client_server("soa").await?; + let resolver = &test_ctx.resolver; + let client = &test_ctx.client; + + let ns1_addr = Ipv6Addr::new(0xfd, 0, 0, 0, 0, 0, 0, 0x1); + let ns1_aaaa = DnsRecord::Aaaa(ns1_addr); + let ns1_name = format!("ns1.{TEST_ZONE}."); + let ns1 = DnsRecord::Ns(ns1_name.clone()); + let service_addr = Ipv6Addr::new(0xfd, 0, 0, 0, 0, 0, 0, 0x2); + let service_aaaa = DnsRecord::Aaaa(service_addr); + + // Add a zone with only an SOA record and a NS it refers to. The server + // should reject this: while the records are coherent on their own, the DNS + // server will refuse to accept an externally-provided SOA record. It forms + // the SOA record as part of updating records when updated, and accepting + // external SOA records invites potential conflict with no clear resolution. + // What happens if we're provided two SOA records? What if they conflict? + // Should the DNS server still create another SOA record, and what if it's + // different than the provided one? + + let mut records = HashMap::new(); + + let soa = DnsRecord::Soa(Soa { + mname: ns1_name.clone(), + rname: "admin".to_string(), + serial: 5, + refresh: 1, + retry: 2, + expire: 3, + minimum: 4, + }); + + records.insert("ns1".to_string(), vec![ns1_aaaa.clone()]); + records.insert(ZONE_APEX_NAME.to_string(), vec![ns1.clone(), soa.clone()]); + + let err = dns_records_create(client, TEST_ZONE, records).await.unwrap_err(); + let err_text = err.root_cause().to_string(); + assert!( + err_text + .contains(dns_service_client::ERROR_CODE_UPDATE_DEFINES_SOA_RECORD) + ); + + let lookup_err = resolver + .soa_lookup(TEST_ZONE) + .await + .expect_err("test zone should not exist"); + // I think we really should answer with ResponseCode::Refused. We are not + // authoritative for the .internal TLD, so we don't know that some *other* + // server would have records for `oxide.internal`. It is not a failure of + // our server to not know what that domain is, we should just refuse to + // answer. + // + // One may imagine we should return at least NXDomain without the + // authoritative bit set. RFC 1035 says that "Name Error - Meaningful only + // from an authoritative name server, ...". Does that mean that recursive + // resolvers and clients would faithfully ignore our error in that case? Is + // there a risk that something would miss the non-authoritative nature of + // such an NXDomain and incorrectly cache the non-existence of some other + // domain? Hopefully not! Answering `Refused` would side-step this question. + expect_no_records_error_code(&lookup_err, ResponseCode::ServFail); + + // If an update defines a zone with records, but defines no nameservers, + // that should be acceptable. We won't be able to define an SOA record, + // since we won't have a nameserver to include as the primary source, but + // the zone should otherwise be acceptable. 
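The next few lines exercise exactly that nameserver-free case. Separately, for reference: when the server does synthesize an SOA (as asserted earlier in this test), every field other than `mname` and `serial` comes from the `Soa::new` defaults added to `internal-dns-types` in this change. A small sketch of what that synthesized record contains, with the timer values mirroring that constructor:

```rust
use internal_dns_types::config::Soa;

#[test]
fn soa_new_defaults() {
    // mname and serial are caller-provided; everything else is defaulted by
    // Soa::new: rname "admin", a one-hour refresh, six-minute retry,
    // one-day expire, and a 60-second minimum.
    let soa = Soa::new("ns1.zone1.internal".to_string(), 1);
    assert_eq!(soa.mname, "ns1.zone1.internal");
    assert_eq!(soa.serial, 1);
    assert_eq!(soa.rname, "admin");
    assert_eq!(soa.refresh, 60 * 60);
    assert_eq!(soa.retry, 6 * 60);
    assert_eq!(soa.expire, 24 * 60 * 60);
    assert_eq!(soa.minimum, 60);
}
```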
+ let mut records = HashMap::new(); + records.insert("service".to_string(), vec![service_aaaa.clone()]); + + dns_records_create(client, TEST_ZONE, records).await?; + + let service_ip_answer = + resolver.lookup_ip(&format!("service.{TEST_ZONE}.")).await?; + let mut ip_iter = service_ip_answer.iter(); + assert_eq!(ip_iter.next(), Some(IpAddr::V6(service_addr))); + assert_eq!(ip_iter.next(), None); + + // When we let the DNS server construct its own SOA record, we should be + // able to tell it about a zone. + let mut records = HashMap::new(); + records.insert("ns1".to_string(), vec![ns1_aaaa.clone()]); + records.insert(ZONE_APEX_NAME.to_string(), vec![ns1.clone()]); + + dns_records_create(client, TEST_ZONE, records).await?; + + // Now the NS records should exist, and so should an SOA. + let soa_answer = resolver.soa_lookup(TEST_ZONE).await?; + + let soa_records: Vec<&hickory_proto::rr::rdata::soa::SOA> = + soa_answer.iter().collect(); + + assert_eq!(soa_records.len(), 1); + + // We should be able to query nameservers for the zone + let zone_ns_answer = resolver.ns_lookup(TEST_ZONE).await?; + let has_ns_record = + zone_ns_answer.as_lookup().records().iter().any(|record| { + if let Some(RData::NS(nsdname)) = record.data() { + nsdname.0.to_utf8().as_str() == &ns1_name + } else { + false + } + }); + assert!(has_ns_record); + + // The nameserver's AAAA record should be in additionals. + let has_aaaa_additional = + zone_ns_answer.as_lookup().records().iter().any(|record| { + if let Some(RData::AAAA(AAAA(addr))) = record.data() { + addr == &ns1_addr + } else { + false + } + }); + assert!(has_aaaa_additional); + + // And we should be able to directly query the SOA record's primary server + let soa_ns_aaaa_answer = + resolver.lookup_ip(soa_records[0].mname().to_owned()).await?; + assert_eq!(soa_ns_aaaa_answer.iter().collect::>(), vec![ns1_addr]); + + // SOA queries under the zone we now know we are authoritative for should + // fail with NXDomain. + // + // TODO: we should see the authoritative bit set here. It's not clear that + // hickory-proto has a way to see if that bit is present in an error. + let lookup_err = resolver + .soa_lookup(format!("foo.{TEST_ZONE}.")) + .await + .expect_err("test zone should not exist"); + expect_no_records_error_code(&lookup_err, ResponseCode::NXDomain); + + test_ctx.cleanup().await; + Ok(()) +} + #[tokio::test] pub async fn nxdomain() -> Result<(), anyhow::Error> { let test_ctx = init_client_server("nxdomain").await?; @@ -387,31 +543,12 @@ pub async fn servfail() -> Result<(), anyhow::Error> { // In this case, we haven't defined any zones yet, so any request should be // outside the server's authoritative zones. That should result in a // SERVFAIL. 
- match resolver.lookup_ip("unicorn.oxide.internal").await { - Ok(unexpected) => { - panic!("Expected SERVFAIL, got record {:?}", unexpected); - } - Err(e) => match e.kind() { - ResolveErrorKind::NoRecordsFound { - response_code, - query: _, - soa: _, - negative_ttl: _, - trusted: _, - } => match response_code { - ResponseCode::ServFail => {} - unexpected => { - panic!( - "Expected SERVFAIL, got response code {:?}", - unexpected - ); - } - }, - unexpected => { - panic!("Expected SERVFAIL, got error {:?}", unexpected); - } - }, - }; + lookup_ip_expect_error_code( + &resolver, + "unicorn.oxide.internal", + ResponseCode::ServFail, + ) + .await; test_ctx.cleanup().await; Ok(()) diff --git a/dns-server/tests/cross_version_test.rs b/dns-server/tests/cross_version_test.rs new file mode 100644 index 00000000000..7842791a2dd --- /dev/null +++ b/dns-server/tests/cross_version_test.rs @@ -0,0 +1,330 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use anyhow::{Context, Result}; +use camino_tempfile::Utf8TempDir; +use dns_service_client::Client; +use dropshot::{HandlerTaskMode, test_util::LogContext}; +use internal_dns_types::{ + config::{DnsConfigParams, DnsConfigZone, DnsRecord}, + names::ZONE_APEX_NAME, +}; +use omicron_test_utils::dev::test_setup_log; +use slog::o; +use std::{collections::HashMap, net::Ipv6Addr}; + +const TEST_ZONE: &'static str = "oxide.internal"; + +// In this test we both need the latest DNS client from `dns-service-client`, +// and an older client to check compatibility against. While this gives us +// confidence that newer DNS servers' HTTP APIs work as expected with older +// clients, this does not check that old DNS servers handle new DNS clients +// well. +mod v1_client { + use anyhow::Context; + use internal_dns_types::v1; + + use std::collections::HashMap; + + progenitor::generate_api!( + spec = "../openapi/dns-server/dns-server-1.0.0-49359e.json", + interface = Positional, + inner_type = slog::Logger, + derives = [schemars::JsonSchema, Clone, Eq, PartialEq], + pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { + slog::debug!(log, "client request"; + "method" => %request.method(), + "uri" => %request.url(), + "body" => ?&request.body(), + ); + }), + post_hook = (|log: &slog::Logger, result: &Result<_, _>| { + slog::debug!(log, "client response"; "result" => ?result); + }), + replace = { + DnsConfig = v1::config::DnsConfig, + DnsConfigParams = v1::config::DnsConfigParams, + DnsConfigZone = v1::config::DnsConfigZone, + DnsRecord = v1::config::DnsRecord, + Srv = v1::config::Srv, + } + ); + + pub async fn dns_records_create( + client: &Client, + zone_name: &str, + records: HashMap>, + ) -> anyhow::Result<()> { + let before = client + .dns_config_get() + .await + .context("fetch current generation")? 
+ .into_inner(); + + let (our_zones, other_zones) = before + .zones + .into_iter() + .partition::<Vec<_>, _>(|z| z.zone_name == zone_name); + + assert!(our_zones.len() <= 1); + let zone_records = if let Some(our_zone) = our_zones.into_iter().next() + { + our_zone.records.into_iter().chain(records.into_iter()).collect() + } else { + records + }; + + let new_zone = v1::config::DnsConfigZone { + zone_name: zone_name.to_owned(), + records: zone_records, + }; + + let zones = + other_zones.into_iter().chain(std::iter::once(new_zone)).collect(); + let after = v1::config::DnsConfigParams { + generation: before.generation.next(), + zones, + time_created: chrono::Utc::now(), + }; + client.dns_config_put(&after).await.context("updating generation")?; + Ok(()) + } + + pub async fn dns_records_list( + client: &Client, + zone_name: &str, + ) -> anyhow::Result<HashMap<String, Vec<v1::config::DnsRecord>>> { + Ok(client + .dns_config_get() + .await + .context("fetch current generation")? + .into_inner() + .zones + .into_iter() + .find(|z| z.zone_name == zone_name) + .map(|z| z.records) + .unwrap_or_else(HashMap::new)) + } +} + +// A V2 server can productively handle requests from a V1 client, and a V1 +// client *can* provide records to a V2 server (though this really shouldn't +// ever happen) +#[tokio::test] +pub async fn cross_version_works() -> Result<(), anyhow::Error> { + let test_ctx = init_client_server("cross_version_works").await?; + + let ns1_addr = Ipv6Addr::new(0xfd, 0, 0, 0, 0, 0, 0, 0x1); + let ns1_aaaa = DnsRecord::Aaaa(ns1_addr); + let ns1_name = format!("ns1.{TEST_ZONE}."); + let ns1 = DnsRecord::Ns(ns1_name.clone()); + let service_addr = Ipv6Addr::new(0xfd, 0, 0, 0, 0, 0, 0, 0x2); + let service_aaaa = DnsRecord::Aaaa(service_addr); + let v1_service_aaaa = + internal_dns_types::v1::config::DnsRecord::Aaaa(service_addr); + + let mut records = HashMap::new(); + records.insert("ns1".to_string(), vec![ns1_aaaa]); + records.insert(ZONE_APEX_NAME.to_string(), vec![ns1.clone()]); + + dns_records_create(&test_ctx.latest_client, TEST_ZONE, records) + .await + .expect("can create zone"); + + let v1_records = + v1_client::dns_records_list(&test_ctx.v1_client, TEST_ZONE) + .await + .expect("zone exists"); + let records = dns_records_list(&test_ctx.latest_client, TEST_ZONE) + .await + .expect("zone exists"); + + // The only apex records are NS and SOA, which are not returned in V1 APIs, + // so we should see no records at the apex. + assert!(!v1_records.contains_key(ZONE_APEX_NAME)); + + // But via V2 APIs we should see both. + assert_eq!(records[ZONE_APEX_NAME].len(), 2); + assert!(records[ZONE_APEX_NAME].contains(&ns1)); + + // And a V1 client can create DNS records, limited though they may be. + let mut v1_style_records = HashMap::new(); + v1_style_records + .insert("service".to_string(), vec![v1_service_aaaa.clone()]); + // Explicitly redefine the ns1 records to an empty vec so they are cleared + // rather than unmodified. + v1_style_records.insert("ns1".to_string(), Vec::new()); + v1_client::dns_records_create( + &test_ctx.v1_client, + TEST_ZONE, + v1_style_records, + ) + .await + .expect("can redefine zone"); + + let v1_records = + v1_client::dns_records_list(&test_ctx.v1_client, TEST_ZONE) + .await + .expect("zone exists"); + let records = dns_records_list(&test_ctx.latest_client, TEST_ZONE) + .await + .expect("zone exists"); + + // Now there really are no records at the zone apex. 
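The assertions just below verify this against the running server. The same behavior can also be seen purely at the type level with the conversions added in `internal-dns-types`: `DnsConfig::as_v1()` drops NS and SOA records, and any name (or zone) left empty by that filtering is omitted entirely. A self-contained sketch, using an illustrative zone name:

```rust
use internal_dns_types::config::{DnsConfig, DnsConfigZone, DnsRecord, Soa};
use internal_dns_types::names::ZONE_APEX_NAME;
use omicron_common::api::external::Generation;
use std::collections::HashMap;

#[test]
fn apex_only_zone_disappears_in_v1_view() {
    let v2 = DnsConfig {
        generation: Generation::from_u32(1),
        time_created: chrono::Utc::now(),
        time_applied: chrono::Utc::now(),
        zones: vec![DnsConfigZone {
            zone_name: "oxide.internal".to_string(),
            records: HashMap::from([(
                ZONE_APEX_NAME.to_string(),
                vec![
                    DnsRecord::Ns("ns1.oxide.internal".to_string()),
                    DnsRecord::Soa(Soa::new(
                        "ns1.oxide.internal".to_string(),
                        1,
                    )),
                ],
            )]),
        }],
    };

    // The only name in this zone is the apex, and all of its records are
    // NS/SOA, so the V1 view of the same config has no zones at all.
    assert!(v2.as_v1().zones.is_empty());
}
```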
+ assert!(!records.contains_key(ZONE_APEX_NAME)); + eprintln!("records: {:?}", records); + assert_eq!(records.len(), 1); + assert_eq!(v1_records.len(), 1); + assert_eq!(records["service"], vec![service_aaaa.clone()]); + assert_eq!(v1_records["service"], vec![v1_service_aaaa.clone()]); + + test_ctx.cleanup().await; + + Ok(()) +} + +struct TestContext { + v1_client: v1_client::Client, + latest_client: Client, + dns_server: dns_server::dns_server::ServerHandle, + dropshot_server: dropshot::HttpServer, + tmp: Utf8TempDir, + logctx: LogContext, +} + +impl TestContext { + async fn cleanup(self) { + drop(self.dns_server); + self.dropshot_server.close().await.expect("Failed to clean up server"); + self.tmp.close().expect("Failed to clean up tmp directory"); + self.logctx.cleanup_successful(); + } +} + +async fn init_client_server( + test_name: &str, +) -> Result { + // initialize dns server config + let (tmp, config_storage, config_dropshot, logctx) = + test_config(test_name)?; + let log = logctx.log.clone(); + + // initialize dns server db + let store = dns_server::storage::Store::new( + log.new(o!("component" => "store")), + &config_storage, + ) + .context("initializing storage")?; + assert!(store.is_new()); + + // launch a dns server + let dns_server_config = dns_server::dns_server::Config { + bind_address: "[::1]:0".parse().unwrap(), + }; + let (dns_server, dropshot_server) = dns_server::start_servers( + log.clone(), + store, + &dns_server_config, + &config_dropshot, + ) + .await?; + + let v1_client = v1_client::Client::new( + &format!("http://{}", dropshot_server.local_addr()), + log.clone(), + ); + let latest_client = + Client::new(&format!("http://{}", dropshot_server.local_addr()), log); + + Ok(TestContext { + v1_client, + latest_client, + dns_server, + dropshot_server, + tmp, + logctx, + }) +} + +fn test_config( + test_name: &str, +) -> Result< + ( + Utf8TempDir, + dns_server::storage::Config, + dropshot::ConfigDropshot, + LogContext, + ), + anyhow::Error, +> { + let logctx = test_setup_log(test_name); + let tmp_dir = Utf8TempDir::with_prefix("dns-server-test")?; + let mut storage_path = tmp_dir.path().to_path_buf(); + storage_path.push("test"); + let config_storage = + dns_server::storage::Config { storage_path, keep_old_generations: 3 }; + let config_dropshot = dropshot::ConfigDropshot { + bind_address: "[::1]:0".to_string().parse().unwrap(), + default_request_body_max_bytes: 1024, + default_handler_task_mode: HandlerTaskMode::Detached, + log_headers: vec![], + }; + + Ok((tmp_dir, config_storage, config_dropshot, logctx)) +} + +async fn dns_records_create( + client: &Client, + zone_name: &str, + records: HashMap>, +) -> anyhow::Result<()> { + let before = client + .dns_config_get() + .await + .context("fetch current generation")? 
+ .into_inner(); + + let (our_zones, other_zones) = before + .zones + .into_iter() + .partition::, _>(|z| z.zone_name == zone_name); + + assert!(our_zones.len() <= 1); + let zone_records = if let Some(our_zone) = our_zones.into_iter().next() { + our_zone.records.into_iter().chain(records.into_iter()).collect() + } else { + records + }; + + let new_zone = DnsConfigZone { + zone_name: zone_name.to_owned(), + records: zone_records, + }; + + let zones = + other_zones.into_iter().chain(std::iter::once(new_zone)).collect(); + let after = DnsConfigParams { + generation: before.generation.next(), + zones, + time_created: chrono::Utc::now(), + }; + client.dns_config_put(&after).await.context("updating generation")?; + Ok(()) +} + +async fn dns_records_list( + client: &Client, + zone_name: &str, +) -> anyhow::Result>> { + Ok(client + .dns_config_get() + .await + .context("fetch current generation")? + .into_inner() + .zones + .into_iter() + .find(|z| z.zone_name == zone_name) + .map(|z| z.records) + .unwrap_or_else(HashMap::new)) +} diff --git a/internal-dns/resolver/src/resolver.rs b/internal-dns/resolver/src/resolver.rs index 016632c47e2..73ce579fbf3 100644 --- a/internal-dns/resolver/src/resolver.rs +++ b/internal-dns/resolver/src/resolver.rs @@ -826,6 +826,14 @@ mod test { }; dropshot::ServerBuilder::new(api(), label, log) .config(config_dropshot) + .version_policy(dropshot::VersionPolicy::Dynamic(Box::new( + dropshot::ClientSpecifiesVersionInHeader::new( + "api-version" + .parse::() + .expect("api-version is a valid header name"), + semver::Version::new(2, 0, 0), + ), + ))) .start() .unwrap() } diff --git a/internal-dns/types/src/config.rs b/internal-dns/types/src/config.rs index 69bfbe3032a..4f0199fb8c3 100644 --- a/internal-dns/types/src/config.rs +++ b/internal-dns/types/src/config.rs @@ -66,10 +66,12 @@ use core::fmt; use omicron_common::address::{CLICKHOUSE_ADMIN_PORT, CLICKHOUSE_TCP_PORT}; use omicron_common::api::external::Generation; use omicron_uuid_kinds::{OmicronZoneUuid, SledUuid}; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, HashMap}; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV6}; +use std::collections::BTreeMap; +use std::net::{Ipv6Addr, SocketAddrV6}; + +// "v2" types are the most recent, so we re-export them here for dependents that +// just want "latest". +pub use crate::v2::config::*; /// Used to construct the DNS name for a control plane host #[derive(Clone, Debug, PartialEq, PartialOrd)] @@ -533,8 +535,8 @@ impl DnsConfigBuilder { ) } - /// Construct a `DnsConfigZone` describing the control plane zone described - /// up to this point + /// Construct a `DnsConfigZone` describing the control plane DNS zone + /// described up to this point pub fn build_zone(self) -> DnsConfigZone { // Assemble the set of "AAAA" records for sleds. let sled_records = self.sleds.into_iter().map(|(sled, sled_ip)| { @@ -631,111 +633,6 @@ impl DnsConfigBuilder { } } -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] -pub struct DnsConfigParams { - pub generation: Generation, - pub time_created: chrono::DateTime, - pub zones: Vec, -} - -impl DnsConfigParams { - /// Given a high-level DNS configuration, return a reference to its sole - /// DNS zone. - /// - /// # Errors - /// - /// Returns an error if there are 0 or more than one zones in this - /// configuration. 
- pub fn sole_zone(&self) -> Result<&DnsConfigZone, anyhow::Error> { - ensure!( - self.zones.len() == 1, - "expected exactly one DNS zone, but found {}", - self.zones.len() - ); - Ok(&self.zones[0]) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] -pub struct DnsConfig { - pub generation: Generation, - pub time_created: chrono::DateTime, - pub time_applied: chrono::DateTime, - pub zones: Vec, -} - -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] -pub struct DnsConfigZone { - pub zone_name: String, - pub records: HashMap>, -} - -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialEq, - Eq, - PartialOrd, - Ord, -)] -#[serde(tag = "type", content = "data")] -pub enum DnsRecord { - A(Ipv4Addr), - // The renames are because openapi-lint complains about `Aaaa` and `Srv` - // not being in screaming snake case. `Aaaa` and `Srv` are the idiomatic - // Rust casings, though. - #[serde(rename = "AAAA")] - Aaaa(Ipv6Addr), - #[serde(rename = "SRV")] - Srv(Srv), -} - -// The `From` and `From` implementations are very slightly -// dubious, because a v4 or v6 address could also theoretically map to a DNS -// PTR record -// (https://www.cloudflare.com/learning/dns/dns-records/dns-ptr-record/). -// However, we don't support PTR records at the moment, so this is fine. Would -// certainly be worth revisiting if we do in the future, though. - -impl From for DnsRecord { - fn from(ip: Ipv4Addr) -> Self { - DnsRecord::A(ip) - } -} - -impl From for DnsRecord { - fn from(ip: Ipv6Addr) -> Self { - DnsRecord::Aaaa(ip) - } -} - -impl From for DnsRecord { - fn from(srv: Srv) -> Self { - DnsRecord::Srv(srv) - } -} - -#[derive( - Clone, - Debug, - Serialize, - Deserialize, - JsonSchema, - PartialEq, - Eq, - PartialOrd, - Ord, -)] -pub struct Srv { - pub prio: u16, - pub weight: u16, - pub port: u16, - pub target: String, -} - #[cfg(test)] mod test { use super::{DnsConfigBuilder, Host, ServiceName}; diff --git a/internal-dns/types/src/diff.rs b/internal-dns/types/src/diff.rs index 8d33be038d9..7eec299b0c7 100644 --- a/internal-dns/types/src/diff.rs +++ b/internal-dns/types/src/diff.rs @@ -152,6 +152,8 @@ impl std::fmt::Display for DnsDiff<'_> { DnsRecord::Srv(Srv { port, target, .. }) => { format!("SRV port {:5} {}", port, target) } + DnsRecord::Ns(name) => format!("NS {}", name), + DnsRecord::Soa(soa) => format!("SOA {:?}", soa), } )?; } diff --git a/internal-dns/types/src/lib.rs b/internal-dns/types/src/lib.rs index 5dfccd324e3..9feba356f8a 100644 --- a/internal-dns/types/src/lib.rs +++ b/internal-dns/types/src/lib.rs @@ -2,7 +2,34 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Common types for internal DNS resolution. +//! Common types for the management interface of DNS servers for internal and +//! external name resolution. +//! +//! ## Organization +//! +//! Some types in this crate are exposed by its dependents as part of versioned +//! HTTP interfaces, such as `dns-server`'s management interfaces. In those +//! cases we may want to support multiple HTTP interface versions concurrently, +//! and so this crate preserves old versions of select public items used in this +//! way. +//! +//! The alternative here would be to require dependents of `internal-dns` to +//! declare duplicate dependencies on `internal-dns` at different revisions. +//! That would force dependents to take all of `internal-dns`' dependencies at +//! 
versions of interest as transitive dependencies, and precludes maintenance +//! that would otherwise be able to preserve API compatibility of the public +//! types. +//! +//! `cargo xtask openapi` helps us check that we don't unintentionally break an +//! existing committed version, which also helps us be confident that future +//! maintenance on old versions' types does not introduce breaking changes. +//! +//! The top-level items here can be thought of as the "current" version, where +//! versioned items (and their previous versions) are in the `vN` modules with +//! their latest form re-exported as the "current" version. + +pub mod v1; +pub mod v2; pub mod config; pub mod diff; diff --git a/internal-dns/types/src/names.rs b/internal-dns/types/src/names.rs index ef87a1e00fe..ac314009d28 100644 --- a/internal-dns/types/src/names.rs +++ b/internal-dns/types/src/names.rs @@ -20,6 +20,17 @@ pub const DNS_ZONE: &str = "control-plane.oxide.internal"; /// development pub const DNS_ZONE_EXTERNAL_TESTING: &str = "oxide-dev.test"; +/// Label for records associated with a zone itself, rather than any names +/// inside it. +/// +/// This string is not part of a valid DNS name; no query can actually be +/// issued for `@.`. The string is chosen to match with what users might +/// expect with the prior experience of zone files for more traditional DNS +/// servers - in cases where zone files are actual literal text files either +/// manually or programmatically constructed, records of this nature are +/// typically described with the name "@". +pub const ZONE_APEX_NAME: &str = "@"; + /// Names of services within the control plane #[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)] pub enum ServiceName { diff --git a/internal-dns/types/src/v1/config.rs b/internal-dns/types/src/v1/config.rs new file mode 100644 index 00000000000..ecef17749e2 --- /dev/null +++ b/internal-dns/types/src/v1/config.rs @@ -0,0 +1,180 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::v2; +use anyhow::ensure; +use omicron_common::api::external::Generation; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::net::{Ipv4Addr, Ipv6Addr}; + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] +pub struct DnsConfigParams { + pub generation: Generation, + pub time_created: chrono::DateTime, + pub zones: Vec, +} + +impl DnsConfigParams { + /// Given a high-level DNS configuration, return a reference to its sole + /// DNS zone. + /// + /// # Errors + /// + /// Returns an error if there are 0 or more than one zones in this + /// configuration. 
+ pub fn sole_zone(&self) -> Result<&DnsConfigZone, anyhow::Error> { + ensure!( + self.zones.len() == 1, + "expected exactly one DNS zone, but found {}", + self.zones.len() + ); + Ok(&self.zones[0]) + } +} + +impl Into for DnsConfigParams { + fn into(self) -> v2::config::DnsConfigParams { + let mut converted_zones: Vec = Vec::new(); + for zone in self.zones.into_iter() { + converted_zones.push(zone.into()); + } + + v2::config::DnsConfigParams { + generation: self.generation, + time_created: self.time_created, + zones: converted_zones, + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +pub struct DnsConfig { + pub generation: Generation, + pub time_created: chrono::DateTime, + pub time_applied: chrono::DateTime, + pub zones: Vec, +} + +// See docs on [`v2::config::DnsConfigZone`] for more about this struct. They are functionally +// equivalent. We would include that doc comment here, but altering docs to existing types +// makes them appear different in OpenAPI terms and would be "breaking" for the time being. +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] +pub struct DnsConfigZone { + pub zone_name: String, + pub records: HashMap>, +} + +impl Into for DnsConfigZone { + fn into(self) -> v2::config::DnsConfigZone { + let converted_records: HashMap> = + self.records + .into_iter() + .filter_map(|(name, name_records)| { + let converted_name_records: Vec = + name_records + .into_iter() + .map(|rec| rec.into()) + .collect(); + if converted_name_records.is_empty() { + None + } else { + Some((name, converted_name_records)) + } + }) + .collect(); + v2::config::DnsConfigZone { + zone_name: self.zone_name, + records: converted_records, + } + } +} + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +#[serde(tag = "type", content = "data")] +pub enum DnsRecord { + A(Ipv4Addr), + // The renames are because openapi-lint complains about `Aaaa` and `Srv` + // not being in screaming snake case. `Aaaa` and `Srv` are the idiomatic + // Rust casings, though. + #[serde(rename = "AAAA")] + Aaaa(Ipv6Addr), + #[serde(rename = "SRV")] + Srv(Srv), +} + +impl Into for DnsRecord { + fn into(self) -> v2::config::DnsRecord { + match self { + DnsRecord::A(ip) => v2::config::DnsRecord::A(ip), + DnsRecord::Aaaa(ip) => v2::config::DnsRecord::Aaaa(ip), + DnsRecord::Srv(srv) => v2::config::DnsRecord::Srv(srv.into()), + } + } +} + +// The `From` and `From` implementations are very slightly +// dubious, because a v4 or v6 address could also theoretically map to a DNS +// PTR record +// (https://www.cloudflare.com/learning/dns/dns-records/dns-ptr-record/). +// However, we don't support PTR records at the moment, so this is fine. Would +// certainly be worth revisiting if we do in the future, though. 
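These `Into` conversions are what `dns_config_put_v1` relies on to reuse the V2 update path (the address-to-record `From` impls the comment above refers to follow next). A short usage sketch of the upgrade direction; note that names whose record lists come out empty are dropped during conversion:

```rust
use internal_dns_types::{v1, v2};
use omicron_common::api::external::Generation;
use std::collections::HashMap;
use std::net::Ipv6Addr;

#[test]
fn v1_params_upgrade_to_v2() {
    let old = v1::config::DnsConfigParams {
        generation: Generation::from_u32(1),
        time_created: chrono::Utc::now(),
        zones: vec![v1::config::DnsConfigZone {
            zone_name: "oxide.internal".to_string(),
            records: HashMap::from([
                // A name with records survives the conversion...
                (
                    "db".to_string(),
                    vec![v1::config::DnsRecord::Aaaa(Ipv6Addr::LOCALHOST)],
                ),
                // ...but a name with an empty record list is dropped.
                ("stale".to_string(), Vec::new()),
            ]),
        }],
    };

    let new: v2::config::DnsConfigParams = old.into();
    assert_eq!(new.zones.len(), 1);
    assert_eq!(new.zones[0].records.len(), 1);
    assert!(new.zones[0].records.contains_key("db"));
}
```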
+ +impl From for DnsRecord { + fn from(ip: Ipv4Addr) -> Self { + DnsRecord::A(ip) + } +} + +impl From for DnsRecord { + fn from(ip: Ipv6Addr) -> Self { + DnsRecord::Aaaa(ip) + } +} + +impl From for DnsRecord { + fn from(srv: Srv) -> Self { + DnsRecord::Srv(srv) + } +} + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct Srv { + pub prio: u16, + pub weight: u16, + pub port: u16, + pub target: String, +} + +impl From for Srv { + fn from(other: v2::config::Srv) -> Self { + Srv { + prio: other.prio, + weight: other.weight, + port: other.port, + target: other.target, + } + } +} diff --git a/internal-dns/types/src/v1/mod.rs b/internal-dns/types/src/v1/mod.rs new file mode 100644 index 00000000000..2ff47775af3 --- /dev/null +++ b/internal-dns/types/src/v1/mod.rs @@ -0,0 +1,5 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +pub mod config; diff --git a/internal-dns/types/src/v2/config.rs b/internal-dns/types/src/v2/config.rs new file mode 100644 index 00000000000..a44c28d5f69 --- /dev/null +++ b/internal-dns/types/src/v2/config.rs @@ -0,0 +1,249 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::v1; +use anyhow::ensure; +use omicron_common::api::external::Generation; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::net::{Ipv4Addr, Ipv6Addr}; + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] +pub struct DnsConfigParams { + pub generation: Generation, + pub time_created: chrono::DateTime, + pub zones: Vec, +} + +impl DnsConfigParams { + /// Given a high-level DNS configuration, return a reference to its sole + /// DNS zone. + /// + /// # Errors + /// + /// Returns an error if there are 0 or more than one zones in this + /// configuration. + pub fn sole_zone(&self) -> Result<&DnsConfigZone, anyhow::Error> { + ensure!( + self.zones.len() == 1, + "expected exactly one DNS zone, but found {}", + self.zones.len() + ); + Ok(&self.zones[0]) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +pub struct DnsConfig { + pub generation: Generation, + pub time_created: chrono::DateTime, + pub time_applied: chrono::DateTime, + pub zones: Vec, +} + +impl DnsConfig { + /// Perform a *lossy* conversion from the V2 [`DnsConfig`] to the V1 + /// [`v1::config::DnsConfig`]. In particular, V2 adds NS and SOA records, + /// which did not exist in V1, so they are silently discarded when + /// converting down. + /// + /// If this conversion would leave an empty zone, the zone is omitted + /// entirely. + pub fn as_v1(self) -> v1::config::DnsConfig { + let DnsConfig { generation, time_created, time_applied, zones } = self; + + v1::config::DnsConfig { + generation, + time_created, + time_applied, + zones: zones + .into_iter() + .filter_map(|zone| { + let converted_zone = zone.as_v1(); + if converted_zone.records.is_empty() { + None + } else { + Some(converted_zone) + } + }) + .collect(), + } + } +} + +/// Configuration for a specific DNS zone, as opposed to illumos zones in which +/// the services described by these records run. 
+/// +/// The name `@` is special: it describes records that should be provided for +/// queries about `zone_name`. This is used in favor of the empty string as `@` +/// is the name used for this purpose in zone files for most DNS configurations. +/// It also avoids potentially-confusing debug output from naively printing out +/// records and their names - if you've seen an `@` record and tools are unclear +/// about what that means, hopefully you've arrived here! +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] +pub struct DnsConfigZone { + pub zone_name: String, + pub records: HashMap>, +} + +impl DnsConfigZone { + fn as_v1(self) -> v1::config::DnsConfigZone { + let DnsConfigZone { zone_name, records } = self; + + v1::config::DnsConfigZone { + zone_name, + records: records + .into_iter() + .filter_map(|(k, v)| { + let converted_records: Vec = + v.into_iter().filter_map(|rec| rec.as_v1()).collect(); + if converted_records.is_empty() { + None + } else { + Some((k, converted_records)) + } + }) + .collect(), + } + } +} + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +#[serde(tag = "type", content = "data")] +pub enum DnsRecord { + A(Ipv4Addr), + // The renames are because openapi-lint complains about `Aaaa` and `Srv` + // not being in screaming snake case. `Aaaa` and `Srv` are the idiomatic + // Rust casings, though. + #[serde(rename = "AAAA")] + Aaaa(Ipv6Addr), + #[serde(rename = "SRV")] + Srv(Srv), + #[serde(rename = "NS")] + Ns(String), + #[serde(rename = "SOA")] + Soa(Soa), +} + +impl DnsRecord { + fn as_v1(self) -> Option { + match self { + DnsRecord::A(ip) => Some(v1::config::DnsRecord::A(ip)), + DnsRecord::Aaaa(ip) => Some(v1::config::DnsRecord::Aaaa(ip)), + DnsRecord::Srv(srv) => Some(v1::config::DnsRecord::Srv(srv.into())), + DnsRecord::Ns(_) | DnsRecord::Soa(_) => { + // V1 DNS records do not have variants for NS or SOA records, so + // we're lossy here. + None + } + } + } +} + +// The `From` and `From` implementations are very slightly +// dubious, because a v4 or v6 address could also theoretically map to a DNS +// PTR record +// (https://www.cloudflare.com/learning/dns/dns-records/dns-ptr-record/). +// However, we don't support PTR records at the moment, so this is fine. Would +// certainly be worth revisiting if we do in the future, though. + +impl From for DnsRecord { + fn from(ip: Ipv4Addr) -> Self { + DnsRecord::A(ip) + } +} + +impl From for DnsRecord { + fn from(ip: Ipv6Addr) -> Self { + DnsRecord::Aaaa(ip) + } +} + +impl From for DnsRecord { + fn from(srv: Srv) -> Self { + DnsRecord::Srv(srv) + } +} + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct Srv { + pub prio: u16, + pub weight: u16, + pub port: u16, + pub target: String, +} + +impl From for Srv { + fn from(other: v1::config::Srv) -> Self { + Srv { + prio: other.prio, + weight: other.weight, + port: other.port, + target: other.target, + } + } +} + +#[derive( + Clone, + Debug, + Serialize, + Deserialize, + JsonSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct Soa { + pub mname: String, + pub rname: String, + pub serial: u32, + pub refresh: i32, + pub retry: i32, + pub expire: i32, + pub minimum: u32, +} + +const HOUR_IN_SECONDS: i32 = 60 * 60; + +impl Soa { + /// Create a struct describing the internal fields of an SOA record. This uses defaults + /// presumed to be reasonable for fields that are not provided as arguments. 
+ pub fn new(mname: String, serial: u32) -> Self { + Self { + mname, + rname: "admin".to_string(), + serial, + // We pick a relatively short REFRESH period because we don't + // support sending NOTIFY messages. We don't support zone transfers + // though, so this is a moot point for the time being. + refresh: HOUR_IN_SECONDS, + retry: HOUR_IN_SECONDS / 10, + expire: 24 * HOUR_IN_SECONDS, + minimum: 60, + } + } +} diff --git a/internal-dns/types/src/v2/mod.rs b/internal-dns/types/src/v2/mod.rs new file mode 100644 index 00000000000..2ff47775af3 --- /dev/null +++ b/internal-dns/types/src/v2/mod.rs @@ -0,0 +1,5 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +pub mod config; diff --git a/nexus/db-model/src/dns.rs b/nexus/db-model/src/dns.rs index b46dccbfb84..aaf6d63ea00 100644 --- a/nexus/db-model/src/dns.rs +++ b/nexus/db-model/src/dns.rs @@ -124,6 +124,8 @@ pub enum DnsRecord { A(Ipv4Addr), AAAA(Ipv6Addr), SRV(SRV), + NS(String), + SOA(SOA), } impl From for DnsRecord { @@ -132,6 +134,8 @@ impl From for DnsRecord { params::DnsRecord::A(addr) => DnsRecord::A(addr), params::DnsRecord::Aaaa(addr) => DnsRecord::AAAA(addr), params::DnsRecord::Srv(srv) => DnsRecord::SRV(SRV::from(srv)), + params::DnsRecord::Ns(ns) => DnsRecord::NS(ns), + params::DnsRecord::Soa(soa) => DnsRecord::SOA(SOA::from(soa)), } } } @@ -144,6 +148,10 @@ impl From for params::DnsRecord { DnsRecord::SRV(srv) => { params::DnsRecord::Srv(params::Srv::from(srv)) } + DnsRecord::NS(ns) => params::DnsRecord::Ns(ns), + DnsRecord::SOA(soa) => { + params::DnsRecord::Soa(params::Soa::from(soa)) + } } } } @@ -185,6 +193,52 @@ impl From for params::Srv { } } +/// This type is identical to `dns_service_client::SOA`. It's defined +/// separately for the same reason as SRV: this is serialized to JSON and stored +/// in the database. The same desire to avoid linking database state to the DNS +/// server API applies. +/// +/// BE CAREFUL MODIFYING THIS STRUCT. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename = "Srv")] +pub struct SOA { + pub mname: String, + pub rname: String, + pub serial: u32, + pub refresh: i32, + pub retry: i32, + pub expire: i32, + pub minimum: u32, +} + +impl From for SOA { + fn from(soa: params::Soa) -> Self { + SOA { + mname: soa.mname, + rname: soa.rname, + serial: soa.serial, + refresh: soa.refresh, + retry: soa.retry, + expire: soa.expire, + minimum: soa.minimum, + } + } +} + +impl From for params::Soa { + fn from(soa: SOA) -> Self { + params::Soa { + mname: soa.mname, + rname: soa.rname, + serial: soa.serial, + refresh: soa.refresh, + retry: soa.retry, + expire: soa.expire, + minimum: soa.minimum, + } + } +} + /// Describes the initial configuration for a DNS group /// /// Provides helpers for constructing the database rows to describe that initial diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index ad93ee5a1bf..970aa28e837 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -1018,8 +1018,12 @@ mod test { async fn test_blueprint_external_dns_basic() { static TEST_NAME: &str = "test_blueprint_external_dns_basic"; let logctx = test_setup_log(TEST_NAME); - let (_, mut blueprint) = - ExampleSystemBuilder::new(&logctx.log, TEST_NAME).nsleds(5).build(); + let system_builder = ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nsleds(5) + .external_dns_count(3) + .expect("can set external dns count"); + let external_dns_count = system_builder.get_external_dns_zones(); + let (_, mut blueprint) = system_builder.build(); blueprint.internal_dns_version = Generation::new(); blueprint.external_dns_version = Generation::new(); @@ -1045,7 +1049,33 @@ mod test { String::from("oxide.test"), ); assert_eq!(external_dns_zone.zone_name, "oxide.test"); - assert!(external_dns_zone.records.is_empty()); + // We'll only have external DNS nameserver records - the A/AAAA records + // for servers themselves, and NS records at the apex. + let baseline_external_dns_names = external_dns_count + 1; + assert_eq!( + external_dns_zone.records.len(), + baseline_external_dns_names + ); + + use internal_dns_types::names::ZONE_APEX_NAME; + let apex_records = external_dns_zone + .records + .get(ZONE_APEX_NAME) + .expect("records are present for zone apex"); + assert_eq!(apex_records.len(), external_dns_count); + for i in 0..external_dns_count { + // The nameserver records have 1-indexed numbering, but we iterate + // from 0. Add one to line up expectations for the test. + let ns_name = format!("ns{}", i + 1); + assert!(external_dns_zone.records.contains_key(&ns_name)); + assert_eq!( + apex_records[i], + DnsRecord::Ns(format!( + "{ns_name}.{}", + external_dns_zone.zone_name + )) + ); + } // Now check a more typical case. let external_dns_zone = blueprint_external_dns_config( @@ -1055,19 +1085,30 @@ mod test { ); assert_eq!(external_dns_zone.zone_name, String::from("oxide.test")); let records = &external_dns_zone.records; - assert_eq!(records.len(), 1); + // One name for the silo, three for the nameservers, and one more for + // the zone apex. + let expected_dns_names = 1 + baseline_external_dns_names; + assert_eq!(records.len(), expected_dns_names); let silo_records = records .get(&silo_dns_name(my_silo.name())) .expect("missing silo DNS records"); - // Helper for converting dns records for a given silo to IpAddrs + // Helper for converting dns records for a given silo to IpAddrs. 
Below + // we'll check about the Nexuses represented in a silo's DNS records. + // These currently are the *only* records that can be present for a + // silo's name. If that changes in the future, this section of the test + // probably needs to be reworked. let records_to_ips = |silo_records: &Vec<_>| { let mut ips: Vec<_> = silo_records .into_iter() .map(|record| match record { DnsRecord::A(v) => IpAddr::V4(*v), DnsRecord::Aaaa(v) => IpAddr::V6(*v), - DnsRecord::Srv(_) => panic!("unexpected SRV record"), + other @ DnsRecord::Srv(_) | + other @ DnsRecord::Ns(_) | + other @ DnsRecord::Soa(_) => { + panic!("unexpected DNS record for silo: {other:?}") + } }) .collect(); ips.sort(); @@ -1679,10 +1720,18 @@ mod test { let (new_name, new_records) = added[0]; assert_eq!(new_name, silo_dns_name(&silo.identity.name)); // And it should have the same IP addresses as all of the other Silos. - assert_eq!( - new_records, - old_external.zones[0].records.values().next().unwrap() - ); + for (prior_silo_name, prior_silo_records) in + old_external.zones[0].records.iter() + { + // Only some records in the external zone are for Silos, though. + if prior_silo_name.ends_with(".sys") { + assert_eq!( + new_records, prior_silo_records, + "new silo ({new_name}) DNS records differ from \ + another silo ({prior_silo_name})" + ); + } + } // If we execute the blueprint, DNS should not be changed. _ = realize_blueprint_and_expect( diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 7a031b8c81c..04a5e2123ea 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -330,6 +330,14 @@ impl ExampleSystemBuilder { self.nexus_count.unwrap_or(ZoneCount(self.nsleds)) } + pub fn get_internal_dns_zones(&self) -> usize { + self.internal_dns_count.0 + } + + pub fn get_external_dns_zones(&self) -> usize { + self.external_dns_count.0 + } + /// Create a new example system with the given modifications. /// /// Return the system, and the initial blueprint that matches it. @@ -525,7 +533,7 @@ impl ExampleSystemBuilder { // A little wrapper to try and avoid having an `on` function which takes 3 // usize parameters. #[derive(Clone, Copy, Debug)] -struct ZoneCount(usize); +struct ZoneCount(pub usize); impl ZoneCount { fn on(self, sled_id: usize, total_sleds: usize) -> usize { diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 3db5e46f967..1c2a5fa9b5c 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -206,20 +206,38 @@ impl super::Nexus { ); let silo_name = &request.recovery_silo.silo_name; - let dns_records = request + // Records that should be present at the rack-internal zone apex - + // `oxide.internal`. + let mut int_zone_records = Vec::new(); + // Internal DNS serves the `control-plane.oxide.internal` zone, where + // internal records for rack-internal services are served. The + // name servers themselves will be given + // `ns$N.control-plane.oxide.internal` name, with NS and A records. + // + // Again, the choice of which server is which `ns$N` is arbitrary. + let mut internal_dns_records = Vec::new(); + + for (_, zc) in request .blueprint .all_omicron_zones(BlueprintZoneDisposition::is_in_service) - .filter_map(|(_, zc)| match zc.zone_type { - BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { - external_ip, - .. 
- }) => Some(match external_ip.ip { - IpAddr::V4(addr) => DnsRecord::A(addr), - IpAddr::V6(addr) => DnsRecord::Aaaa(addr), - }), - _ => None, - }) - .collect(); + { + match zc.zone_type { + BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { dns_address, .. }, + ) => { + internal_dns_records + .push(DnsRecord::Aaaa(*dns_address.ip())); + let seen_intdns = internal_dns_records.len(); + int_zone_records.push(DnsRecord::Ns(format!( + "ns{}.{}", + seen_intdns, + internal_dns_types::names::DNS_ZONE + ))); + } + _ => {} + } + } + let mut dns_update = DnsVersionUpdateBuilder::new( DnsGroup::External, format!("create silo: {:?}", silo_name.as_str()), @@ -228,7 +246,20 @@ impl super::Nexus { let silo_dns_name = silo_dns_name(silo_name); let recovery_silo_fq_dns_name = format!("{silo_dns_name}.{}", request.external_dns_zone_name); - dns_update.add_name(silo_dns_name, dns_records)?; + + // sled-agent, in service of RSS, has configured internal DNS. We got + // its DNS configuration in `request.internal_dns_zone_config` and are + // appending to it before committing the initial RSS state to the + // database + let external_dns_config = + nexus_types::deployment::execution::blueprint_external_dns_config( + &request.blueprint, + vec![silo_name], + request.external_dns_zone_name, + ); + for (name, records) in external_dns_config.records.into_iter() { + dns_update.add_name(name, records)?; + } // We're providing an update to the initial `external_dns` group we // defined above; also bump RSS's blueprint's `external_dns_version` to diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 769f74a46f7..a7f65766781 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -172,7 +172,7 @@ pub struct Blueprint { // See blueprint execution for more on this. pub internal_dns_version: Generation, - /// external DNS version when thi blueprint was created + /// external DNS version when this blueprint was created // See blueprint execution for more on this. 
pub external_dns_version: Generation, diff --git a/nexus/types/src/deployment/execution/dns.rs b/nexus/types/src/deployment/execution/dns.rs index cdf91bb4afb..301514347ab 100644 --- a/nexus/types/src/deployment/execution/dns.rs +++ b/nexus/types/src/deployment/execution/dns.rs @@ -7,7 +7,10 @@ use std::{ net::IpAddr, }; -use internal_dns_types::{config::DnsConfigBuilder, names::ServiceName}; +use internal_dns_types::{ + config::DnsConfigBuilder, + names::{ServiceName, ZONE_APEX_NAME}, +}; use omicron_common::api::external::Name; use omicron_uuid_kinds::SledUuid; @@ -20,7 +23,10 @@ use crate::{ silo::{default_silo_name, silo_dns_name}, }; -use super::{Overridables, Sled, blueprint_nexus_external_ips}; +use super::{ + Overridables, Sled, blueprint_external_dns_resolver_ips, + blueprint_nexus_external_ips, +}; /// Returns the expected contents of internal DNS based on the given blueprint pub fn blueprint_internal_dns_config( @@ -164,8 +170,9 @@ pub fn blueprint_external_dns_config<'a>( external_dns_zone_name: String, ) -> DnsConfigZone { let nexus_external_ips = blueprint_nexus_external_ips(blueprint); + let dns_external_ips = blueprint_external_dns_resolver_ips(blueprint); - let dns_records: Vec<DnsRecord> = nexus_external_ips + let nexus_dns_records: Vec<DnsRecord> = nexus_external_ips .into_iter() .map(|addr| match addr { IpAddr::V4(addr) => DnsRecord::A(addr), @@ -173,7 +180,26 @@ }) .collect(); - let records = silos + let mut zone_records: Vec<DnsRecord> = Vec::new(); + let external_dns_records: Vec<(String, Vec<DnsRecord>)> = dns_external_ips + .into_iter() + .enumerate() + .map(|(idx, dns_ip)| { + let record = match dns_ip { + IpAddr::V4(addr) => DnsRecord::A(addr), + IpAddr::V6(addr) => DnsRecord::Aaaa(addr), + }; + // `idx` is 0-based, but nameservers start at `ns1` (1-based). + let name = format!("ns{}", idx + 1); + zone_records.push(DnsRecord::Ns(format!( + "{}.{}", + &name, external_dns_zone_name + ))); + (name, vec![record]) + }) + .collect(); + + let mut records = silos + .into_iter() // We do not generate a DNS name for the "default" Silo. // @@ -185,10 +211,17 @@ // abstraction, such as it is, would be leakier). .filter_map(|silo_name| { (silo_name != default_silo_name()) - .then(|| (silo_dns_name(&silo_name), dns_records.clone())) + .then(|| (silo_dns_name(&silo_name), nexus_dns_records.clone())) }) + .chain(external_dns_records) .collect::<HashMap<String, Vec<DnsRecord>>>(); + if !zone_records.is_empty() { + let prior_records = + records.insert(ZONE_APEX_NAME.to_string(), zone_records); + assert!(prior_records.is_none()); + } + DnsConfigZone { zone_name: external_dns_zone_name, records: records.clone(), diff --git a/nexus/types/src/deployment/execution/utils.rs b/nexus/types/src/deployment/execution/utils.rs index 22483448a81..fb8845465bf 100644 --- a/nexus/types/src/deployment/execution/utils.rs +++ b/nexus/types/src/deployment/execution/utils.rs @@ -85,3 +85,19 @@ pub fn blueprint_nexus_external_ips(blueprint: &Blueprint) -> Vec<IpAddr> { }) .collect() } + +/// Return the addresses on which this blueprint's external DNS servers listen +/// for DNS queries. +pub fn blueprint_external_dns_resolver_ips( + blueprint: &Blueprint, +) -> Vec<IpAddr> { + blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(_, z)| match z.zone_type { + BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { dns_address, ..
}, + ) => Some(dns_address.addr.ip()), + _ => None, + }) + .collect() +} diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index a1a707d12a9..643a8fa2bff 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -195,6 +195,7 @@ pub type DnsConfigParams = internal_dns_types::config::DnsConfigParams; pub type DnsConfigZone = internal_dns_types::config::DnsConfigZone; pub type DnsRecord = internal_dns_types::config::DnsRecord; pub type Srv = internal_dns_types::config::Srv; +pub type Soa = internal_dns_types::config::Soa; /// Message used to notify Nexus that this oximeter instance is up and running. #[derive(Debug, Clone, Copy, JsonSchema, Serialize, Deserialize)] diff --git a/openapi/dns-server/dns-server-2.0.0-a6562b.json b/openapi/dns-server/dns-server-2.0.0-a6562b.json new file mode 100644 index 00000000000..fef9c7ad3f1 --- /dev/null +++ b/openapi/dns-server/dns-server-2.0.0-a6562b.json @@ -0,0 +1,342 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "Internal DNS", + "description": "API for the internal DNS server", + "contact": { + "url": "https://oxide.computer", + "email": "api@oxide.computer" + }, + "version": "2.0.0" + }, + "paths": { + "/config": { + "get": { + "operationId": "dns_config_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DnsConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "operationId": "dns_config_put", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DnsConfigParams" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + } + }, + "components": { + "schemas": { + "DnsConfig": { + "type": "object", + "properties": { + "generation": { + "$ref": "#/components/schemas/Generation" + }, + "time_applied": { + "type": "string", + "format": "date-time" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsConfigZone" + } + } + }, + "required": [ + "generation", + "time_applied", + "time_created", + "zones" + ] + }, + "DnsConfigParams": { + "type": "object", + "properties": { + "generation": { + "$ref": "#/components/schemas/Generation" + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsConfigZone" + } + } + }, + "required": [ + "generation", + "time_created", + "zones" + ] + }, + "DnsConfigZone": { + "description": "Configuration for a specific DNS zone, as opposed to illumos zones in which the services described by these records run.\n\nThe name `@` is special: it describes records that should be provided for queries about `zone_name`. This is used in favor of the empty string as `@` is the name used for this purpose in zone files for most DNS configurations. 
It also avoids potentially-confusing debug output from naively printing out records and their names - if you've seen an `@` record and tools are unclear about what that means, hopefully you've arrived here!", + "type": "object", + "properties": { + "records": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsRecord" + } + } + }, + "zone_name": { + "type": "string" + } + }, + "required": [ + "records", + "zone_name" + ] + }, + "DnsRecord": { + "oneOf": [ + { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "ipv4" + }, + "type": { + "type": "string", + "enum": [ + "A" + ] + } + }, + "required": [ + "data", + "type" + ] + }, + { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "ipv6" + }, + "type": { + "type": "string", + "enum": [ + "AAAA" + ] + } + }, + "required": [ + "data", + "type" + ] + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Srv" + }, + "type": { + "type": "string", + "enum": [ + "SRV" + ] + } + }, + "required": [ + "data", + "type" + ] + }, + { + "type": "object", + "properties": { + "data": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "NS" + ] + } + }, + "required": [ + "data", + "type" + ] + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Soa" + }, + "type": { + "type": "string", + "enum": [ + "SOA" + ] + } + }, + "required": [ + "data", + "type" + ] + } + ] + }, + "Error": { + "description": "Error information from a response.", + "type": "object", + "properties": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "request_id": { + "type": "string" + } + }, + "required": [ + "message", + "request_id" + ] + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Soa": { + "type": "object", + "properties": { + "expire": { + "type": "integer", + "format": "int32" + }, + "minimum": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "mname": { + "type": "string" + }, + "refresh": { + "type": "integer", + "format": "int32" + }, + "retry": { + "type": "integer", + "format": "int32" + }, + "rname": { + "type": "string" + }, + "serial": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "expire", + "minimum", + "mname", + "refresh", + "retry", + "rname", + "serial" + ] + }, + "Srv": { + "type": "object", + "properties": { + "port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "prio": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "target": { + "type": "string" + }, + "weight": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "port", + "prio", + "target", + "weight" + ] + } + }, + "responses": { + "Error": { + "description": "Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } +} diff --git a/openapi/dns-server/dns-server-latest.json b/openapi/dns-server/dns-server-latest.json index d4ceb55e446..a86a0f513a4 120000 --- a/openapi/dns-server/dns-server-latest.json +++ b/openapi/dns-server/dns-server-latest.json @@ -1 +1 @@ -dns-server-1.0.0-49359e.json \ No newline at end of file +dns-server-2.0.0-a6562b.json \ No newline at end of file diff --git a/openapi/nexus-internal.json 
b/openapi/nexus-internal.json index 7818a3787dd..9e9b1c9eef1 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2006,7 +2006,7 @@ "type": "string" }, "external_dns_version": { - "description": "external DNS version when thi blueprint was created", + "description": "external DNS version when this blueprint was created", "allOf": [ { "$ref": "#/components/schemas/Generation" @@ -3733,6 +3733,7 @@ ] }, "DnsConfigZone": { + "description": "Configuration for a specific DNS zone, as opposed to illumos zones in which the services described by these records run.\n\nThe name `@` is special: it describes records that should be provided for queries about `zone_name`. This is used in favor of the empty string as `@` is the name used for this purpose in zone files for most DNS configurations. It also avoids potentially-confusing debug output from naively printing out records and their names - if you've seen an `@` record and tools are unclear about what that means, hopefully you've arrived here!", "type": "object", "properties": { "records": { @@ -3810,6 +3811,42 @@ "data", "type" ] + }, + { + "type": "object", + "properties": { + "data": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "NS" + ] + } + }, + "required": [ + "data", + "type" + ] + }, + { + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Soa" + }, + "type": { + "type": "string", + "enum": [ + "SOA" + ] + } + }, + "required": [ + "data", + "type" + ] } ] }, @@ -6210,6 +6247,48 @@ "vmm_state" ] }, + "Soa": { + "type": "object", + "properties": { + "expire": { + "type": "integer", + "format": "int32" + }, + "minimum": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "mname": { + "type": "string" + }, + "refresh": { + "type": "integer", + "format": "int32" + }, + "retry": { + "type": "integer", + "format": "int32" + }, + "rname": { + "type": "string" + }, + "serial": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "expire", + "minimum", + "mname", + "refresh", + "retry", + "rname", + "serial" + ] + }, "SourceNatConfig": { "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", "type": "object", @@ -6239,7 +6318,7 @@ ] }, "SpType": { - "description": "SpType\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"sled\", \"power\", \"switch\" ] } ```
", + "description": "`SpType`\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"sled\", \"power\", \"switch\" ] } ```
", "type": "string", "enum": [ "sled", diff --git a/openapi/wicketd.json b/openapi/wicketd.json index 0e74d18b333..cba018bcd59 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -3582,7 +3582,7 @@ ] }, "RotImageError": { - "description": "RotImageError\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"unchecked\", \"first_page_erased\", \"partially_programmed\", \"invalid_length\", \"header_not_programmed\", \"bootloader_too_small\", \"bad_magic\", \"header_image_size\", \"unaligned_length\", \"unsupported_type\", \"reset_vector_not_thumb2\", \"reset_vector\", \"signature\" ] } ```
", + "description": "`RotImageError`\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"unchecked\", \"first_page_erased\", \"partially_programmed\", \"invalid_length\", \"header_not_programmed\", \"bootloader_too_small\", \"bad_magic\", \"header_image_size\", \"unaligned_length\", \"unsupported_type\", \"reset_vector_not_thumb2\", \"reset_vector\", \"signature\" ] } ```
", "type": "string", "enum": [ "unchecked", @@ -3645,7 +3645,7 @@ ] }, "RotSlot": { - "description": "RotSlot\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"slot\" ], \"properties\": { \"slot\": { \"type\": \"string\", \"enum\": [ \"a\" ] } } }, { \"type\": \"object\", \"required\": [ \"slot\" ], \"properties\": { \"slot\": { \"type\": \"string\", \"enum\": [ \"b\" ] } } } ] } ```
", + "description": "`RotSlot`\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"slot\" ], \"properties\": { \"slot\": { \"type\": \"string\", \"enum\": [ \"a\" ] } } }, { \"type\": \"object\", \"required\": [ \"slot\" ], \"properties\": { \"slot\": { \"type\": \"string\", \"enum\": [ \"b\" ] } } } ] } ```
", "oneOf": [ { "type": "object", @@ -3678,7 +3678,7 @@ ] }, "RotState": { - "description": "RotState\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"active\", \"persistent_boot_preference\", \"state\" ], \"properties\": { \"active\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"pending_persistent_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] }, \"persistent_boot_preference\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"slot_a_sha3_256_digest\": { \"type\": [ \"string\", \"null\" ] }, \"slot_b_sha3_256_digest\": { \"type\": [ \"string\", \"null\" ] }, \"state\": { \"type\": \"string\", \"enum\": [ \"v2\" ] }, \"transient_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] } } }, { \"type\": \"object\", \"required\": [ \"message\", \"state\" ], \"properties\": { \"message\": { \"type\": \"string\" }, \"state\": { \"type\": \"string\", \"enum\": [ \"communication_failed\" ] } } }, { \"type\": \"object\", \"required\": [ \"active\", \"persistent_boot_preference\", \"slot_a_fwid\", \"slot_b_fwid\", \"stage0_fwid\", \"stage0next_fwid\", \"state\" ], \"properties\": { \"active\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"pending_persistent_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] }, \"persistent_boot_preference\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"slot_a_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"slot_a_fwid\": { \"type\": \"string\" }, \"slot_b_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"slot_b_fwid\": { \"type\": \"string\" }, \"stage0_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"stage0_fwid\": { \"type\": \"string\" }, \"stage0next_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"stage0next_fwid\": { \"type\": \"string\" }, \"state\": { \"type\": \"string\", \"enum\": [ \"v3\" ] }, \"transient_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] } } } ] } ```
", + "description": "`RotState`\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"active\", \"persistent_boot_preference\", \"state\" ], \"properties\": { \"active\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"pending_persistent_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] }, \"persistent_boot_preference\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"slot_a_sha3_256_digest\": { \"type\": [ \"string\", \"null\" ] }, \"slot_b_sha3_256_digest\": { \"type\": [ \"string\", \"null\" ] }, \"state\": { \"type\": \"string\", \"enum\": [ \"v2\" ] }, \"transient_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] } } }, { \"type\": \"object\", \"required\": [ \"message\", \"state\" ], \"properties\": { \"message\": { \"type\": \"string\" }, \"state\": { \"type\": \"string\", \"enum\": [ \"communication_failed\" ] } } }, { \"type\": \"object\", \"required\": [ \"active\", \"persistent_boot_preference\", \"slot_a_fwid\", \"slot_b_fwid\", \"stage0_fwid\", \"stage0next_fwid\", \"state\" ], \"properties\": { \"active\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"pending_persistent_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] }, \"persistent_boot_preference\": { \"$ref\": \"#/components/schemas/RotSlot\" }, \"slot_a_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"slot_a_fwid\": { \"type\": \"string\" }, \"slot_b_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"slot_b_fwid\": { \"type\": \"string\" }, \"stage0_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"stage0_fwid\": { \"type\": \"string\" }, \"stage0next_error\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotImageError\" } ] } ] }, \"stage0next_fwid\": { \"type\": \"string\" }, \"state\": { \"type\": \"string\", \"enum\": [ \"v3\" ] }, \"transient_boot_preference\": { \"oneOf\": [ { \"type\": \"null\" }, { \"allOf\": [ { \"$ref\": \"#/components/schemas/RotSlot\" } ] } ] } } } ] } ```
", "oneOf": [ { "type": "object", @@ -4207,7 +4207,7 @@ ] }, "SpComponentCaboose": { - "description": "SpComponentCaboose\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"board\", \"git_commit\", \"name\", \"version\" ], \"properties\": { \"board\": { \"type\": \"string\" }, \"epoch\": { \"type\": [ \"string\", \"null\" ] }, \"git_commit\": { \"type\": \"string\" }, \"name\": { \"type\": \"string\" }, \"sign\": { \"type\": [ \"string\", \"null\" ] }, \"version\": { \"type\": \"string\" } } } ```
", + "description": "`SpComponentCaboose`\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"board\", \"git_commit\", \"name\", \"version\" ], \"properties\": { \"board\": { \"type\": \"string\" }, \"epoch\": { \"type\": [ \"string\", \"null\" ] }, \"git_commit\": { \"type\": \"string\" }, \"name\": { \"type\": \"string\" }, \"sign\": { \"type\": [ \"string\", \"null\" ] }, \"version\": { \"type\": \"string\" } } } ```
", "type": "object", "properties": { "board": { @@ -4330,7 +4330,7 @@ ] }, "SpIdentifier": { - "description": "SpIdentifier\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"slot\", \"type\" ], \"properties\": { \"slot\": { \"type\": \"integer\", \"format\": \"uint32\", \"minimum\": 0.0 }, \"type\": { \"$ref\": \"#/components/schemas/SpType\" } } } ```
", + "description": "`SpIdentifier`\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"slot\", \"type\" ], \"properties\": { \"slot\": { \"type\": \"integer\", \"format\": \"uint32\", \"minimum\": 0.0 }, \"type\": { \"$ref\": \"#/components/schemas/SpType\" } } } ```
", "type": "object", "properties": { "slot": { @@ -4539,7 +4539,7 @@ ] }, "SpState": { - "description": "SpState\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"base_mac_address\", \"hubris_archive_id\", \"model\", \"power_state\", \"revision\", \"rot\", \"serial_number\" ], \"properties\": { \"base_mac_address\": { \"type\": \"array\", \"items\": { \"type\": \"integer\", \"format\": \"uint8\", \"minimum\": 0.0 }, \"maxItems\": 6, \"minItems\": 6 }, \"hubris_archive_id\": { \"type\": \"string\" }, \"model\": { \"type\": \"string\" }, \"power_state\": { \"$ref\": \"#/components/schemas/PowerState\" }, \"revision\": { \"type\": \"integer\", \"format\": \"uint32\", \"minimum\": 0.0 }, \"rot\": { \"$ref\": \"#/components/schemas/RotState\" }, \"serial_number\": { \"type\": \"string\" } } } ```
", + "description": "`SpState`\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"base_mac_address\", \"hubris_archive_id\", \"model\", \"power_state\", \"revision\", \"rot\", \"serial_number\" ], \"properties\": { \"base_mac_address\": { \"type\": \"array\", \"items\": { \"type\": \"integer\", \"format\": \"uint8\", \"minimum\": 0.0 }, \"maxItems\": 6, \"minItems\": 6 }, \"hubris_archive_id\": { \"type\": \"string\" }, \"model\": { \"type\": \"string\" }, \"power_state\": { \"$ref\": \"#/components/schemas/PowerState\" }, \"revision\": { \"type\": \"integer\", \"format\": \"uint32\", \"minimum\": 0.0 }, \"rot\": { \"$ref\": \"#/components/schemas/RotState\" }, \"serial_number\": { \"type\": \"string\" } } } ```
", "type": "object", "properties": { "base_mac_address": { @@ -4584,7 +4584,7 @@ ] }, "SpType": { - "description": "SpType\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"sled\", \"power\", \"switch\" ] } ```
", + "description": "`SpType`\n\n
JSON schema\n\n```json { \"type\": \"string\", \"enum\": [ \"sled\", \"power\", \"switch\" ] } ```
", "type": "string", "enum": [ "sled",