diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index 6b06873600a..cb1fc0c74d9 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -23,6 +23,7 @@ use nexus_reconfigurator_planning::system::{SledBuilder, SystemDescription}; use nexus_reconfigurator_simulation::SimStateBuilder; use nexus_reconfigurator_simulation::Simulator; use nexus_reconfigurator_simulation::{BlueprintId, SimState}; +use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; use nexus_types::deployment::execution; @@ -458,9 +459,26 @@ enum BlueprintEditCommands { AddNexus { /// sled on which to deploy the new instance sled_id: SledOpt, + /// image source for the new zone + /// + /// The image source is required if the planning input of the system + /// being edited has a TUF repo; otherwise, it will default to the + /// install dataset. + #[clap(subcommand)] + image_source: Option<ImageSourceArgs>, }, /// add a CockroachDB instance to a particular sled - AddCockroach { sled_id: SledOpt }, + AddCockroach { + /// sled on which to deploy the new instance + sled_id: SledOpt, + /// image source for the new zone + /// + /// The image source is required if the planning input of the system + /// being edited has a TUF repo; otherwise, it will default to the + /// install dataset. + #[clap(subcommand)] + image_source: Option<ImageSourceArgs>, + }, /// set the image source for a zone SetZoneImage { /// id of zone whose image to set @@ -714,6 +732,60 @@ enum ImageSourceArgs { }, } +/// Adding a new zone to a blueprint needs to choose an image source for that +/// zone. Subcommands that add a zone take an optional [`ImageSourceArgs`] +/// parameter. In the (common in test) case where the planning input has no TUF +/// repo at all, the new and old TUF repo policy are identical (i.e., "use the +/// install dataset"), and therefore we have only one logical choice for the +/// image source for any new zone (the install dataset). If a TUF repo _is_ +/// involved, we have two choices: use the artifact from the newest TUF repo, or +/// use the artifact from the previous TUF repo policy (which might itself be +/// another TUF repo, or might be the install dataset). +fn image_source_unwrap_or( + image_source: Option<ImageSourceArgs>, + planning_input: &PlanningInput, + zone_kind: ZoneKind, +) -> anyhow::Result<BlueprintZoneImageSource> { + if let Some(image_source) = image_source { + Ok(image_source.into()) + } else if planning_input.tuf_repo() == planning_input.old_repo() { + planning_input + .tuf_repo() + .description() + .zone_image_source(zone_kind) + .context("could not determine image source") + } else { + let mut options = vec!["`install-dataset`".to_string()]; + for (name, repo) in [ + ("previous", planning_input.old_repo()), + ("current", planning_input.tuf_repo()), + ] { + match repo.description().zone_image_source(zone_kind) { + // Install dataset is already covered, and if either TUF repo is + // missing an artifact of this kind, it's not an option.
+ Ok(BlueprintZoneImageSource::InstallDataset) | Err(_) => (), + Ok(BlueprintZoneImageSource::Artifact { version, hash }) => { + let version = match version { + BlueprintZoneImageVersion::Available { version } => { + version.to_string() + } + BlueprintZoneImageVersion::Unknown => { + "unknown".to_string() + } + }; + options.push(format!( + "`artifact {version} {hash}` (from {name} TUF repo)" + )); + } + } + } + bail!( + "must specify image source for new zone; options: {}", + options.join(", ") + ) + } +} + impl From<ImageSourceArgs> for BlueprintZoneImageSource { fn from(value: ImageSourceArgs) -> Self { match value { @@ -1327,17 +1399,27 @@ fn cmd_blueprint_edit( } let label = match args.edit_command { - BlueprintEditCommands::AddNexus { sled_id } => { + BlueprintEditCommands::AddNexus { sled_id, image_source } => { let sled_id = sled_id.to_sled_id(system.description())?; + let image_source = image_source_unwrap_or( + image_source, + &planning_input, + ZoneKind::Nexus, + )?; builder - .sled_add_zone_nexus(sled_id) + .sled_add_zone_nexus(sled_id, image_source) .context("failed to add Nexus zone")?; format!("added Nexus zone to sled {}", sled_id) } - BlueprintEditCommands::AddCockroach { sled_id } => { + BlueprintEditCommands::AddCockroach { sled_id, image_source } => { let sled_id = sled_id.to_sled_id(system.description())?; + let image_source = image_source_unwrap_or( + image_source, + &planning_input, + ZoneKind::CockroachDb, + )?; builder - .sled_add_zone_cockroachdb(sled_id) + .sled_add_zone_cockroachdb(sled_id, image_source) .context("failed to add CockroachDB zone")?; format!("added CockroachDB zone to sled {}", sled_id) } diff --git a/live-tests/tests/test_nexus_add_remove.rs b/live-tests/tests/test_nexus_add_remove.rs index 8630d94659b..5fe9cc58530 100644 --- a/live-tests/tests/test_nexus_add_remove.rs +++ b/live-tests/tests/test_nexus_add_remove.rs @@ -18,6 +18,7 @@ use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::planner::Planner; use nexus_reconfigurator_preparation::PlanningInputFromDb; use nexus_sled_agent_shared::inventory::ZoneKind; +use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::SledFilter; use omicron_common::address::NEXUS_INTERNAL_PORT; use omicron_test_utils::dev::poll::CondCheckError; @@ -54,19 +55,49 @@ async fn test_nexus_add_remove(lc: &LiveTestContext) { let nexus = initial_nexus_clients.first().expect("internal Nexus client"); // First, deploy a new Nexus zone to an arbitrary sled. - let sled_id = planning_input + let commissioned_sled_ids = planning_input .all_sled_ids(SledFilter::Commissioned) - .next() - .expect("any sled id"); + .collect::<Vec<_>>(); + let sled_id = *commissioned_sled_ids.first().expect("any sled id"); let (blueprint1, blueprint2) = blueprint_edit_current_target( log, &planning_input, &collection, &nexus, &|builder: &mut BlueprintBuilder| { + // We have to tell the builder what image source to use for the new + // Nexus zone. If we were the planner, we'd check whether we have a + // TUF repo (or two) then decide whether to use the image from one + // of those or the install dataset. Instead of duplicating all of + // that logic, we'll just find an existing Nexus zone and copy its + // image source. This should always be right in this context; it + // would only be wrong if there are existing Nexus zones with + // different image sources, which would only be true in the middle + // of an update.
+ let image_source = commissioned_sled_ids + .iter() + .find_map(|&sled_id| { + builder + .current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) + .find_map(|zone| { + if zone.zone_type.is_nexus() { + Some(zone.image_source.clone()) + } else { + None + } + }) + }) + .context( + "could not find in-service Nexus in parent blueprint", + )?; + builder - .sled_add_zone_nexus(sled_id) + .sled_add_zone_nexus(sled_id, image_source) .context("adding Nexus zone")?; + Ok(()) }, ) diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 9396f3f5462..9d82e159624 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -2717,13 +2717,22 @@ mod tests { // Add zones to our new sled. assert_eq!( - builder.sled_ensure_zone_ntp(new_sled_id).unwrap(), + builder + .sled_ensure_zone_ntp( + new_sled_id, + BlueprintZoneImageSource::InstallDataset + ) + .unwrap(), Ensure::Added ); for zpool_id in new_sled_zpools.keys() { assert_eq!( builder - .sled_ensure_zone_crucible(new_sled_id, *zpool_id) + .sled_ensure_zone_crucible( + new_sled_id, + *zpool_id, + BlueprintZoneImageSource::InstallDataset + ) .unwrap(), Ensure::Added ); diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 71858aaf47f..6e3a1bef151 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -2973,6 +2973,7 @@ mod tests { use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; + use nexus_types::deployment::BlueprintZoneImageSource; use nexus_types::external_api::params; use nexus_types::identity::Asset; use omicron_common::api::external; @@ -3328,7 +3329,12 @@ mod tests { .expect("ensured disks"); } builder - .sled_add_zone_nexus_with_config(sled_ids[2], false, Vec::new()) + .sled_add_zone_nexus_with_config( + sled_ids[2], + false, + Vec::new(), + BlueprintZoneImageSource::InstallDataset, + ) .expect("added nexus to third sled"); builder.build() }; @@ -3397,7 +3403,12 @@ mod tests { .expect("created blueprint builder"); for &sled_id in &sled_ids { builder - .sled_add_zone_nexus_with_config(sled_id, false, Vec::new()) + .sled_add_zone_nexus_with_config( + sled_id, + false, + Vec::new(), + BlueprintZoneImageSource::InstallDataset, + ) .expect("added nexus to third sled"); } builder.build() diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index b20b7ba2f71..ff4a3e4b236 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -1523,7 +1523,12 @@ mod test { .unwrap(); let sled_id = blueprint.sleds().next().expect("expected at least one sled"); - builder.sled_add_zone_nexus(sled_id).unwrap(); + builder + .sled_add_zone_nexus( + sled_id, + BlueprintZoneImageSource::InstallDataset, + ) + .unwrap(); let blueprint2 = builder.build(); eprintln!("blueprint2: {}", blueprint2.display()); // Figure out the id of the new zone. 
diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 7a12308dc01..8e928346376 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -45,7 +45,6 @@ use nexus_types::deployment::PendingMgsUpdates; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; use nexus_types::deployment::SledResources; -use nexus_types::deployment::TargetReleaseDescription; use nexus_types::deployment::TufRepoContentsError; use nexus_types::deployment::ZpoolFilter; use nexus_types::deployment::ZpoolName; @@ -1166,6 +1165,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_internal_dns( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let gz_address_index = self.next_internal_dns_gz_address_index(sled_id); let sled_subnet = self.sled_resources(sled_id)?.subnet; @@ -1182,7 +1182,6 @@ impl<'a> BlueprintBuilder<'a> { gz_address: dns_subnet.gz_address(), gz_address_index, }); - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1198,6 +1197,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_external_dns( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let id = self.rng.sled_rng(sled_id).next_zone(); let ExternalNetworkingChoice { @@ -1235,7 +1235,6 @@ impl<'a> BlueprintBuilder<'a> { dns_address, nic, }); - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1250,6 +1249,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_ensure_zone_ntp( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<Ensure, Error> { // If there's already an NTP zone on this sled, do nothing.
let has_ntp = { @@ -1275,7 +1275,6 @@ impl<'a> BlueprintBuilder<'a> { }); let filesystem_pool = self.sled_select_zpool(sled_id, zone_type.kind())?; - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1293,6 +1292,7 @@ impl<'a> BlueprintBuilder<'a> { &mut self, sled_id: SledUuid, zpool_id: ZpoolUuid, + image_source: BlueprintZoneImageSource, ) -> Result<Ensure, Error> { let pool_name = ZpoolName::new_external(zpool_id); @@ -1343,7 +1343,7 @@ impl<'a> BlueprintBuilder<'a> { id: self.rng.sled_rng(sled_id).next_zone(), filesystem_pool, zone_type, - image_source: BlueprintZoneImageSource::InstallDataset, + image_source, }; self.sled_add_zone(sled_id, zone)?; @@ -1353,6 +1353,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_nexus( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { // Whether Nexus should use TLS and what the external DNS servers it // should use are currently provided at rack-setup time, and should be @@ -1380,6 +1381,7 @@ impl<'a> BlueprintBuilder<'a> { sled_id, external_tls, external_dns_servers, + image_source, ) } @@ -1388,6 +1390,7 @@ impl<'a> BlueprintBuilder<'a> { sled_id: SledUuid, external_tls: bool, external_dns_servers: Vec<IpAddr>, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let nexus_id = self.rng.sled_rng(sled_id).next_zone(); let ExternalNetworkingChoice { @@ -1428,7 +1431,6 @@ impl<'a> BlueprintBuilder<'a> { }); let filesystem_pool = self.sled_select_zpool(sled_id, zone_type.kind())?; - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1443,6 +1445,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_oximeter( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let oximeter_id = self.rng.sled_rng(sled_id).next_zone(); let ip = self.sled_alloc_ip(sled_id)?; @@ -1454,7 +1457,6 @@ impl<'a> BlueprintBuilder<'a> { }); let filesystem_pool = self.sled_select_zpool(sled_id, zone_type.kind())?; - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1469,6 +1471,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_crucible_pantry( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let pantry_id = self.rng.sled_rng(sled_id).next_zone(); let ip = self.sled_alloc_ip(sled_id)?; @@ -1479,7 +1482,6 @@ impl<'a> BlueprintBuilder<'a> { ); let filesystem_pool = self.sled_select_zpool(sled_id, zone_type.kind())?; - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1501,6 +1503,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_cockroachdb( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let zone_id = self.rng.sled_rng(sled_id).next_zone(); let underlay_ip = self.sled_alloc_ip(sled_id)?; @@ -1514,7 +1517,6 @@ impl<'a> BlueprintBuilder<'a> { dataset: OmicronZoneDataset { pool_name }, }); let filesystem_pool = pool_name; - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1529,6 +1531,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_clickhouse( &mut self, sled_id: SledUuid, +
image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let id = self.rng.sled_rng(sled_id).next_zone(); let underlay_address = self.sled_alloc_ip(sled_id)?; @@ -1541,7 +1544,6 @@ impl<'a> BlueprintBuilder<'a> { address, dataset: OmicronZoneDataset { pool_name }, }); - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1556,6 +1558,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_clickhouse_server( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let zone_id = self.rng.sled_rng(sled_id).next_zone(); let underlay_ip = self.sled_alloc_ip(sled_id)?; @@ -1570,7 +1573,6 @@ impl<'a> BlueprintBuilder<'a> { }, ); let filesystem_pool = pool_name; - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1585,6 +1587,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_add_zone_clickhouse_keeper( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let zone_id = self.rng.sled_rng(sled_id).next_zone(); let underlay_ip = self.sled_alloc_ip(sled_id)?; @@ -1599,7 +1602,6 @@ impl<'a> BlueprintBuilder<'a> { }, ); let filesystem_pool = pool_name; - let image_source = self.zone_image_source(zone_type.kind())?; let zone = BlueprintZoneConfig { disposition: BlueprintZoneDisposition::InService, @@ -1614,6 +1616,7 @@ impl<'a> BlueprintBuilder<'a> { pub fn sled_promote_internal_ntp_to_boundary_ntp( &mut self, sled_id: SledUuid, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { // The upstream NTP/DNS servers and domain _should_ come from Nexus and // be modifiable by the operator, but currently can only be set at RSS. @@ -1637,6 +1640,7 @@ impl<'a> BlueprintBuilder<'a> { ntp_servers, dns_servers, domain, + image_source, ) } @@ -1646,6 +1650,7 @@ impl<'a> BlueprintBuilder<'a> { ntp_servers: Vec<String>, dns_servers: Vec<IpAddr>, domain: Option<String>, + image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let editor = self.sled_editors.get_mut(&sled_id).ok_or_else(|| { Error::Planner(anyhow!( @@ -1726,7 +1731,6 @@ impl<'a> BlueprintBuilder<'a> { }); let filesystem_pool = self.sled_select_zpool(sled_id, zone_type.kind())?; - let image_source = self.zone_image_source(zone_type.kind())?; self.sled_add_zone( sled_id, @@ -1984,61 +1988,6 @@ impl<'a> BlueprintBuilder<'a> { self.pending_mgs_updates.remove(baseboard_id); } - /// Try to find an artifact in either the current or previous release repo - /// that contains an image for a zone of the given kind; see RFD 565 §9. - /// Defaults to the install dataset. - pub(crate) fn zone_image_source( - &self, - zone_kind: ZoneKind, - ) -> Result<BlueprintZoneImageSource, TufRepoContentsError> { - let new_repo = self.input.tuf_repo().description(); - let old_repo = self.input.old_repo().description(); - let repo_choice = if self.zone_is_ready_for_update(zone_kind, new_repo) - { - new_repo - } else { - old_repo - }; - repo_choice.zone_image_source(zone_kind) - } - - /// Return `true` iff a zone of the given kind is ready to be updated; - /// i.e., its dependencies have been updated, or its data sufficiently - /// replicated, etc. - fn zone_is_ready_for_update( - &self, - zone_kind: ZoneKind, - new_repo: &TargetReleaseDescription, - ) -> bool { - match zone_kind { - ZoneKind::Nexus => { - // Nexus can only be updated if all non-Nexus zones have been updated, - // i.e., their image source is an artifact from the new repo.
- self.sled_ids_with_zones().all(|sled_id| { - self.current_sled_zones( - sled_id, - BlueprintZoneDisposition::is_in_service, - ) - .filter(|z| z.zone_type.kind() != ZoneKind::Nexus) - .all(|z| { - // This comparison ignores any TUF repo contents errors - // from `zone_image_source`. This means we'll never be - // able to update Nexus if we can't tell if _other_ zone - // types aren't updated, which seems correct. - Some(&z.image_source) - == new_repo - .zone_image_source(z.kind()) - .ok() - .as_ref() - }) - }) - } - // - // ZoneKind::CockroachDb => todo!("check cluster status in inventory"), - _ => true, // other zone kinds have no special dependencies - } - } - /// Debug method to remove a sled from a blueprint entirely. /// /// Bypasses all expungement checks. Do not use in production. @@ -2300,9 +2249,20 @@ pub mod test { example.input.all_sled_resources(SledFilter::Commissioned) { builder.sled_add_disks(sled_id, sled_resources).unwrap(); - builder.sled_ensure_zone_ntp(sled_id).unwrap(); + builder + .sled_ensure_zone_ntp( + sled_id, + BlueprintZoneImageSource::InstallDataset, + ) + .unwrap(); for pool_id in sled_resources.zpools.keys() { - builder.sled_ensure_zone_crucible(sled_id, *pool_id).unwrap(); + builder + .sled_ensure_zone_crucible( + sled_id, + *pool_id, + BlueprintZoneImageSource::InstallDataset, + ) + .unwrap(); } } @@ -2337,9 +2297,20 @@ pub mod test { .unwrap() .resources; builder.sled_add_disks(new_sled_id, &new_sled_resources).unwrap(); - builder.sled_ensure_zone_ntp(new_sled_id).unwrap(); + builder + .sled_ensure_zone_ntp( + new_sled_id, + BlueprintZoneImageSource::InstallDataset, + ) + .unwrap(); for pool_id in new_sled_resources.zpools.keys() { - builder.sled_ensure_zone_crucible(new_sled_id, *pool_id).unwrap(); + builder + .sled_ensure_zone_crucible( + new_sled_id, + *pool_id, + BlueprintZoneImageSource::InstallDataset, + ) + .unwrap(); } builder.sled_ensure_zone_datasets(new_sled_id).unwrap(); @@ -2801,6 +2772,7 @@ pub mod test { .next() .map(|sa| sa.sled_id) .expect("no sleds present"), + BlueprintZoneImageSource::InstallDataset, ) .unwrap_err(); @@ -2897,7 +2869,12 @@ pub mod test { "test", ) .expect("failed to create builder"); - builder.sled_add_zone_nexus(sled_id).expect("added nexus zone"); + builder + .sled_add_zone_nexus( + sled_id, + BlueprintZoneImageSource::InstallDataset, + ) + .expect("added nexus zone"); } { @@ -2913,7 +2890,12 @@ pub mod test { ) .expect("failed to create builder"); for _ in 0..3 { - builder.sled_add_zone_nexus(sled_id).expect("added nexus zone"); + builder + .sled_add_zone_nexus( + sled_id, + BlueprintZoneImageSource::InstallDataset, + ) + .expect("added nexus zone"); } } @@ -2946,7 +2928,12 @@ pub mod test { "test", ) .expect("failed to create builder"); - let err = builder.sled_add_zone_nexus(sled_id).unwrap_err(); + let err = builder + .sled_add_zone_nexus( + sled_id, + BlueprintZoneImageSource::InstallDataset, + ) + .unwrap_err(); assert!( matches!( @@ -3018,7 +3005,10 @@ pub mod test { .expect("constructed builder"); for _ in 0..num_sled_zpools { builder - .sled_add_zone_cockroachdb(target_sled_id) + .sled_add_zone_cockroachdb( + target_sled_id, + BlueprintZoneImageSource::InstallDataset, + ) .expect("added CRDB zone"); } builder.sled_ensure_zone_datasets(target_sled_id).unwrap(); @@ -3057,11 +3047,17 @@ pub mod test { .expect("constructed builder"); for _ in 0..num_sled_zpools { builder - .sled_add_zone_cockroachdb(target_sled_id) + .sled_add_zone_cockroachdb( + target_sled_id, + BlueprintZoneImageSource::InstallDataset, + ) 
.expect("added CRDB zone"); } let err = builder - .sled_add_zone_cockroachdb(target_sled_id) + .sled_add_zone_cockroachdb( + target_sled_id, + BlueprintZoneImageSource::InstallDataset, + ) .expect_err("failed to create too many CRDB zones"); match err { Error::NoAvailableZpool { sled_id, kind } => { diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 2cfd865e515..237702f47ba 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -16,6 +16,7 @@ use crate::system::SystemDescription; use anyhow::bail; use nexus_inventory::CollectionBuilderRng; use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintZoneImageSource; use nexus_types::deployment::OmicronZoneNic; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; @@ -469,7 +470,10 @@ impl ExampleSystemBuilder { .unwrap(); } if self.create_zones { - let _ = builder.sled_ensure_zone_ntp(sled_id).unwrap(); + let image_source = BlueprintZoneImageSource::InstallDataset; + let _ = builder + .sled_ensure_zone_ntp(sled_id, image_source.clone()) + .unwrap(); // Create discretionary zones if allowed. if sled_details.policy.matches(SledFilter::Discretionary) { @@ -481,36 +485,61 @@ impl ExampleSystemBuilder { sled_id, false, vec![], + image_source.clone(), ) .unwrap(); } if discretionary_ix == 0 { - builder.sled_add_zone_clickhouse(sled_id).unwrap(); + builder + .sled_add_zone_clickhouse( + sled_id, + image_source.clone(), + ) + .unwrap(); } for _ in 0..self .internal_dns_count .on(discretionary_ix, discretionary_sled_count) { - builder.sled_add_zone_internal_dns(sled_id).unwrap(); + builder + .sled_add_zone_internal_dns( + sled_id, + image_source.clone(), + ) + .unwrap(); } for _ in 0..self .external_dns_count .on(discretionary_ix, discretionary_sled_count) { - builder.sled_add_zone_external_dns(sled_id).unwrap(); + builder + .sled_add_zone_external_dns( + sled_id, + image_source.clone(), + ) + .unwrap(); } for _ in 0..self .crucible_pantry_count .on(discretionary_ix, discretionary_sled_count) { - builder.sled_add_zone_crucible_pantry(sled_id).unwrap(); + builder + .sled_add_zone_crucible_pantry( + sled_id, + image_source.clone(), + ) + .unwrap(); } discretionary_ix += 1; } for pool_name in sled_details.resources.zpools.keys() { let _ = builder - .sled_ensure_zone_crucible(sled_id, *pool_name) + .sled_ensure_zone_crucible( + sled_id, + *pool_name, + image_source.clone(), + ) .unwrap(); } } diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 1fc84a9ea6c..8bbc8dbe87b 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -22,6 +22,7 @@ use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::BlueprintZoneImageSource; use nexus_types::deployment::CockroachDbClusterVersion; use nexus_types::deployment::CockroachDbPreserveDowngrade; use nexus_types::deployment::CockroachDbSettings; @@ -29,6 +30,7 @@ use nexus_types::deployment::DiskFilter; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledDetails; use nexus_types::deployment::SledFilter; +use nexus_types::deployment::TufRepoContentsError; use nexus_types::deployment::ZpoolFilter; use 
nexus_types::external_api::views::PhysicalDiskPolicy; use nexus_types::external_api::views::SledPolicy; @@ -569,7 +571,11 @@ impl<'a> Planner<'a> { // there, all we can do is provision that one zone. We have to wait // for that to succeed and synchronize the clock before we can // provision anything else. - if self.blueprint.sled_ensure_zone_ntp(sled_id)? == Ensure::Added { + if self.blueprint.sled_ensure_zone_ntp( + sled_id, + self.image_source_for_new_zone(ZoneKind::InternalNtp)?, + )? == Ensure::Added + { info!( &self.log, "found sled missing NTP zone (will add one)"; @@ -667,10 +673,11 @@ impl<'a> Planner<'a> { // on it. let mut ncrucibles_added = 0; for zpool_id in sled_resources.all_zpools(ZpoolFilter::InService) { - if self - .blueprint - .sled_ensure_zone_crucible(sled_id, *zpool_id)? - == Ensure::Added + if self.blueprint.sled_ensure_zone_crucible( + sled_id, + *zpool_id, + self.image_source_for_new_zone(ZoneKind::Crucible)?, + )? == Ensure::Added { info!( &self.log, @@ -918,37 +925,41 @@ impl<'a> Planner<'a> { } }; + let image_source = self.image_source_for_new_zone(kind.into())?; match kind { - DiscretionaryOmicronZone::BoundaryNtp => self - .blueprint - .sled_promote_internal_ntp_to_boundary_ntp(sled_id)?, - DiscretionaryOmicronZone::Clickhouse => { - self.blueprint.sled_add_zone_clickhouse(sled_id)? - } - DiscretionaryOmicronZone::ClickhouseKeeper => { - self.blueprint.sled_add_zone_clickhouse_keeper(sled_id)? - } - DiscretionaryOmicronZone::ClickhouseServer => { - self.blueprint.sled_add_zone_clickhouse_server(sled_id)? - } - DiscretionaryOmicronZone::CockroachDb => { - self.blueprint.sled_add_zone_cockroachdb(sled_id)? - } - DiscretionaryOmicronZone::CruciblePantry => { - self.blueprint.sled_add_zone_crucible_pantry(sled_id)? - } - DiscretionaryOmicronZone::InternalDns => { - self.blueprint.sled_add_zone_internal_dns(sled_id)? - } - DiscretionaryOmicronZone::ExternalDns => { - self.blueprint.sled_add_zone_external_dns(sled_id)? + DiscretionaryOmicronZone::BoundaryNtp => { + self.blueprint.sled_promote_internal_ntp_to_boundary_ntp( + sled_id, + image_source, + )? } + DiscretionaryOmicronZone::Clickhouse => self + .blueprint + .sled_add_zone_clickhouse(sled_id, image_source)?, + DiscretionaryOmicronZone::ClickhouseKeeper => self + .blueprint + .sled_add_zone_clickhouse_keeper(sled_id, image_source)?, + DiscretionaryOmicronZone::ClickhouseServer => self + .blueprint + .sled_add_zone_clickhouse_server(sled_id, image_source)?, + DiscretionaryOmicronZone::CockroachDb => self + .blueprint + .sled_add_zone_cockroachdb(sled_id, image_source)?, + DiscretionaryOmicronZone::CruciblePantry => self + .blueprint + .sled_add_zone_crucible_pantry(sled_id, image_source)?, + DiscretionaryOmicronZone::InternalDns => self + .blueprint + .sled_add_zone_internal_dns(sled_id, image_source)?, + DiscretionaryOmicronZone::ExternalDns => self + .blueprint + .sled_add_zone_external_dns(sled_id, image_source)?, DiscretionaryOmicronZone::Nexus => { - self.blueprint.sled_add_zone_nexus(sled_id)? - } - DiscretionaryOmicronZone::Oximeter => { - self.blueprint.sled_add_zone_oximeter(sled_id)? + self.blueprint.sled_add_zone_nexus(sled_id, image_source)? } + DiscretionaryOmicronZone::Oximeter => self + .blueprint + .sled_add_zone_oximeter(sled_id, image_source)?, }; info!( self.log, "added zone to sled"; @@ -1052,19 +1063,20 @@ impl<'a> Planner<'a> { } } - // Update the first out-of-date zone. 
- let out_of_date_zones = sleds + // Find out of date zones, as defined by zones whose image source does + // not match what it should be based on our current target release. + let target_release = self.input.tuf_repo().description(); + let mut out_of_date_zones = sleds .into_iter() .flat_map(|sled_id| { - let blueprint = &self.blueprint; let log = &self.log; - blueprint + self.blueprint .current_sled_zones( sled_id, BlueprintZoneDisposition::is_in_service, ) .filter_map(move |zone| { - let desired_image_source = match blueprint + let desired_image_source = match target_release .zone_image_source(zone.zone_type.kind()) { Ok(source) => source, @@ -1082,18 +1094,70 @@ impl<'a> Planner<'a> { } }; if zone.image_source != desired_image_source { - Some((sled_id, zone.clone())) + Some((sled_id, zone, desired_image_source)) } else { None } }) }) - .collect::<Vec<_>>(); - if let Some((sled_id, zone)) = out_of_date_zones.first() { - return self.update_or_expunge_zone(*sled_id, zone); + .peekable(); + + // Before we filter out zones that can't be updated, do we have any out + // of date zones at all? We need this to explain why we didn't update + // any zones below, if we don't. + let have_out_of_date_zones = out_of_date_zones.peek().is_some(); + + // Of the out-of-date zones, filter out zones that can't be updated yet, + // either because they're not ready or because it wouldn't be safe to + // bounce them. + let mut updateable_zones = + out_of_date_zones.filter(|(_sled_id, zone, _new_image_source)| { + if !self.can_zone_be_shut_down_safely(zone) { + return false; + } + match self.is_zone_ready_for_update(zone.zone_type.kind()) { + Ok(true) => true, + Ok(false) => false, + Err(err) => { + // If we can't tell whether a zone is ready for update, + // assume it can't be. + warn!( + self.log, + "cannot determine whether zone is ready for update"; + "zone" => ?zone, + InlineErrorChain::new(&err), + ); + false + } + } + }); + + // Update the first out-of-date zone. + if let Some((sled_id, zone, new_image_source)) = updateable_zones.next() + { + // Borrow check workaround: `self.update_or_expunge_zone` needs + // `&mut self`, but `self` is borrowed in the `updateable_zones` + // iterator. Clone the one zone we want to update, then drop the + // iterator; now we can call `&mut self` methods. + let zone = zone.clone(); + std::mem::drop(updateable_zones); + + return self.update_or_expunge_zone( + sled_id, + &zone, + new_image_source, + ); + } + + if have_out_of_date_zones { + info!( + self.log, + "not all zones up-to-date, but no zones can be updated now" + ); + } else { + info!(self.log, "all zones up-to-date"); } - info!(self.log, "all zones up-to-date"); Ok(()) } @@ -1103,66 +1167,60 @@ impl<'a> Planner<'a> { &mut self, sled_id: SledUuid, zone: &BlueprintZoneConfig, + new_image_source: BlueprintZoneImageSource, ) -> Result<(), Error> { let zone_kind = zone.zone_type.kind(); - let image_source = self.blueprint.zone_image_source(zone_kind)?; - if zone.image_source == image_source { - // This should only happen in the event of a planning error above.
- error!( - self.log, "zone is already up-to-date"; - "sled_id" => %sled_id, - "zone_id" => %zone.id, - "kind" => ?zone.zone_type.kind(), - "image_source" => %image_source, - ); - return Err(Error::ZoneAlreadyUpToDate); - } else { - match zone_kind { - ZoneKind::Crucible - | ZoneKind::Clickhouse - | ZoneKind::ClickhouseKeeper - | ZoneKind::ClickhouseServer - | ZoneKind::CockroachDb => { - info!( - self.log, "updating zone image source in-place"; - "sled_id" => %sled_id, - "zone_id" => %zone.id, - "kind" => ?zone.zone_type.kind(), - "image_source" => %image_source, - ); - self.blueprint.comment(format!( - "updating {:?} zone {} in-place", - zone.zone_type.kind(), - zone.id - )); - self.blueprint.sled_set_zone_source( - sled_id, - zone.id, - image_source, - )?; - } - ZoneKind::BoundaryNtp - | ZoneKind::CruciblePantry - | ZoneKind::ExternalDns - | ZoneKind::InternalDns - | ZoneKind::InternalNtp - | ZoneKind::Nexus - | ZoneKind::Oximeter => { - info!( - self.log, "expunging out-of-date zone"; - "sled_id" => %sled_id, - "zone_id" => %zone.id, - "kind" => ?zone.zone_type.kind(), - ); - self.blueprint.comment(format!( - "expunge {:?} zone {} for update", - zone.zone_type.kind(), - zone.id - )); - self.blueprint.sled_expunge_zone(sled_id, zone.id)?; - } + + // We're called by `do_plan_zone_updates()`, which guarantees the + // `new_image_source` is different from the current image source. + debug_assert_ne!(zone.image_source, new_image_source); + + match zone_kind { + ZoneKind::Crucible + | ZoneKind::Clickhouse + | ZoneKind::ClickhouseKeeper + | ZoneKind::ClickhouseServer + | ZoneKind::CockroachDb => { + info!( + self.log, "updating zone image source in-place"; + "sled_id" => %sled_id, + "zone_id" => %zone.id, + "kind" => ?zone.zone_type.kind(), + "image_source" => %new_image_source, + ); + self.blueprint.comment(format!( + "updating {:?} zone {} in-place", + zone.zone_type.kind(), + zone.id + )); + self.blueprint.sled_set_zone_source( + sled_id, + zone.id, + new_image_source, + )?; + } + ZoneKind::BoundaryNtp + | ZoneKind::CruciblePantry + | ZoneKind::ExternalDns + | ZoneKind::InternalDns + | ZoneKind::InternalNtp + | ZoneKind::Nexus + | ZoneKind::Oximeter => { + info!( + self.log, "expunging out-of-date zone"; + "sled_id" => %sled_id, + "zone_id" => %zone.id, + "kind" => ?zone.zone_type.kind(), + ); + self.blueprint.comment(format!( + "expunge {:?} zone {} for update", + zone.zone_type.kind(), + zone.id + )); + self.blueprint.sled_expunge_zone(sled_id, zone.id)?; + } } + Ok(()) } @@ -1259,6 +1317,86 @@ impl<'a> Planner<'a> { // // https://www.cockroachlabs.com/docs/stable/cluster-settings#change-a-cluster-setting } + + /// Return the image source for zones that we need to add. + fn image_source_for_new_zone( + &self, + zone_kind: ZoneKind, + ) -> Result<BlueprintZoneImageSource, TufRepoContentsError> { + let source_repo = if self.is_zone_ready_for_update(zone_kind)? { + self.input.tuf_repo().description() + } else { + self.input.old_repo().description() + }; + source_repo.zone_image_source(zone_kind) + } + + /// Return `true` iff a zone of the given kind is ready to be updated; + /// i.e., its dependencies have been updated. + fn is_zone_ready_for_update( + &self, + zone_kind: ZoneKind, + ) -> Result<bool, TufRepoContentsError> { + // TODO-correctness: We should return false regardless of `zone_kind` if + // there are still pending updates for components earlier in the update + // ordering than zones: RoT bootloader / RoT / SP / Host OS.
+ + match zone_kind { + ZoneKind::Nexus => { + // Nexus can only be updated if all non-Nexus zones have been + // updated, i.e., their image source is an artifact from the new + // repo. + let new_repo = self.input.tuf_repo().description(); + + // If we don't actually have a TUF repo here, we can't do + // updates anyway; any return value is fine. + if new_repo.tuf_repo().is_none() { + return Ok(false); + } + + // Check that all in-service zones (other than Nexus) on all + // sleds have an image source consistent with `new_repo`. + for sled_id in self.blueprint.sled_ids_with_zones() { + for z in self.blueprint.current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) { + let kind = z.zone_type.kind(); + if kind != ZoneKind::Nexus + && z.image_source + != new_repo.zone_image_source(kind)? + { + return Ok(false); + } + } + } + + Ok(true) + } + _ => Ok(true), // other zone kinds have no special dependencies + } + } + + /// Return `true` iff we believe a zone can safely be shut down; e.g., any + /// data it's responsible for is sufficiently persisted or replicated. + /// + /// "shut down" includes both "discretionary expunge" (e.g., if we're + /// dealing with a zone that is updated via expunge -> replace) or "shut + /// down and restart" (e.g., if we're upgrading a zone in place). + /// + /// This function is not (and cannot!) be called in the "expunge a zone + /// because the underlying disk / sled has been expunged" case. In this + /// case, we have no choice but to reconcile with the fact that the zone is + /// now gone. + fn can_zone_be_shut_down_safely(&self, zone: &BlueprintZoneConfig) -> bool { + // TODO-cleanup remove this `allow` once we populate a variant below + #[allow(clippy::match_single_binding)] + match zone.zone_type.kind() { + // + // ZoneKind::CockroachDb => todo!("check cluster status in inventory"), + _ => true, // other zone kinds have no special safety checks + } + } } /// The reason a sled's zones need to be expunged. @@ -2001,7 +2139,10 @@ pub(crate) mod test { .expect("failed to build blueprint builder"); let sled_id = builder.sled_ids_with_zones().next().expect("no sleds"); builder - .sled_add_zone_external_dns(sled_id) + .sled_add_zone_external_dns( + sled_id, + BlueprintZoneImageSource::InstallDataset, + ) .expect_err("can't add external DNS zones"); // Build a builder for a modfied blueprint that will include @@ -2037,13 +2178,22 @@ pub(crate) mod test { ) }; blueprint_builder - .sled_add_zone_external_dns(sled_1) + .sled_add_zone_external_dns( + sled_1, + BlueprintZoneImageSource::InstallDataset, + ) .expect("added external DNS zone"); blueprint_builder - .sled_add_zone_external_dns(sled_1) + .sled_add_zone_external_dns( + sled_1, + BlueprintZoneImageSource::InstallDataset, + ) .expect("added external DNS zone"); blueprint_builder - .sled_add_zone_external_dns(sled_2) + .sled_add_zone_external_dns( + sled_2, + BlueprintZoneImageSource::InstallDataset, + ) .expect("added external DNS zone"); let blueprint1a = blueprint_builder.build(); @@ -4741,7 +4891,7 @@ pub(crate) mod test { // Use our example system. let mut rng = SimRngState::from_seed(TEST_NAME); - let (mut example, blueprint1) = ExampleSystemBuilder::new_with_rng( + let (mut example, mut blueprint1) = ExampleSystemBuilder::new_with_rng( &logctx.log, rng.next_system_rng(), ) @@ -4773,50 +4923,8 @@ pub(crate) mod test { // attached. let target_release_generation = Generation::from_u32(2); - // Manually specify a trivial TUF repo. 
- let mut input_builder = example.input.clone().into_builder(); - input_builder.policy_mut().tuf_repo = TufRepoPolicy { - // We use generation 2 to represent the first generation set to a - // target TUF repo. - target_release_generation, - description: TargetReleaseDescription::TufRepo( - TufRepoDescription { - repo: TufRepoMeta { - hash: ArtifactHash([0; 32]), - targets_role_version: 0, - valid_until: Utc::now(), - system_version: Version::new(0, 0, 0), - file_name: String::from(""), - }, - artifacts: vec![], - }, - ), - }; - let input = input_builder.build(); - let mut blueprint2 = Planner::new_based_on( - log.clone(), - &blueprint1, - &input, - "test_blueprint2", - &example.collection, - ) - .expect("can't create planner") - .with_rng(PlannerRng::from_seed((TEST_NAME, "bp2"))) - .plan() - .expect("plan for trivial TUF repo"); - - // All zones should still be sourced from the install dataset. - assert!( - blueprint2 - .all_omicron_zones(BlueprintZoneDisposition::is_in_service) - .all(|(_, z)| matches!( - z.image_source, - BlueprintZoneImageSource::InstallDataset - )) - ); - // Manually specify a TUF repo with fake zone images. - let mut input_builder = input.into_builder(); + let mut input_builder = example.input.clone().into_builder(); let version = ArtifactVersion::new_static("1.0.0-freeform") .expect("can't parse artifact version"); let fake_hash = ArtifactHash([0; 32]); @@ -4881,8 +4989,8 @@ pub(crate) mod test { && zone.image_source == image_source }; - // Manually "upgrade" all zones except CruciblePantry and Nexus. - for mut zone in blueprint2 + // Manually update all zones except CruciblePantry and Nexus. + for mut zone in blueprint1 .sleds .values_mut() .flat_map(|config| config.zones.iter_mut()) @@ -4905,10 +5013,10 @@ pub(crate) mod test { // Check that there is a new nexus zone that does *not* use the new // artifact (since not all of its dependencies are updated yet). - update_collection_from_blueprint(&mut example, &blueprint2); - let blueprint3 = Planner::new_based_on( + update_collection_from_blueprint(&mut example, &blueprint1); + let blueprint2 = Planner::new_based_on( log.clone(), - &blueprint2, + &blueprint1, &input, "test_blueprint3", &example.collection, @@ -4918,7 +5026,7 @@ pub(crate) mod test { .plan() .expect("can't re-plan for new Nexus zone"); { - let summary = blueprint3.diff_since_blueprint(&blueprint2); + let summary = blueprint2.diff_since_blueprint(&blueprint1); for sled in summary.diff.sleds.modified_values_diff() { assert!(sled.zones.removed.is_empty()); assert_eq!(sled.zones.added.len(), 1); @@ -4936,8 +5044,8 @@ pub(crate) mod test { // We should now have three sets of expunge/add iterations for the // Crucible Pantry zones. - let mut parent = blueprint3; - for i in 4..=9 { + let mut parent = blueprint2; + for i in 3..=8 { let blueprint_name = format!("blueprint_{i}"); update_collection_from_blueprint(&mut example, &parent); let blueprint = Planner::new_based_on( @@ -4953,8 +5061,9 @@ pub(crate) mod test { .unwrap_or_else(|_| panic!("can't re-plan after {i} iterations")); let summary = blueprint.diff_since_blueprint(&parent); + eprintln!("diff to {blueprint_name}: {}", summary.display()); for sled in summary.diff.sleds.modified_values_diff() { - if i % 2 == 0 { + if i % 2 == 1 { assert!(sled.zones.added.is_empty()); assert!(sled.zones.removed.is_empty()); assert_eq!( @@ -4985,11 +5094,11 @@ pub(crate) mod test { parent = blueprint; } - let blueprint9 = parent; + let blueprint8 = parent; // All Crucible Pantries should now be updated. 
assert_eq!( - blueprint9 + blueprint8 .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .filter(|(_, z)| is_up_to_date_pantry(z)) .count(), @@ -4998,7 +5107,7 @@ pub(crate) mod test { // All old Pantry zones should now be expunged. assert_eq!( - blueprint9 + blueprint8 .all_omicron_zones(BlueprintZoneDisposition::is_expunged) .filter(|(_, z)| is_old_pantry(z)) .count(), @@ -5008,14 +5117,14 @@ pub(crate) mod test { // Now we can update Nexus, because all of its dependent zones // are up-to-date w/r/t the new repo. assert_eq!( - blueprint9 + blueprint8 .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .filter(|(_, z)| is_old_nexus(z)) .count(), NEXUS_REDUNDANCY + 1, ); - let mut parent = blueprint9; - for i in 10..=17 { + let mut parent = blueprint8; + for i in 9..=16 { update_collection_from_blueprint(&mut example, &parent); let blueprint_name = format!("blueprint{i}"); @@ -5033,7 +5142,7 @@ pub(crate) mod test { let summary = blueprint.diff_since_blueprint(&parent); for sled in summary.diff.sleds.modified_values_diff() { - if i % 2 == 0 { + if i % 2 == 1 { assert!(sled.zones.added.is_empty()); assert!(sled.zones.removed.is_empty()); } else { @@ -5052,19 +5161,19 @@ pub(crate) mod test { } // Everything's up-to-date in Kansas City! - let blueprint17 = parent; + let blueprint16 = parent; assert_eq!( - blueprint17 + blueprint16 .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .filter(|(_, z)| is_up_to_date_nexus(z)) .count(), NEXUS_REDUNDANCY + 1, ); - update_collection_from_blueprint(&mut example, &blueprint17); + update_collection_from_blueprint(&mut example, &blueprint16); assert_planning_makes_no_changes( &logctx.log, - &blueprint17, + &blueprint16, &input, &example.collection, TEST_NAME, diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index a411cfeff64..f26cbf0eff7 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -982,7 +982,7 @@ impl OximeterReadPolicy { } /// TUF repo-related policy that's part of the planning input. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct TufRepoPolicy { /// The generation of the target release for the TUF repo. pub target_release_generation: Generation, @@ -1011,7 +1011,7 @@ impl TufRepoPolicy { } /// Source of artifacts for a given target release. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum TargetReleaseDescription { /// The initial release source for an Oxide deployment, before any TUF repo /// has been provided for upgrades.