diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs
index 7931b9a09ca..d44da4a357b 100644
--- a/nexus/db-model/src/lib.rs
+++ b/nexus/db-model/src/lib.rs
@@ -118,6 +118,7 @@ mod typed_uuid;
 mod unsigned;
 mod upstairs_repair;
 mod user_builtin;
+mod user_data_export;
 mod utilization;
 mod virtual_provisioning_collection;
 mod virtual_provisioning_resource;
@@ -241,6 +242,7 @@ pub use typed_uuid::DbTypedUuid;
 pub use typed_uuid::to_db_typed_uuid;
 pub use upstairs_repair::*;
 pub use user_builtin::*;
+pub use user_data_export::*;
 pub use utilization::*;
 pub use v2p_mapping::*;
 pub use virtual_provisioning_collection::*;
diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs
index dc965aafd43..12bbda4723a 100644
--- a/nexus/db-model/src/schema_versions.rs
+++ b/nexus/db-model/src/schema_versions.rs
@@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock};
 ///
 /// This must be updated when you change the database schema. Refer to
 /// schema/crdb/README.adoc in the root of this repository for details.
-pub const SCHEMA_VERSION: Version = Version::new(156, 0, 0);
+pub const SCHEMA_VERSION: Version = Version::new(157, 0, 0);
 
 /// List of all past database schema versions, in *reverse* order
 ///
@@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock<Vec<KnownVersion>> = LazyLock::new(|| {
         //   | leaving the first copy as an example for the next person.
         //   v
         // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"),
+        KnownVersion::new(157, "user-data-export"),
        KnownVersion::new(156, "boot-partitions-inventory"),
        KnownVersion::new(155, "vpc-firewall-icmp"),
        KnownVersion::new(154, "add-pending-mgs-updates"),
diff --git a/nexus/db-model/src/user_data_export.rs b/nexus/db-model/src/user_data_export.rs
new file mode 100644
index 00000000000..ee92af2dac7
--- /dev/null
+++ b/nexus/db-model/src/user_data_export.rs
@@ -0,0 +1,282 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use super::impl_enum_type;
+use crate::SqlU16;
+use crate::ipv6;
+use crate::typed_uuid::DbTypedUuid;
+use nexus_db_schema::schema::user_data_export;
+use omicron_uuid_kinds::UserDataExportKind;
+use omicron_uuid_kinds::UserDataExportUuid;
+use omicron_uuid_kinds::VolumeKind;
+use omicron_uuid_kinds::VolumeUuid;
+use serde::Deserialize;
+use serde::Serialize;
+use std::net::SocketAddrV6;
+use uuid::Uuid;
+
+impl_enum_type!(
+    UserDataExportResourceTypeEnum:
+
+    #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)]
+    pub enum UserDataExportResourceType;
+
+    // Enum values
+    Snapshot => b"snapshot"
+    Image => b"image"
+);
+
+// FromStr impl required for use with clap (aka omdb)
+impl std::str::FromStr for UserDataExportResourceType {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "snapshot" => Ok(UserDataExportResourceType::Snapshot),
+            "image" => Ok(UserDataExportResourceType::Image),
+            _ => Err(format!("unrecognized value {} for enum", s)),
+        }
+    }
+}
+
+impl UserDataExportResourceType {
+    pub fn to_string(&self) -> String {
+        String::from(match self {
+            UserDataExportResourceType::Snapshot => "snapshot",
+            UserDataExportResourceType::Image => "image",
+        })
+    }
+}
+
+impl_enum_type!(
+    UserDataExportStateEnum:
+
+    #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)]
+    pub enum UserDataExportState;
+
+    // Enum values
+    Requested => b"requested"
+    Assigning => b"assigning"
+    Live => b"live"
+    Deleting => b"deleting"
+    Deleted => b"deleted"
+);
+
+/// Instead of working directly with the UserDataExportRecord, callers can use
+/// this enum, for call sites that only care whether the record is live or
+/// not.
+pub enum UserDataExport {
+    NotLive,
+
+    Live { pantry_address: SocketAddrV6, volume_id: VolumeUuid },
+}
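+
+// A sketch of how a call site might consume `is_live()` via this enum
+// (hypothetical helper, not part of this change):
+//
+//     fn pantry_target(
+//         record: &UserDataExportRecord,
+//     ) -> Option<(SocketAddrV6, VolumeUuid)> {
+//         match record.is_live().ok()? {
+//             UserDataExport::NotLive => None,
+//             UserDataExport::Live { pantry_address, volume_id } => {
+//                 Some((pantry_address, volume_id))
+//             }
+//         }
+//     }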
+
+/// A "user data export" object represents an attachment of a read-only volume
+/// to a Pantry for the purpose of exporting data. As of this writing only
+/// snapshots and images are able to be exported this way. Management of these
+/// objects is done automatically by a background task.
+///
+/// Note that read-only volumes should never directly be constructed (read: be
+/// passed to Volume::construct). Copies should be created so that the
+/// appropriate reference counting for the read-only volume targets can be
+/// maintained. The user data export object stores that copied Volume, among
+/// other things.
+///
+/// The record transitions through the following states:
+///
+/// ```text
+///      Requested   <--          ---
+///                     |          |
+///          |          |          |
+///          v          |          |  responsibility of user
+///                     |          |  export create saga
+///      Assigning    --           |
+///                                |
+///          |                     |
+///          v                    ---
+///                               ---
+///         Live       <--         |
+///                     |          |
+///          |          |          |
+///          v          |          |  responsibility of user
+///                     |          |  export delete saga
+///       Deleting    --           |
+///                                |
+///          |                     |
+///          v                     |
+///                               ---
+///       Deleted
+/// ```
+///
+/// which are captured in the UserDataExportState enum. Annotated on the right
+/// are the sagas responsible for each state transition. The state transitions
+/// themselves are performed by these sagas and all involve a query that:
+///
+/// - checks that the starting state (and other values as required) make sense
+/// - updates the state while setting a unique operating_saga_id (and any
+///   other fields as appropriate)
+///
+/// As multiple background tasks will be waking up, checking to see what sagas
+/// need to be triggered, and requesting that these sagas run, this is meant to
+/// block multiple sagas from running at the same time in an effort to cut down
+/// on interference - most will unwind at the first step of performing this
+/// state transition instead of somewhere in the middle. This is not required
+/// for correctness as each saga node can deal with this type of interference.
+#[derive(Queryable, Insertable, Selectable, Clone, Debug)]
+#[diesel(table_name = user_data_export)]
+pub struct UserDataExportRecord {
+    id: DbTypedUuid<UserDataExportKind>,
+
+    state: UserDataExportState,
+    operating_saga_id: Option<Uuid>,
+    generation: i64,
+
+    resource_id: Uuid,
+    resource_type: UserDataExportResourceType,
+    resource_deleted: bool,
+
+    pantry_ip: Option<ipv6::Ipv6Addr>,
+    pantry_port: Option<SqlU16>,
+    volume_id: Option<DbTypedUuid<VolumeKind>>,
+}
+
+impl UserDataExportRecord {
+    pub fn new(
+        id: UserDataExportUuid,
+        resource: UserDataExportResource,
+    ) -> Self {
+        let (resource_type, resource_id) = match resource {
+            UserDataExportResource::Snapshot { id } => {
+                (UserDataExportResourceType::Snapshot, id)
+            }
+
+            UserDataExportResource::Image { id } => {
+                (UserDataExportResourceType::Image, id)
+            }
+        };
+
+        Self {
+            id: id.into(),
+
+            state: UserDataExportState::Requested,
+            operating_saga_id: None,
+            generation: 0,
+
+            resource_type,
+            resource_id,
+            resource_deleted: false,
+
+            pantry_ip: None,
+            pantry_port: None,
+            volume_id: None,
+        }
+    }
+
+    pub fn id(&self) -> UserDataExportUuid {
+        self.id.into()
+    }
+
+    pub fn state(&self) -> UserDataExportState {
+        self.state
+    }
+
+    pub fn operating_saga_id(&self) -> Option<Uuid> {
+        self.operating_saga_id
+    }
+
+    pub fn generation(&self) -> i64 {
+        self.generation
+    }
+
+    pub fn resource(&self) -> UserDataExportResource {
+        match self.resource_type {
+            UserDataExportResourceType::Snapshot => {
+                UserDataExportResource::Snapshot { id: self.resource_id }
+            }
+
+            UserDataExportResourceType::Image => {
+                UserDataExportResource::Image { id: self.resource_id }
+            }
+        }
+    }
+
+    pub fn deleted(&self) -> bool {
+        self.resource_deleted
+    }
+
+    pub fn pantry_address(&self) -> Option<SocketAddrV6> {
+        match (&self.pantry_ip, &self.pantry_port) {
+            (Some(pantry_ip), Some(pantry_port)) => Some(SocketAddrV6::new(
+                (*pantry_ip).into(),
+                (*pantry_port).into(),
+                0,
+                0,
+            )),
+
+            (_, _) => None,
+        }
+    }
+
+    pub fn volume_id(&self) -> Option<VolumeUuid> {
+        self.volume_id.map(|i| i.into())
+    }
+
+    pub fn is_live(&self) -> Result<UserDataExport, &'static str> {
+        match self.state {
+            UserDataExportState::Requested
+            | UserDataExportState::Assigning
+            | UserDataExportState::Deleting
+            | UserDataExportState::Deleted => Ok(UserDataExport::NotLive),
+
+            UserDataExportState::Live => {
+                let Some(pantry_ip) = self.pantry_ip else {
+                    return Err("pantry_ip is None!");
+                };
+
+                let Some(pantry_port) = self.pantry_port else {
+                    return Err("pantry_port is None!");
+                };
+
+                let Some(volume_id) = self.volume_id else {
+                    return Err("volume_id is None!");
+                };
+
+                Ok(UserDataExport::Live {
+                    pantry_address: SocketAddrV6::new(
+                        pantry_ip.into(),
+                        *pantry_port,
+                        0,
+                        0,
+                    ),
+
+                    volume_id: volume_id.into(),
+                })
+            }
+        }
+    }
+}
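+
+// The user data export create saga itself is not in this file. As a sketch,
+// a saga node driving the Requested -> Assigning transition is expected to
+// pair the "set" datastore method with its "unset" inverse for unwind (the
+// method names are real, the node shape is illustrative only):
+//
+//     // forward action: Requested -> Assigning, claiming the record
+//     datastore
+//         .set_user_data_export_requested_to_assigning(
+//             &opctx, id, saga_id, generation,
+//         )
+//         .await?;
+//
+//     // undo action: Assigning -> Requested, releasing the claim
+//     datastore
+//         .unset_user_data_export_requested_to_assigning(
+//             &opctx, id, saga_id, generation,
+//         )
+//         .await?;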
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
+pub enum UserDataExportResource {
+    Snapshot { id: Uuid },
+
+    Image { id: Uuid },
+}
+
+impl UserDataExportResource {
+    pub fn type_string(&self) -> String {
+        String::from(match self {
+            UserDataExportResource::Snapshot { .. } => "snapshot",
+            UserDataExportResource::Image { .. } => "image",
+        })
+    }
+
+    pub fn id(&self) -> Uuid {
+        match self {
+            UserDataExportResource::Snapshot { id } => *id,
+            UserDataExportResource::Image { id } => *id,
+        }
+    }
+}
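+
+// For reference: the corresponding diesel `table!` entry lives in
+// nexus-db-schema and is not part of the hunks shown here. Based on the
+// fields of UserDataExportRecord above, it plausibly looks something like
+// the following (SQL type names are a best guess):
+//
+//     table! {
+//         user_data_export (id) {
+//             id -> Uuid,
+//             state -> crate::enums::UserDataExportStateEnum,
+//             operating_saga_id -> Nullable<Uuid>,
+//             generation -> Int8,
+//             resource_id -> Uuid,
+//             resource_type -> crate::enums::UserDataExportResourceTypeEnum,
+//             resource_deleted -> Bool,
+//             pantry_ip -> Nullable<Inet>,
+//             pantry_port -> Nullable<Int4>,
+//             volume_id -> Nullable<Uuid>,
+//         }
+//     }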
diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs
index 80cb8f40650..55f0e582225 100644
--- a/nexus/db-queries/src/db/datastore/mod.rs
+++ b/nexus/db-queries/src/db/datastore/mod.rs
@@ -107,6 +107,7 @@ mod target_release;
 #[cfg(test)]
 pub(crate) mod test_utils;
 mod update;
+mod user_data_export;
 mod utilization;
 mod v2p_mapping;
 mod virtual_provisioning_collection;
@@ -137,6 +138,7 @@ pub use sled::SledTransition;
 pub use sled::TransitionError;
 pub use support_bundle::SupportBundleExpungementReport;
 pub use switch_port::SwitchPortSettingsCombinedResult;
+pub use user_data_export::*;
 pub use virtual_provisioning_collection::StorageType;
 pub use vmm::VmmStateUpdateResult;
 pub use volume::*;
diff --git a/nexus/db-queries/src/db/datastore/user_data_export.rs b/nexus/db-queries/src/db/datastore/user_data_export.rs
new file mode 100644
index 00000000000..5114690f458
--- /dev/null
+++ b/nexus/db-queries/src/db/datastore/user_data_export.rs
@@ -0,0 +1,1906 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`UserDataExportRecord`]s.
+
+use super::DataStore;
+use crate::context::OpContext;
+use crate::db::model::Image;
+use crate::db::model::Snapshot;
+use crate::db::model::SqlU16;
+use crate::db::model::UserDataExportRecord;
+use crate::db::model::UserDataExportResource;
+use crate::db::model::UserDataExportResourceType;
+use crate::db::model::UserDataExportState;
+use crate::db::model::ipv6;
+use crate::db::model::to_db_typed_uuid;
+use crate::db::update_and_check::UpdateAndCheck;
+use crate::db::update_and_check::UpdateStatus;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::OptionalExtension;
+use diesel::prelude::*;
+use nexus_db_errors::ErrorHandler;
+use nexus_db_errors::OptionalError;
+use nexus_db_errors::public_error_from_diesel;
+use nexus_db_lookup::DbConnection;
+use nexus_types::identity::Resource;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::LookupResult;
+use omicron_common::api::external::UpdateResult;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::UserDataExportUuid;
+use omicron_uuid_kinds::VolumeUuid;
+use std::net::SocketAddrV6;
+use uuid::Uuid;
+
+#[derive(Debug, Default, Clone)]
+pub struct UserDataExportChangeset {
+    /// Resources that need records created for them
+    pub request_required: Vec<UserDataExportResource>,
+
+    /// Records that need the create saga run
+    pub create_required: Vec<UserDataExportRecord>,
+
+    /// Records that need the delete saga run
+    pub delete_required: Vec<UserDataExportRecord>,
+}
+
+impl DataStore {
+    async fn user_data_export_create_in_txn(
+        conn: &async_bb8_diesel::Connection<DbConnection>,
+        err: OptionalError<Error>,
+        id: UserDataExportUuid,
+        resource: UserDataExportResource,
+    ) -> Result<UserDataExportRecord, diesel::result::Error> {
+        let user_data_export = UserDataExportRecord::new(id, resource);
+
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        // Has an export with this id been created already? If so,
+        // return that.
+        let existing_export: Option<UserDataExportRecord> =
+            dsl::user_data_export
+                .filter(dsl::id.eq(to_db_typed_uuid(id)))
+                .select(UserDataExportRecord::as_select())
+                .first_async(conn)
+                .await
+                .optional()?;
+
+        if let Some(existing_export) = existing_export {
+            return Ok(existing_export);
+        }
+
+        // Does the resource being referenced still exist?
+        let resource_id: Uuid = match resource {
+            UserDataExportResource::Snapshot { id } => {
+                use nexus_db_schema::schema::snapshot::dsl as snapshot_dsl;
+
+                let snapshot: Option<Snapshot> = snapshot_dsl::snapshot
+                    .filter(snapshot_dsl::id.eq(id))
+                    .select(Snapshot::as_select())
+                    .first_async(conn)
+                    .await
+                    .optional()?;
+
+                let still_here = match snapshot {
+                    Some(snapshot) => snapshot.time_deleted().is_none(),
+                    None => false,
+                };
+
+                if !still_here {
+                    return Err(err.bail(Error::non_resourcetype_not_found(
+                        format!("snapshot with id {id} not found or deleted"),
+                    )));
+                }
+
+                id
+            }
+
+            UserDataExportResource::Image { id } => {
+                use nexus_db_schema::schema::image::dsl as image_dsl;
+
+                let image: Option<Image> = image_dsl::image
+                    .filter(image_dsl::id.eq(id))
+                    .select(Image::as_select())
+                    .first_async(conn)
+                    .await
+                    .optional()?;
+
+                let still_here = match image {
+                    Some(image) => image.time_deleted().is_none(),
+                    None => false,
+                };
+
+                if !still_here {
+                    return Err(err.bail(Error::non_resourcetype_not_found(
+                        format!("image with id {id} not found or deleted"),
+                    )));
+                }
+
+                id
+            }
+        };
+
+        // Does an export object for this resource exist? The unique index
+        // would catch this but this is our opportunity to return a nicer
+        // error type.
+        let existing_export: Option<UserDataExportRecord> =
+            dsl::user_data_export
+                .filter(dsl::resource_id.eq(resource_id))
+                .filter(dsl::state.ne(UserDataExportState::Deleted))
+                .select(UserDataExportRecord::as_select())
+                .first_async(conn)
+                .await
+                .optional()?;
+
+        if existing_export.is_some() {
+            return Err(err.bail(Error::conflict(format!(
+                "export already exists for resource {resource_id}"
+            ))));
+        }
+
+        // Otherwise, insert the new export object
+        let rows_inserted = diesel::insert_into(dsl::user_data_export)
+            .values(user_data_export.clone())
+            .execute_async(conn)
+            .await?;
+
+        if rows_inserted != 1 {
+            return Err(err.bail(Error::internal_error(&format!(
+                "{rows_inserted} rows inserted!"
+            ))));
+        }
+
+        Ok(user_data_export)
+    }
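+
+    // Because the first check above returns any existing record with the
+    // same id, the create entry points below are idempotent: replaying a
+    // saga node that calls one of them with the same UserDataExportUuid
+    // returns the already-inserted record instead of failing. Sketch
+    // (hypothetical call site, not part of this change):
+    //
+    //     let a = datastore
+    //         .user_data_export_create_for_snapshot(&opctx, id, snapshot_id)
+    //         .await?;
+    //     let b = datastore
+    //         .user_data_export_create_for_snapshot(&opctx, id, snapshot_id)
+    //         .await?;
+    //     assert_eq!(a.id(), b.id());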
+
+    pub async fn user_data_export_create_for_snapshot(
+        &self,
+        opctx: &OpContext,
+        id: UserDataExportUuid,
+        snapshot_id: Uuid,
+    ) -> CreateResult<UserDataExportRecord> {
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        self.transaction_retry_wrapper("user_data_export_create_for_snapshot")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                async move {
+                    Self::user_data_export_create_in_txn(
+                        &conn,
+                        err,
+                        id,
+                        UserDataExportResource::Snapshot { id: snapshot_id },
+                    )
+                    .await
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    err
+                } else {
+                    public_error_from_diesel(e, ErrorHandler::Server)
+                }
+            })
+    }
+
+    pub async fn user_data_export_create_for_image(
+        &self,
+        opctx: &OpContext,
+        id: UserDataExportUuid,
+        image_id: Uuid,
+    ) -> CreateResult<UserDataExportRecord> {
+        let err = OptionalError::new();
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        self.transaction_retry_wrapper("user_data_export_create_for_image")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                async move {
+                    Self::user_data_export_create_in_txn(
+                        &conn,
+                        err,
+                        id,
+                        UserDataExportResource::Image { id: image_id },
+                    )
+                    .await
+                }
+            })
+            .await
+            .map_err(|e| {
+                if let Some(err) = err.take() {
+                    err
+                } else {
+                    public_error_from_diesel(e, ErrorHandler::Server)
+                }
+            })
+    }
+
+    pub async fn user_data_export_lookup_by_id(
+        &self,
+        opctx: &OpContext,
+        id: UserDataExportUuid,
+    ) -> LookupResult<Option<UserDataExportRecord>> {
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        dsl::user_data_export
+            .filter(dsl::id.eq(to_db_typed_uuid(id)))
+            .select(UserDataExportRecord::as_select())
+            .first_async(&*conn)
+            .await
+            .optional()
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// Return any non-Deleted user data export row for a volume
+    pub async fn user_data_export_lookup_by_volume_id(
+        &self,
+        opctx: &OpContext,
+        id: VolumeUuid,
+    ) -> LookupResult<Option<UserDataExportRecord>> {
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        dsl::user_data_export
+            .filter(dsl::volume_id.eq(to_db_typed_uuid(id)))
+            .filter(dsl::state.ne(UserDataExportState::Deleted))
+            .select(UserDataExportRecord::as_select())
+            .first_async(&*conn)
+            .await
+            .optional()
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// Return any non-Deleted user data export row for a snapshot
+    pub async fn user_data_export_lookup_for_snapshot(
+        &self,
+        opctx: &OpContext,
+        snapshot_id: Uuid,
+    ) -> LookupResult<Option<UserDataExportRecord>> {
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        dsl::user_data_export
+            .filter(dsl::resource_type.eq(UserDataExportResourceType::Snapshot))
+            .filter(dsl::resource_id.eq(snapshot_id))
+            .filter(dsl::state.ne(UserDataExportState::Deleted))
+            .first_async(&*conn)
+            .await
+            .optional()
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// Return any non-Deleted user data export row for an image
+    pub async fn user_data_export_lookup_for_image(
+        &self,
+        opctx: &OpContext,
+        image_id: Uuid,
+    ) -> LookupResult<Option<UserDataExportRecord>> {
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        dsl::user_data_export
+            .filter(dsl::resource_type.eq(UserDataExportResourceType::Image))
+            .filter(dsl::resource_id.eq(image_id))
+            .filter(dsl::state.ne(UserDataExportState::Deleted))
+            .first_async(&*conn)
+            .await
+            .optional()
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// Compute the work required related to user data export objects.
+    /// Return:
+    ///
+    /// - which resources do not have any user data export object and need a
+    ///   record created
+    ///
+    /// - which user data export records are in state Requested and need the
+    ///   associated create saga run
+    ///
+    /// - which user data export records have been marked for deletion and
+    ///   need the associated delete saga run
+    ///
+    /// This function also marks user data export records as deleted if the
+    /// associated resource was itself deleted.
+    pub async fn compute_user_data_export_changeset(
+        &self,
+        opctx: &OpContext,
+    ) -> LookupResult<UserDataExportChangeset> {
+        opctx.check_complex_operations_allowed()?;
+
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        use nexus_db_schema::schema::image::dsl as image_dsl;
+        use nexus_db_schema::schema::snapshot::dsl as snapshot_dsl;
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        let mut changeset = UserDataExportChangeset::default();
+
+        // Check for undeleted snapshots or images that do not yet have user
+        // data export objects.
+
+        let snapshots: Vec<Snapshot> = snapshot_dsl::snapshot
+            .left_join(dsl::user_data_export.on(
+                dsl::resource_id.eq(snapshot_dsl::id).and(
+                    dsl::state.nullable().ne(UserDataExportState::Deleted),
+                ),
+            ))
+            .filter(snapshot_dsl::time_deleted.is_null())
+            // `is_null` will match on cases where there isn't an export row
+            .filter(dsl::id.is_null())
+            .select(Snapshot::as_select())
+            .load_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        for snapshot in snapshots {
+            changeset
+                .request_required
+                .push(UserDataExportResource::Snapshot { id: snapshot.id() });
+        }
+
+        let project_images: Vec<Image> = image_dsl::image
+            .left_join(dsl::user_data_export.on(
+                dsl::resource_id.eq(image_dsl::id).and(
+                    dsl::state.nullable().ne(UserDataExportState::Deleted),
+                ),
+            ))
+            .filter(image_dsl::time_deleted.is_null())
+            .filter(image_dsl::project_id.is_not_null())
+            // `is_null` will match on cases where there isn't an export row
+            .filter(dsl::id.is_null())
+            .select(Image::as_select())
+            .load_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        for image in project_images {
+            changeset
+                .request_required
+                .push(UserDataExportResource::Image { id: image.id() });
+        }
+
+        let silo_images: Vec<Image> = image_dsl::image
+            .left_join(dsl::user_data_export.on(
+                dsl::resource_id.eq(image_dsl::id).and(
+                    dsl::state.nullable().ne(UserDataExportState::Deleted),
+                ),
+            ))
+            .filter(image_dsl::time_deleted.is_null())
+            .filter(image_dsl::project_id.is_null())
+            // `is_null` will match on cases where there isn't an export row
+            .filter(dsl::id.is_null())
+            .select(Image::as_select())
+            .load_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        for image in silo_images {
+            changeset
+                .request_required
+                .push(UserDataExportResource::Image { id: image.id() });
+        }
+
+        // Delete any user data export record where the higher level object
+        // (snapshot, image) was soft or hard deleted.
+
+        diesel::update(dsl::user_data_export)
+            .filter(dsl::resource_type.eq(UserDataExportResourceType::Snapshot))
+            .filter(diesel::dsl::not(
+                dsl::resource_id.eq_any(
+                    snapshot_dsl::snapshot
+                        .filter(snapshot_dsl::time_deleted.is_null())
+                        .select(snapshot_dsl::id),
+                ),
+            ))
+            .set(dsl::resource_deleted.eq(true))
+            .execute_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        diesel::update(dsl::user_data_export)
+            .filter(dsl::resource_type.eq(UserDataExportResourceType::Image))
+            .filter(diesel::dsl::not(
+                dsl::resource_id.eq_any(
+                    image_dsl::image
+                        .filter(image_dsl::time_deleted.is_null())
+                        .select(image_dsl::id),
+                ),
+            ))
+            .set(dsl::resource_deleted.eq(true))
+            .execute_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        let records: Vec<UserDataExportRecord> = dsl::user_data_export
+            .filter(dsl::resource_deleted.eq(true))
+            .select(UserDataExportRecord::as_select())
+            .load_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        for record in records {
+            changeset.delete_required.push(record);
+        }
+
+        // Running the create saga is also required for any record in state
+        // Requested - do this after marking the records as deleted above so
+        // this query can filter on that.
+
+        let mut records = dsl::user_data_export
+            .filter(dsl::state.eq(UserDataExportState::Requested))
+            .filter(dsl::resource_deleted.eq(false))
+            .select(UserDataExportRecord::as_select())
+            .load_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        changeset.create_required.append(&mut records);
+
+        Ok(changeset)
+    }
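+
+    // A sketch of how the background task mentioned in the record's
+    // documentation might consume the changeset computed above (the task
+    // lives elsewhere in Nexus and is not part of this diff; saga invocation
+    // is elided):
+    //
+    //     let changeset =
+    //         datastore.compute_user_data_export_changeset(&opctx).await?;
+    //
+    //     for resource in changeset.request_required {
+    //         // create a record in state Requested for this snapshot/image
+    //     }
+    //
+    //     for record in changeset.create_required {
+    //         // request a run of the user data export create saga
+    //     }
+    //
+    //     for record in changeset.delete_required {
+    //         // request a run of the user data export delete saga
+    //     }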
+
+    /// Mark as deleted any records whose Pantry address is not in the list
+    /// of in-service addresses.
+    ///
+    /// Returns how many records were marked for deletion.
+    pub async fn user_data_export_mark_expunged_deleted(
+        &self,
+        opctx: &OpContext,
+        in_service_pantries: Vec<ipv6::Ipv6Addr>,
+    ) -> UpdateResult<usize> {
+        opctx.check_complex_operations_allowed()?;
+
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        diesel::update(dsl::user_data_export)
+            .filter(diesel::dsl::not(
+                dsl::pantry_ip.eq_any(in_service_pantries),
+            ))
+            .set(dsl::resource_deleted.eq(true))
+            .execute_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    pub async fn user_data_export_mark_deleted(
+        &self,
+        id: UserDataExportUuid,
+    ) -> DeleteResult {
+        let conn = self.pool_connection_unauthorized().await?;
+
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        diesel::update(dsl::user_data_export)
+            .filter(dsl::id.eq(to_db_typed_uuid(id)))
+            .set(dsl::resource_deleted.eq(true))
+            .execute_async(&*conn)
+            .await
+            .map(|_| ())
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    pub async fn set_user_data_export_requested_to_assigning(
+        &self,
+        opctx: &OpContext,
+        id: UserDataExportUuid,
+        operating_saga_id: Uuid,
+        generation: i64,
+    ) -> Result<(), Error> {
+        use nexus_db_schema::schema::user_data_export::dsl;
+
+        // Otherwise, need to impl QueryFragment for TypedUuid<_>!
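+
+        // The update below is effectively a compare-and-swap: it only takes
+        // effect if the record is still Requested, is not claimed by another
+        // saga (operating_saga_id is null), has not been marked for deletion,
+        // and the supplied generation is strictly greater than the stored
+        // one.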
+ let untyped_id: Uuid = id.into_untyped_uuid(); + + let updated = diesel::update(dsl::user_data_export) + .filter(dsl::id.eq(untyped_id)) + .filter(dsl::state.eq(UserDataExportState::Requested)) + .filter(dsl::operating_saga_id.is_null()) + .filter(dsl::generation.lt(generation)) + .filter(dsl::resource_deleted.eq(false)) + .set(( + dsl::state.eq(UserDataExportState::Assigning), + dsl::operating_saga_id.eq(operating_saga_id), + dsl::generation.eq(generation), + )) + .check_if_exists::(untyped_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + // Return Ok if this call set the fields + let ok_conditions = [ + record.state() == UserDataExportState::Assigning, + record.operating_saga_id() == Some(operating_saga_id), + record.generation() == generation, + ]; + + if ok_conditions.into_iter().all(|v| v) { + Ok(()) + } else { + Err(Error::conflict(format!( + "failed to transition {:?} from requested to \ + assigning with operating saga id {} generation {}", + record, operating_saga_id, generation, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + pub async fn unset_user_data_export_requested_to_assigning( + &self, + opctx: &OpContext, + id: UserDataExportUuid, + operating_saga_id: Uuid, + generation: i64, + ) -> Result<(), Error> { + use nexus_db_schema::schema::user_data_export::dsl; + + // Otherwise, need to impl QueryFragment for TypedUuid<_>! + let untyped_id: Uuid = id.into_untyped_uuid(); + + let updated = diesel::update(dsl::user_data_export) + .filter(dsl::id.eq(untyped_id)) + .filter(dsl::state.eq(UserDataExportState::Assigning)) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .filter(dsl::generation.eq(generation)) + .set(( + dsl::state.eq(UserDataExportState::Requested), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(untyped_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + // If a previous call to this function "unlocked" the + // record, and something else has bumped the generation + // number, then return Ok + if record.generation() > generation { + Ok(()) + } else { + // Return Ok if this call set the fields + let ok_conditions = [ + record.state() == UserDataExportState::Requested, + record.operating_saga_id() == None, + record.generation() == generation, + ]; + + if ok_conditions.into_iter().all(|v| v) { + Ok(()) + } else { + Err(Error::conflict(format!( + "failed to transition {:?} from assigning to \ + requested with operating saga id {:?} \ + generation {}", + record, operating_saga_id, generation, + ))) + } + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + pub async fn set_user_data_export_assigning_to_live( + &self, + opctx: &OpContext, + id: UserDataExportUuid, + operating_saga_id: Uuid, + generation: i64, + pantry_address: SocketAddrV6, + volume_id: VolumeUuid, + ) -> Result<(), Error> { + use nexus_db_schema::schema::user_data_export::dsl; + + // Otherwise, need to impl QueryFragment for TypedUuid<_>! 
+ let untyped_id: Uuid = id.into_untyped_uuid(); + + let updated = diesel::update(dsl::user_data_export) + .filter(dsl::id.eq(untyped_id)) + .filter(dsl::state.eq(UserDataExportState::Assigning)) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .filter(dsl::generation.eq(generation)) + .set(( + dsl::state.eq(UserDataExportState::Live), + dsl::operating_saga_id.eq(Option::::None), + dsl::pantry_ip.eq(ipv6::Ipv6Addr::from(pantry_address.ip())), + dsl::pantry_port.eq(SqlU16::from(pantry_address.port())), + dsl::volume_id.eq(to_db_typed_uuid(volume_id)), + )) + .check_if_exists::(untyped_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + // If a previous call to this function "unlocked" the + // record, and something else has bumped the generation + // number, then return Ok + if record.generation() > generation { + Ok(()) + } else { + // Return Ok if this call set the fields + let ok_conditions = [ + record.state() == UserDataExportState::Live, + record.operating_saga_id() == None, + record.pantry_address() == Some(pantry_address), + record.volume_id() == Some(volume_id), + ]; + + if ok_conditions.into_iter().all(|v| v) { + Ok(()) + } else { + Err(Error::conflict(format!( + "failed to transition {:?} from assigning to \ + live with operating saga id {} generation {} \ + pantry_address {} volume {}", + record, + operating_saga_id, + generation, + pantry_address, + volume_id, + ))) + } + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + pub async fn set_user_data_export_live_to_deleting( + &self, + opctx: &OpContext, + id: UserDataExportUuid, + operating_saga_id: Uuid, + generation: i64, + ) -> Result<(), Error> { + use nexus_db_schema::schema::user_data_export::dsl; + + // Otherwise, need to impl QueryFragment for TypedUuid<_>! + let untyped_id: Uuid = id.into_untyped_uuid(); + + let updated = diesel::update(dsl::user_data_export) + .filter(dsl::id.eq(untyped_id)) + .filter(dsl::state.eq(UserDataExportState::Live)) + .filter(dsl::operating_saga_id.is_null()) + .filter(dsl::generation.lt(generation)) + .filter(dsl::resource_deleted.eq(true)) + .set(( + dsl::state.eq(UserDataExportState::Deleting), + dsl::operating_saga_id.eq(operating_saga_id), + dsl::generation.eq(generation), + )) + .check_if_exists::(untyped_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
+ .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + // Return Ok if this call set the fields + let ok_conditions = [ + record.state() == UserDataExportState::Deleting, + record.operating_saga_id() == Some(operating_saga_id), + record.generation() == generation, + ]; + + if ok_conditions.into_iter().all(|v| v) { + Ok(()) + } else { + Err(Error::conflict(format!( + "failed to transition {:?} from live to \ + deleting with operating saga id {} generation {}", + record, operating_saga_id, generation, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + pub async fn unset_user_data_export_live_to_deleting( + &self, + opctx: &OpContext, + id: UserDataExportUuid, + operating_saga_id: Uuid, + generation: i64, + ) -> Result<(), Error> { + use nexus_db_schema::schema::user_data_export::dsl; + + // Otherwise, need to impl QueryFragment for TypedUuid<_>! + let untyped_id: Uuid = id.into_untyped_uuid(); + + let updated = diesel::update(dsl::user_data_export) + .filter(dsl::id.eq(untyped_id)) + .filter(dsl::state.eq(UserDataExportState::Deleting)) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .filter(dsl::generation.eq(generation)) + .filter(dsl::resource_deleted.eq(true)) + .set(( + dsl::state.eq(UserDataExportState::Live), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(untyped_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + // If a previous call to this function "unlocked" the + // record, and something else has bumped the generation + // number, then return Ok + if record.generation() > generation { + Ok(()) + } else { + // Return Ok if this call set the fields + let ok_conditions = [ + record.state() == UserDataExportState::Live, + record.operating_saga_id() == None, + record.generation() == generation, + ]; + + if ok_conditions.into_iter().all(|v| v) { + Ok(()) + } else { + Err(Error::conflict(format!( + "failed to transition {:?} from deleting to \ + live with operating saga id {:?} generation {}", + record, operating_saga_id, generation, + ))) + } + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + pub async fn set_user_data_export_deleting_to_deleted( + &self, + opctx: &OpContext, + id: UserDataExportUuid, + operating_saga_id: Uuid, + generation: i64, + ) -> Result<(), Error> { + use nexus_db_schema::schema::user_data_export::dsl; + + // Otherwise, need to impl QueryFragment for TypedUuid<_>! + let untyped_id: Uuid = id.into_untyped_uuid(); + + let updated = diesel::update(dsl::user_data_export) + .filter(dsl::id.eq(untyped_id)) + .filter(dsl::state.eq(UserDataExportState::Deleting)) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .filter(dsl::generation.eq(generation)) + .filter(dsl::resource_deleted.eq(true)) + .set(( + dsl::state.eq(UserDataExportState::Deleted), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(untyped_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
+ .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + // Return Ok if this call set the fields - do not need to + // check if the generation number bumped here, as no + // saga will start from the Deleted state + let ok_conditions = [ + record.state() == UserDataExportState::Deleted, + record.operating_saga_id() == None, + record.generation() == generation, + ]; + + if ok_conditions.into_iter().all(|v| v) { + Ok(()) + } else { + Err(Error::conflict(format!( + "failed to transition {:?} from deleting to \ + deleted with operating saga id {:?} generation {}", + record, operating_saga_id, generation, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + pub async fn set_user_data_export_requested_to_deleted( + &self, + opctx: &OpContext, + id: UserDataExportUuid, + ) -> Result<(), Error> { + use nexus_db_schema::schema::user_data_export::dsl; + + // Otherwise, need to impl QueryFragment for TypedUuid<_>! + let untyped_id: Uuid = id.into_untyped_uuid(); + + let updated = diesel::update(dsl::user_data_export) + .filter(dsl::id.eq(untyped_id)) + .filter(dsl::state.eq(UserDataExportState::Requested)) + .filter(dsl::operating_saga_id.is_null()) + .filter(dsl::resource_deleted.eq(true)) + .set(dsl::state.eq(UserDataExportState::Deleted)) + .check_if_exists::(untyped_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + // Return Ok if this call updated the record + if record.state() == UserDataExportState::Deleted { + Ok(()) + } else { + Err(Error::conflict(format!( + "failed to transition {:?} from requested to \ + deleted", + record, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use crate::authz; + use crate::db::model::SnapshotState; + + use crate::db::pub_test_utils::TestDatabase; + use crate::db::pub_test_utils::helpers::create_project; + use crate::db::pub_test_utils::helpers::create_project_image; + use crate::db::pub_test_utils::helpers::create_project_snapshot; + + use nexus_db_lookup::LookupPath; + + use omicron_test_utils::dev; + use omicron_uuid_kinds::VolumeUuid; + + use std::collections::BTreeSet; + use std::net::Ipv6Addr; + + const PROJECT_NAME: &str = "bobs-barrel-of-bits"; + const LARGE_NUMBER_OF_ROWS: usize = 3000; + + #[tokio::test] + async fn test_resource_id_collision() { + let logctx = dev::test_setup_log("test_resource_id_collision"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + "snap", + ) + .await; + + datastore + .user_data_export_create_for_snapshot( + &opctx, + UserDataExportUuid::new_v4(), + snapshot.id(), + ) + .await + .unwrap(); + + datastore + .user_data_export_create_for_snapshot( + &opctx, + UserDataExportUuid::new_v4(), + snapshot.id(), + ) + .await + .unwrap_err(); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert that an empty changeset is returned when there are no records. 
+ #[tokio::test] + async fn test_changeset_nothing_noop() { + let logctx = dev::test_setup_log("test_changeset_nothing_noop"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert!(changeset.delete_required.is_empty()); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert that an empty changeset is returned when snapshot and image + /// records have an associated user data export object in state Live + /// already. + #[tokio::test] + async fn test_changeset_noop() { + let logctx = dev::test_setup_log("test_changeset_noop"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + "snap", + ) + .await; + + let record = datastore + .user_data_export_create_for_snapshot( + &opctx, + UserDataExportUuid::new_v4(), + snapshot.id(), + ) + .await + .unwrap(); + + let operating_saga_id = Uuid::new_v4(); + + datastore + .set_user_data_export_requested_to_assigning( + &opctx, + record.id(), + operating_saga_id, + 1, + ) + .await + .unwrap(); + + datastore + .set_user_data_export_assigning_to_live( + &opctx, + record.id(), + operating_saga_id, + 1, + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0), + VolumeUuid::new_v4(), + ) + .await + .unwrap(); + + let image = + create_project_image(&opctx, &datastore, &authz_project, "image") + .await; + + let record = datastore + .user_data_export_create_for_image( + &opctx, + UserDataExportUuid::new_v4(), + image.id(), + ) + .await + .unwrap(); + + datastore + .set_user_data_export_requested_to_assigning( + &opctx, + record.id(), + operating_saga_id, + 1, + ) + .await + .unwrap(); + + datastore + .set_user_data_export_assigning_to_live( + &opctx, + record.id(), + operating_saga_id, + 1, + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0), + VolumeUuid::new_v4(), + ) + .await + .unwrap(); + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert!(changeset.delete_required.is_empty()); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert that adding a record is required if a snapshot does not have an + /// associated user data export record. 
+ #[tokio::test] + async fn test_changeset_create_snapshot() { + let logctx = dev::test_setup_log("test_changeset_create_snapshot"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + "snap", + ) + .await; + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert_eq!(changeset.request_required.len(), 1); + assert_eq!( + changeset.request_required[0], + UserDataExportResource::Snapshot { id: snapshot.id() } + ); + assert_eq!(changeset.create_required.len(), 0); + assert!(changeset.delete_required.is_empty()); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert that adding a record is required if an image does not have an + /// associated user data export record. + #[tokio::test] + async fn test_changeset_create_image() { + let logctx = dev::test_setup_log("test_changeset_create_image"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let image = + create_project_image(&opctx, &datastore, &authz_project, "image") + .await; + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert_eq!(changeset.request_required.len(), 1); + assert_eq!( + changeset.request_required[0], + UserDataExportResource::Image { id: image.id() } + ); + assert_eq!(changeset.create_required.len(), 0); + assert!(changeset.delete_required.is_empty()); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert that deletion of the associated user data export object is + /// required when the snapshot is deleted. + #[tokio::test] + async fn test_changeset_delete_snapshot() { + let logctx = dev::test_setup_log("test_changeset_delete_snapshot"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + "snap", + ) + .await; + + let (.., authz_snapshot) = LookupPath::new(&opctx, datastore) + .snapshot_id(snapshot.id()) + .lookup_for(authz::Action::Read) + .await + .unwrap(); + + datastore + .user_data_export_create_for_snapshot( + &opctx, + UserDataExportUuid::new_v4(), + snapshot.id(), + ) + .await + .unwrap(); + + datastore + .project_delete_snapshot( + &opctx, + &authz_snapshot, + &snapshot, + vec![SnapshotState::Creating], + ) + .await + .unwrap(); + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert_eq!(changeset.delete_required.len(), 1); + assert_eq!( + changeset.delete_required[0].resource(), + UserDataExportResource::Snapshot { id: snapshot.id() } + ); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert that deletion of the associated user data export object is + /// required when the image is deleted. 
+ #[tokio::test] + async fn test_changeset_delete_image() { + let logctx = dev::test_setup_log("test_changeset_delete_image"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let image = + create_project_image(&opctx, &datastore, &authz_project, "image") + .await; + + datastore + .user_data_export_create_for_image( + &opctx, + UserDataExportUuid::new_v4(), + image.id(), + ) + .await + .unwrap(); + + let (.., authz_image, db_image) = LookupPath::new(&opctx, datastore) + .project_image_id(image.id()) + .fetch_for(authz::Action::Read) + .await + .unwrap(); + + datastore + .project_image_delete(&opctx, &authz_image, db_image) + .await + .unwrap(); + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert_eq!(changeset.delete_required.len(), 1); + assert_eq!( + changeset.delete_required[0].resource(), + UserDataExportResource::Image { id: image.id() } + ); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert the DB queries can deal with a large number of rows when finding + /// records to create. + #[tokio::test] + async fn test_changeset_create_large() { + let logctx = dev::test_setup_log("test_changeset_create_large"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let mut created_snapshot_ids: BTreeSet = BTreeSet::default(); + + for i in 0..LARGE_NUMBER_OF_ROWS { + let snapshot_string = format!("snap{i}"); + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + &snapshot_string, + ) + .await; + + created_snapshot_ids.insert(snapshot.id()); + } + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert_eq!(changeset.request_required.len(), LARGE_NUMBER_OF_ROWS); + assert!(changeset.create_required.is_empty()); + assert!(changeset.delete_required.is_empty()); + + let mut changeset_snapshot_ids: BTreeSet = BTreeSet::default(); + + for snapshot in &changeset.request_required { + let UserDataExportResource::Snapshot { id } = snapshot else { + panic!("wrong changeset resource"); + }; + + changeset_snapshot_ids.insert(*id); + } + + assert_eq!(created_snapshot_ids, changeset_snapshot_ids); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert the DB queries can deal with a large number of rows when there's + /// nothing to do + #[tokio::test] + async fn test_changeset_noop_large() { + let logctx = dev::test_setup_log("test_changeset_noop_large"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + for i in 0..LARGE_NUMBER_OF_ROWS { + let snapshot_string = format!("snap{i}"); + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + &snapshot_string, + ) + .await; + + let record = datastore + .user_data_export_create_for_snapshot( + &opctx, + UserDataExportUuid::new_v4(), + snapshot.id(), + ) + .await + .unwrap(); + + 
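+            // Drive each record through Assigning to Live so that the
+            // changeset computation finds nothing to do.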
let operating_saga_id = Uuid::new_v4(); + + datastore + .set_user_data_export_requested_to_assigning( + &opctx, + record.id(), + operating_saga_id, + 1, + ) + .await + .unwrap(); + + datastore + .set_user_data_export_assigning_to_live( + &opctx, + record.id(), + operating_saga_id, + 1, + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0), + VolumeUuid::new_v4(), + ) + .await + .unwrap(); + } + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert!(changeset.delete_required.is_empty()); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert the DB queries can deal with a large number of rows when finding + /// records to delete + #[tokio::test] + async fn test_changeset_delete_large() { + let logctx = dev::test_setup_log("test_changeset_delete_large"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let mut created_snapshot_ids: BTreeSet = BTreeSet::default(); + + for i in 0..LARGE_NUMBER_OF_ROWS { + let snapshot_string = format!("snap{i}"); + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + &snapshot_string, + ) + .await; + + let (.., authz_snapshot) = LookupPath::new(&opctx, datastore) + .snapshot_id(snapshot.id()) + .lookup_for(authz::Action::Read) + .await + .unwrap(); + + datastore + .user_data_export_create_for_snapshot( + &opctx, + UserDataExportUuid::new_v4(), + snapshot.id(), + ) + .await + .unwrap(); + + datastore + .project_delete_snapshot( + &opctx, + &authz_snapshot, + &snapshot, + vec![SnapshotState::Creating], + ) + .await + .unwrap(); + + created_snapshot_ids.insert(snapshot.id()); + } + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert_eq!(changeset.delete_required.len(), LARGE_NUMBER_OF_ROWS); + + let mut changeset_snapshot_ids: BTreeSet = BTreeSet::default(); + + for record in &changeset.delete_required { + let UserDataExportResource::Snapshot { id } = record.resource() + else { + panic!("wrong changeset resource"); + }; + + changeset_snapshot_ids.insert(id); + } + + assert_eq!(created_snapshot_ids, changeset_snapshot_ids); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Distribute user data export objects to a number of Pantries, then + /// simulate one of those being expunged, and validate that the affected + /// records will be marked for deletion. 
+ #[tokio::test] + async fn test_delete_records_for_expunged_pantries() { + let logctx = + dev::test_setup_log("test_delete_records_for_expunged_pantries"); + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let pantry_ips = [ + SocketAddrV6::new( + Ipv6Addr::new(0xfd00, 0x1122, 0x3344, 0x0, 0x0, 0x0, 0x1, 0x0), + 0, + 0, + 0, + ), + SocketAddrV6::new( + Ipv6Addr::new(0xfd00, 0x1122, 0x3344, 0x0, 0x0, 0x0, 0x3, 0x0), + 0, + 0, + 0, + ), + SocketAddrV6::new( + Ipv6Addr::new(0xfd00, 0x1122, 0x3344, 0x0, 0x0, 0x0, 0x5, 0x0), + 0, + 0, + 0, + ), + ]; + + let operating_saga_id = Uuid::new_v4(); + let generation: i64 = 1; + + for i in 0..LARGE_NUMBER_OF_ROWS { + let snapshot_string = format!("snap{i}"); + + let snapshot = create_project_snapshot( + &opctx, + &datastore, + &authz_project, + Uuid::new_v4(), + &snapshot_string, + ) + .await; + + let record = datastore + .user_data_export_create_for_snapshot( + &opctx, + UserDataExportUuid::new_v4(), + snapshot.id(), + ) + .await + .unwrap(); + + datastore + .set_user_data_export_requested_to_assigning( + &opctx, + record.id(), + operating_saga_id, + generation, + ) + .await + .unwrap(); + + datastore + .set_user_data_export_assigning_to_live( + &opctx, + record.id(), + operating_saga_id, + generation, + pantry_ips[i % 3], + VolumeUuid::new_v4(), + ) + .await + .unwrap(); + } + + let in_service_pantries = vec![ + Ipv6Addr::new(0xfd00, 0x1122, 0x3344, 0x0, 0x0, 0x0, 0x1, 0x0) + .into(), + Ipv6Addr::new(0xfd00, 0x1122, 0x3344, 0x0, 0x0, 0x0, 0x5, 0x0) + .into(), + ]; + + datastore + .user_data_export_mark_expunged_deleted(&opctx, in_service_pantries) + .await + .unwrap(); + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert_eq!(changeset.delete_required.len(), LARGE_NUMBER_OF_ROWS / 3); + + for record in &changeset.delete_required { + assert_eq!( + record.pantry_address(), + Some(SocketAddrV6::new( + Ipv6Addr::new( + 0xfd00, 0x1122, 0x3344, 0x0, 0x0, 0x0, 0x3, 0x0 + ), + 0, + 0, + 0, + )), + ); + } + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_cannot_transition_to_assigning_if_deleted() { + let logctx = dev::test_setup_log( + "test_cannot_transition_to_assigning_if_deleted", + ); + + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let image = + create_project_image(&opctx, &datastore, &authz_project, "image") + .await; + + let id = UserDataExportUuid::new_v4(); + + let record = datastore + .user_data_export_create_for_image(&opctx, id, image.identity.id) + .await + .unwrap(); + + let (.., authz_image, db_image) = LookupPath::new(&opctx, datastore) + .project_image_id(image.id()) + .fetch_for(authz::Action::Read) + .await + .unwrap(); + + datastore + .project_image_delete(&opctx, &authz_image, db_image) + .await + .unwrap(); + + // Running the changeset computation will set the user data export + // record's resource_deleted field to true + + let _changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + + // This shouldn't work anymore + + datastore + 
.set_user_data_export_requested_to_assigning( + &opctx, + record.id(), + Uuid::new_v4(), + 1, + ) + .await + .unwrap_err(); + + db.terminate().await; + logctx.cleanup_successful(); + } + + /// Assert that multiple user data export objects can be created, but not be + /// Live at the same time. + #[tokio::test] + async fn test_user_data_export_duplication() { + let logctx = dev::test_setup_log("test_user_data_export_duplication"); + + let log = logctx.log.new(o!()); + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let (authz_project, _) = + create_project(&opctx, &datastore, PROJECT_NAME).await; + + let image = + create_project_image(&opctx, &datastore, &authz_project, "image") + .await; + + // The computed changeset should include this image, it doesn't have a + // record yet. + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + assert_eq!( + &changeset.request_required, + &[UserDataExportResource::Image { id: image.id() }], + ); + assert!(changeset.create_required.is_empty()); + assert!(changeset.delete_required.is_empty()); + + // First, create a user data export object for that image. + + let id = UserDataExportUuid::new_v4(); + + let record = datastore + .user_data_export_create_for_image(&opctx, id, image.identity.id) + .await + .unwrap(); + + // The computed changeset should report that the create saga is + // required. + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + assert!(changeset.request_required.is_empty()); + assert_eq!(changeset.create_required.len(), 1); + assert_eq!(changeset.create_required[0].id(), record.id()); + assert!(changeset.delete_required.is_empty()); + + // Assert that another record cannot be created for the same image yet. + + datastore + .user_data_export_create_for_image( + &opctx, + UserDataExportUuid::new_v4(), + image.identity.id, + ) + .await + .unwrap_err(); + + // Transition the record to Assigning and assert another record cannot + // be created yet. + + let operating_saga_id = Uuid::new_v4(); + + datastore + .set_user_data_export_requested_to_assigning( + &opctx, + record.id(), + operating_saga_id, + 1, + ) + .await + .unwrap(); + + datastore + .user_data_export_create_for_image( + &opctx, + UserDataExportUuid::new_v4(), + image.identity.id, + ) + .await + .unwrap_err(); + + // The changeset should now be empty. + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert!(changeset.delete_required.is_empty()); + + // Transition to Live, same test. + + datastore + .set_user_data_export_assigning_to_live( + &opctx, + record.id(), + operating_saga_id, + 1, + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0), + VolumeUuid::new_v4(), + ) + .await + .unwrap(); + + datastore + .user_data_export_create_for_image( + &opctx, + UserDataExportUuid::new_v4(), + image.identity.id, + ) + .await + .unwrap_err(); + + let changeset = + datastore.compute_user_data_export_changeset(&opctx).await.unwrap(); + assert!(changeset.request_required.is_empty()); + assert!(changeset.create_required.is_empty()); + assert!(changeset.delete_required.is_empty()); + + // Mark the record as deleted. + + datastore.user_data_export_mark_deleted(record.id()).await.unwrap(); + + // The changeset should now show that the record needs to have the + // associated delete saga run. 
+
+        let changeset =
+            datastore.compute_user_data_export_changeset(&opctx).await.unwrap();
+        assert!(changeset.request_required.is_empty());
+        assert!(changeset.create_required.is_empty());
+        assert_eq!(changeset.delete_required.len(), 1);
+        assert_eq!(changeset.delete_required[0].id(), record.id());
+
+        // Transition to Deleting, same test.
+
+        datastore
+            .set_user_data_export_live_to_deleting(
+                &opctx,
+                record.id(),
+                operating_saga_id,
+                2,
+            )
+            .await
+            .unwrap();
+
+        datastore
+            .user_data_export_create_for_image(
+                &opctx,
+                UserDataExportUuid::new_v4(),
+                image.identity.id,
+            )
+            .await
+            .unwrap_err();
+
+        // Transition to Deleted.
+
+        datastore
+            .set_user_data_export_deleting_to_deleted(
+                &opctx,
+                record.id(),
+                operating_saga_id,
+                2,
+            )
+            .await
+            .unwrap();
+
+        // The changeset should now show that a record for this image needs to
+        // be created, along with the old record still needing the associated
+        // delete saga.
+
+        let changeset =
+            datastore.compute_user_data_export_changeset(&opctx).await.unwrap();
+        assert_eq!(
+            &changeset.request_required,
+            &[UserDataExportResource::Image { id: image.id() }],
+        );
+        assert!(changeset.create_required.is_empty());
+        assert_eq!(changeset.delete_required.len(), 1);
+        assert_eq!(changeset.delete_required[0].id(), record.id());
+
+        // Now it should work.
+
+        datastore
+            .user_data_export_create_for_image(
+                &opctx,
+                UserDataExportUuid::new_v4(),
+                image.identity.id,
+            )
+            .await
+            .unwrap();
+
+        db.terminate().await;
+        logctx.cleanup_successful();
+    }
+
+    /// Assert that a record can be marked for deletion in the Assigning state
+    /// and that it will still transition to Live.
+    #[tokio::test]
+    async fn test_marked_for_delete_during_assigning() {
+        let logctx =
+            dev::test_setup_log("test_marked_for_delete_during_assigning");
+        let log = logctx.log.new(o!());
+        let db = TestDatabase::new_with_datastore(&log).await;
+        let (opctx, datastore) = (db.opctx(), db.datastore());
+
+        let (authz_project, _) =
+            create_project(&opctx, &datastore, PROJECT_NAME).await;
+
+        let snapshot = create_project_snapshot(
+            &opctx,
+            &datastore,
+            &authz_project,
+            Uuid::new_v4(),
+            "snap",
+        )
+        .await;
+
+        let record = datastore
+            .user_data_export_create_for_snapshot(
+                &opctx,
+                UserDataExportUuid::new_v4(),
+                snapshot.id(),
+            )
+            .await
+            .unwrap();
+
+        let operating_saga_id = Uuid::new_v4();
+
+        datastore
+            .set_user_data_export_requested_to_assigning(
+                &opctx,
+                record.id(),
+                operating_saga_id,
+                1,
+            )
+            .await
+            .unwrap();
+
+        // Note that I'm not sure anything can do this today! But this test is
+        // a safeguard against a future where that could happen.
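+        // Even though the record is marked for deletion mid-saga, the create
+        // saga identified by operating_saga_id must still be able to complete
+        // its transition to Live below; the delete saga is then expected to
+        // clean up afterwards.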
+        datastore.user_data_export_mark_deleted(record.id()).await.unwrap();
+
+        datastore
+            .set_user_data_export_assigning_to_live(
+                &opctx,
+                record.id(),
+                operating_saga_id,
+                1,
+                SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0),
+                VolumeUuid::new_v4(),
+            )
+            .await
+            .unwrap();
+
+        db.terminate().await;
+        logctx.cleanup_successful();
+    }
+}
diff --git a/nexus/db-queries/src/db/pub_test_utils/helpers.rs b/nexus/db-queries/src/db/pub_test_utils/helpers.rs
index 0ebc3af2f58..c81f6440d0a 100644
--- a/nexus/db-queries/src/db/pub_test_utils/helpers.rs
+++ b/nexus/db-queries/src/db/pub_test_utils/helpers.rs
@@ -13,22 +13,30 @@
 use anyhow::Result;
 use chrono::Utc;
 use nexus_db_model::AffinityGroup;
 use nexus_db_model::AntiAffinityGroup;
+use nexus_db_model::BlockSize;
 use nexus_db_model::ByteCount;
 use nexus_db_model::Generation;
+use nexus_db_model::Image;
 use nexus_db_model::Instance;
 use nexus_db_model::InstanceRuntimeState;
 use nexus_db_model::InstanceState;
 use nexus_db_model::Project;
+use nexus_db_model::ProjectImage;
+use nexus_db_model::ProjectImageIdentity;
 use nexus_db_model::Resources;
 use nexus_db_model::SledBaseboard;
 use nexus_db_model::SledSystemHardware;
 use nexus_db_model::SledUpdate;
+use nexus_db_model::Snapshot;
+use nexus_db_model::SnapshotIdentity;
+use nexus_db_model::SnapshotState;
 use nexus_types::external_api::params;
 use nexus_types::identity::Resource;
 use omicron_common::api::external;
 use omicron_uuid_kinds::GenericUuid;
 use omicron_uuid_kinds::InstanceUuid;
 use omicron_uuid_kinds::SledUuid;
+use omicron_uuid_kinds::VolumeUuid;
 use std::net::Ipv6Addr;
 use std::net::SocketAddrV6;
 use std::str::FromStr;
@@ -395,3 +403,85 @@ pub async fn create_anti_affinity_group_member(
         .await?;
     Ok(())
 }
+
+pub async fn create_project_snapshot(
+    opctx: &OpContext,
+    datastore: &DataStore,
+    authz_project: &authz::Project,
+    disk_id: Uuid,
+    name: &str,
+) -> Snapshot {
+    datastore
+        .project_ensure_snapshot(
+            &opctx,
+            &authz_project,
+            Snapshot {
+                identity: SnapshotIdentity {
+                    id: Uuid::new_v4(),
+                    name: external::Name::try_from(name.to_string())
+                        .unwrap()
+                        .into(),
+                    description: "snapshot".into(),
+
+                    time_created: Utc::now(),
+                    time_modified: Utc::now(),
+                    time_deleted: None,
+                },
+
+                project_id: authz_project.id(),
+                disk_id,
+                volume_id: VolumeUuid::new_v4().into(),
+                destination_volume_id: VolumeUuid::new_v4().into(),
+
+                gen: Generation::new(),
+                state: SnapshotState::Creating,
+                block_size: BlockSize::AdvancedFormat,
+
+                size: external::ByteCount::from_gibibytes_u32(2).into(),
+            },
+        )
+        .await
+        .unwrap()
+}
+
+pub async fn create_project_image(
+    opctx: &OpContext,
+    datastore: &DataStore,
+    authz_project: &authz::Project,
+    name: &str,
+) -> Image {
+    let authz_silo = opctx.authn.silo_required().unwrap();
+
+    datastore
+        .project_image_create(
+            &opctx,
+            &authz_project,
+            ProjectImage {
+                identity: ProjectImageIdentity {
+                    id: Uuid::new_v4(),
+                    name: external::Name::try_from(name.to_string())
+                        .unwrap()
+                        .into(),
+                    description: "description".into(),
+
+                    time_created: Utc::now(),
+                    time_modified: Utc::now(),
+                    time_deleted: None,
+                },
+
+                silo_id: authz_silo.id(),
+                project_id: authz_project.id(),
+                volume_id: VolumeUuid::new_v4().into(),
+
+                url: None,
+                os: String::from("debian"),
+                version: String::from("12"),
+                digest: None,
+                block_size: BlockSize::Iso,
+
+                size: external::ByteCount::from_gibibytes_u32(1).into(),
+            },
+        )
+        .await
+        .unwrap()
+}
diff --git a/nexus/db-schema/src/enums.rs b/nexus/db-schema/src/enums.rs
index e4391e3be70..2ee2f3ff6a0 100644
--- a/nexus/db-schema/src/enums.rs
+++ b/nexus/db-schema/src/enums.rs
@@ -83,6 +83,8 @@ define_enums! {
     TargetReleaseSourceEnum => "target_release_source",
     UpstairsRepairNotificationTypeEnum => "upstairs_repair_notification_type",
     UpstairsRepairTypeEnum => "upstairs_repair_type",
+    UserDataExportResourceTypeEnum => "user_data_export_resource_type",
+    UserDataExportStateEnum => "user_data_export_state",
     UserProvisionTypeEnum => "user_provision_type",
     VmmStateEnum => "vmm_state",
     VolumeResourceUsageTypeEnum => "volume_resource_usage_type",
diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs
index a86bddb89c1..3d8f2d2fd7f 100644
--- a/nexus/db-schema/src/schema.rs
+++ b/nexus/db-schema/src/schema.rs
@@ -2573,3 +2573,22 @@ table! {
         report -> Jsonb,
     }
 }
+
+table! {
+    user_data_export (id) {
+        id -> Uuid,
+
+        state -> crate::enums::UserDataExportStateEnum,
+        operating_saga_id -> Nullable<Uuid>,
+        generation -> Int8,
+
+        resource_id -> Uuid,
+        resource_type -> crate::enums::UserDataExportResourceTypeEnum,
+        resource_deleted -> Bool,
+
+        pantry_ip -> Nullable<Inet>,
+        pantry_port -> Nullable<Int4>,
+        volume_id -> Nullable<Uuid>,
+    }
+}
+allow_tables_to_appear_in_same_query!(user_data_export, snapshot, image);
diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs
index aac95a375db..b922fd2aa0c 100644
--- a/nexus/src/app/disk.rs
+++ b/nexus/src/app/disk.rs
@@ -2,7 +2,7 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
 
-//! Disks and snapshots
+//! Disks
 
 use crate::app::sagas;
 use crate::external_api::params;
diff --git a/nexus/src/app/snapshot.rs b/nexus/src/app/snapshot.rs
index 5b8c59040a6..6461366b85c 100644
--- a/nexus/src/app/snapshot.rs
+++ b/nexus/src/app/snapshot.rs
@@ -1,3 +1,9 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Snapshots
+
 use std::sync::Arc;
 
 use nexus_db_lookup::LookupPath;
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index 0373ea7e771..3d35ceffc11 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -5996,6 +5996,51 @@ ON omicron.public.webhook_delivery_attempt (
     rx_id
 );
 
+CREATE TYPE IF NOT EXISTS omicron.public.user_data_export_resource_type AS ENUM (
+    'snapshot',
+    'image'
+);
+
+CREATE TYPE IF NOT EXISTS omicron.public.user_data_export_state AS ENUM (
+    'requested',
+    'assigning',
+    'live',
+    'deleting',
+    'deleted'
+);
+
+/*
+ * This table contains a record when a snapshot or image is being exported.
+ */
+CREATE TABLE IF NOT EXISTS omicron.public.user_data_export (
+    id UUID PRIMARY KEY,
+
+    state omicron.public.user_data_export_state NOT NULL,
+    operating_saga_id UUID,
+    generation INT8 NOT NULL,
+
+    resource_id UUID NOT NULL,
+    resource_type omicron.public.user_data_export_resource_type NOT NULL,
+    resource_deleted BOOL NOT NULL,
+
+    pantry_ip INET,
+    pantry_port INT4 CHECK (pantry_port BETWEEN 0 AND 65535),
+    volume_id UUID
+);
+
+CREATE INDEX IF NOT EXISTS lookup_export_by_resource_type
+ON omicron.public.user_data_export (resource_type);
+
+CREATE UNIQUE INDEX IF NOT EXISTS one_export_record_per_resource
+ON omicron.public.user_data_export (resource_id)
+WHERE state != 'deleted';
+
+CREATE INDEX IF NOT EXISTS lookup_export_by_volume
+ON omicron.public.user_data_export (volume_id);
+
+CREATE INDEX IF NOT EXISTS lookup_export_by_state
+ON omicron.public.user_data_export (state);
+
 /*
  * Ereports
  *
@@ -6175,7 +6220,7 @@ INSERT INTO omicron.public.db_metadata (
     version,
     target_version
 ) VALUES
-    (TRUE, NOW(), NOW(), '156.0.0', NULL)
+    (TRUE, NOW(), NOW(), '157.0.0', NULL)
 ON CONFLICT DO NOTHING;
 
 COMMIT;
diff --git a/schema/crdb/user-data-export/up01.sql b/schema/crdb/user-data-export/up01.sql
new file mode 100644
index 00000000000..e9b0b8d1223
--- /dev/null
+++ b/schema/crdb/user-data-export/up01.sql
@@ -0,0 +1,4 @@
+CREATE TYPE IF NOT EXISTS omicron.public.user_data_export_resource_type AS ENUM (
+    'snapshot',
+    'image'
+);
diff --git a/schema/crdb/user-data-export/up02.sql b/schema/crdb/user-data-export/up02.sql
new file mode 100644
index 00000000000..392814f869f
--- /dev/null
+++ b/schema/crdb/user-data-export/up02.sql
@@ -0,0 +1,7 @@
+CREATE TYPE IF NOT EXISTS omicron.public.user_data_export_state AS ENUM (
+    'requested',
+    'assigning',
+    'live',
+    'deleting',
+    'deleted'
+);
diff --git a/schema/crdb/user-data-export/up03.sql b/schema/crdb/user-data-export/up03.sql
new file mode 100644
index 00000000000..686f09eb5d5
--- /dev/null
+++ b/schema/crdb/user-data-export/up03.sql
@@ -0,0 +1,15 @@
+CREATE TABLE IF NOT EXISTS omicron.public.user_data_export (
+    id UUID PRIMARY KEY,
+
+    state omicron.public.user_data_export_state NOT NULL,
+    operating_saga_id UUID,
+    generation INT8 NOT NULL,
+
+    resource_id UUID NOT NULL,
+    resource_type omicron.public.user_data_export_resource_type NOT NULL,
+    resource_deleted BOOL NOT NULL,
+
+    pantry_ip INET,
+    pantry_port INT4 CHECK (pantry_port BETWEEN 0 AND 65535),
+    volume_id UUID
+);
diff --git a/schema/crdb/user-data-export/up04.sql b/schema/crdb/user-data-export/up04.sql
new file mode 100644
index 00000000000..119d4c76148
--- /dev/null
+++ b/schema/crdb/user-data-export/up04.sql
@@ -0,0 +1,2 @@
+CREATE INDEX IF NOT EXISTS lookup_export_by_resource_type
+ON omicron.public.user_data_export (resource_type);
diff --git a/schema/crdb/user-data-export/up05.sql b/schema/crdb/user-data-export/up05.sql
new file mode 100644
index 00000000000..8214d90b8ce
--- /dev/null
+++ b/schema/crdb/user-data-export/up05.sql
@@ -0,0 +1,3 @@
+CREATE UNIQUE INDEX IF NOT EXISTS one_export_record_per_resource
+ON omicron.public.user_data_export (resource_id)
+WHERE state != 'deleted';
diff --git a/schema/crdb/user-data-export/up06.sql b/schema/crdb/user-data-export/up06.sql
new file mode 100644
index 00000000000..23096dbcc9e
--- /dev/null
+++ b/schema/crdb/user-data-export/up06.sql
@@ -0,0 +1,2 @@
+CREATE INDEX IF NOT EXISTS lookup_export_by_volume
+ON omicron.public.user_data_export (volume_id);
diff --git a/schema/crdb/user-data-export/up07.sql b/schema/crdb/user-data-export/up07.sql
new file mode 100644
index 00000000000..4ecefcec41a
--- /dev/null
+++ b/schema/crdb/user-data-export/up07.sql
@@ -0,0 +1,2 @@
+CREATE INDEX IF NOT EXISTS lookup_export_by_state
+ON omicron.public.user_data_export (state);
diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs
index b5ad56f7f45..cdcba01918c 100644
--- a/uuid-kinds/src/lib.rs
+++ b/uuid-kinds/src/lib.rs
@@ -92,6 +92,7 @@ impl_typed_uuid_kind! {
     Upstairs => "upstairs",
     UpstairsRepair => "upstairs_repair",
     UpstairsSession => "upstairs_session",
+    UserDataExport => "user_data_export",
     Vnic => "vnic",
     Volume => "volume",
     WebhookDelivery => "webhook_delivery",
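
A rough sketch, for reviewers, of how a background task could drive these new datastore methods over the computed changeset, using only the calls the tests above exercise. The run_user_data_export_create_saga and run_user_data_export_delete_saga helpers are hypothetical stand-ins for the user data export create and delete sagas, and the Snapshot variant of UserDataExportResource is assumed by symmetry with the Image variant the tests match on:

async fn reconcile_user_data_export(
    opctx: &OpContext,
    datastore: &DataStore,
) -> anyhow::Result<()> {
    // One reconciliation pass: ask the datastore what work is outstanding.
    let changeset =
        datastore.compute_user_data_export_changeset(opctx).await?;

    // Resources (snapshots, images) with no non-deleted export record yet
    // get a record in Requested. The partial unique index
    // one_export_record_per_resource guarantees at most one such record per
    // resource, so a concurrent creation attempt simply errors here.
    for resource in changeset.request_required {
        match resource {
            UserDataExportResource::Snapshot { id } => {
                datastore
                    .user_data_export_create_for_snapshot(
                        opctx,
                        UserDataExportUuid::new_v4(),
                        id,
                    )
                    .await?;
            }
            UserDataExportResource::Image { id } => {
                datastore
                    .user_data_export_create_for_image(
                        opctx,
                        UserDataExportUuid::new_v4(),
                        id,
                    )
                    .await?;
            }
        }
    }

    // Records still in Requested need the create saga, which performs the
    // Requested -> Assigning -> Live transitions.
    for record in changeset.create_required {
        run_user_data_export_create_saga(record.id()).await?;
    }

    // Records whose resource was deleted or whose Pantry was expunged need
    // the delete saga (Live -> Deleting -> Deleted).
    for record in changeset.delete_required {
        run_user_data_export_delete_saga(record.id()).await?;
    }

    Ok(())
}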
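
The table splits the assigned Pantry address across the pantry_ip and pantry_port columns, while the tests read it back as a single Option<SocketAddrV6> via record.pantry_address(). A minimal sketch of such an accessor, assuming the model wraps the columns in newtypes over std::net::Ipv6Addr and u16 whose inner values are reachable via .0; the actual accessor in the new db-model file may differ:

impl UserDataExportRecord {
    // Both columns are populated together once the record reaches Live, so
    // this returns Some exactly when a Pantry has been assigned.
    pub fn pantry_address(&self) -> Option<std::net::SocketAddrV6> {
        match (self.pantry_ip, self.pantry_port) {
            (Some(ip), Some(port)) => Some(std::net::SocketAddrV6::new(
                ip.0,   // assumed newtype over std::net::Ipv6Addr
                port.0, // assumed newtype over u16 (SqlU16)
                0,
                0,
            )),
            _ => None,
        }
    }
}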