diff --git a/backend/app/api/demo.go b/backend/app/api/demo.go
index 10208dec0..00086b24b 100644
--- a/backend/app/api/demo.go
+++ b/backend/app/api/demo.go
@@ -18,7 +18,8 @@ const (
demoPasswordEnv = "HBOX_DEMO_PASSWORD"
// demoPasswordDefault is the password used when demo mode runs outside
// production. Public knowledge — never used unless cfg.Mode is development.
- demoPasswordDefault = "demo"
+ // Must satisfy services.PasswordMinLength or registration will reject it.
+ demoPasswordDefault = "demodemo"
// demoPasswordMinLength is the minimum acceptable length for a demo
// password supplied via demoPasswordEnv in production mode.
demoPasswordMinLength = 12
diff --git a/backend/app/api/handlers/v1/controller.go b/backend/app/api/handlers/v1/controller.go
index f9c5d3af3..36ade3778 100644
--- a/backend/app/api/handlers/v1/controller.go
+++ b/backend/app/api/handlers/v1/controller.go
@@ -41,6 +41,12 @@ func WithMaxUploadSize(maxUploadSize int64) func(*V1Controller) {
}
}
+func WithMaxImportSize(maxImportSize int64) func(*V1Controller) {
+ return func(ctrl *V1Controller) {
+ ctrl.maxImportSize = maxImportSize
+ }
+}
+
func WithDemoStatus(demoStatus bool) func(*V1Controller) {
return func(ctrl *V1Controller) {
ctrl.isDemo = demoStatus
@@ -70,6 +76,7 @@ type V1Controller struct {
repo *repo.AllRepos
svc *services.AllServices
maxUploadSize int64
+ maxImportSize int64
isDemo bool
allowRegistration bool
bus *eventbus.EventBus
@@ -233,6 +240,8 @@ func (ctrl *V1Controller) HandleCacheWS() errchain.HandlerFunc {
ctrl.bus.Subscribe(eventbus.EventTagMutation, factory("tag.mutation"))
ctrl.bus.Subscribe(eventbus.EventEntityMutation, factory("entity.mutation"))
ctrl.bus.Subscribe(eventbus.EventUserMutation, factory("user.mutation"))
+ ctrl.bus.Subscribe(eventbus.EventExportMutation, factory("export.mutation"))
+ ctrl.bus.Subscribe(eventbus.EventImportMutation, factory("import.mutation"))
// Persistent asynchronous ticker that keeps all websocket connections alive with periodic pings.
go func() {
diff --git a/backend/app/api/handlers/v1/v1_ctrl_exports.go b/backend/app/api/handlers/v1/v1_ctrl_exports.go
new file mode 100644
index 000000000..4434f936f
--- /dev/null
+++ b/backend/app/api/handlers/v1/v1_ctrl_exports.go
@@ -0,0 +1,290 @@
+package v1
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/hay-kot/httpkit/errchain"
+ "github.com/hay-kot/httpkit/server"
+ "github.com/rs/zerolog/log"
+ "gocloud.dev/blob"
+
+ "github.com/sysadminsmedia/homebox/backend/internal/core/services"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/repo"
+ "github.com/sysadminsmedia/homebox/backend/internal/sys/validate"
+ "github.com/sysadminsmedia/homebox/backend/internal/web/adapters"
+)
+
+// HandleExportsList godoc
+//
+// @Summary List Collection Exports
+// @Description Returns export job rows for the caller's group, newest first.
+// @Tags Group
+// @Produce json
+// @Success 200 {object} Results[repo.ExportOut]
+// @Router /v1/group/exports [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleExportsList() errchain.HandlerFunc {
+ fn := func(r *http.Request) (Results[repo.ExportOut], error) {
+ ctx := services.NewContext(r.Context())
+ rows, err := ctrl.repo.Exports.ListByGroup(ctx, ctx.GID)
+ if err != nil {
+ return Results[repo.ExportOut]{}, err
+ }
+ return WrapResults(rows), nil
+ }
+
+ return adapters.Command(fn, http.StatusOK)
+}
+
+// HandleExportsCreate godoc
+//
+// @Summary Start a Collection Export
+// @Description Creates a pending export row and enqueues the build job. Poll the listing endpoint or watch the WebSocket for completion.
+// @Tags Group
+// @Produce json
+// @Success 202 {object} repo.ExportOut
+// @Router /v1/group/exports [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleExportsCreate() errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ctx := services.NewContext(r.Context())
+ out, err := ctrl.svc.Exports.Enqueue(ctx, ctx.GID)
+ if err != nil {
+ log.Err(err).Msg("failed to enqueue export")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ return server.JSON(w, http.StatusAccepted, out)
+ }
+}
+
+// HandleExportGet godoc
+//
+// @Summary Get an Export
+// @Tags Group
+// @Produce json
+// @Param id path string true "Export ID"
+// @Success 200 {object} repo.ExportOut
+// @Router /v1/group/exports/{id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleExportGet() errchain.HandlerFunc {
+ fn := func(r *http.Request, id uuid.UUID) (repo.ExportOut, error) {
+ ctx := services.NewContext(r.Context())
+ return ctrl.repo.Exports.Get(ctx, ctx.GID, id)
+ }
+
+ return adapters.CommandID("id", fn, http.StatusOK)
+}
+
+// HandleExportDownload godoc
+//
+// @Summary Download an Export Artifact
+// @Tags Group
+// @Produce application/zip
+// @Param id path string true "Export ID"
+// @Success 200 {file} file
+// @Router /v1/group/exports/{id}/download [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleExportDownload() errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ ctx := services.NewContext(r.Context())
+ id, err := ctrl.routeID(r)
+ if err != nil {
+ return err
+ }
+ out, err := ctrl.repo.Exports.Get(ctx, ctx.GID, id)
+ if err != nil {
+ if ent.IsNotFound(err) {
+ return validate.NewRequestError(err, http.StatusNotFound)
+ }
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ if out.Status != "completed" || out.ArtifactPath == "" {
+ return validate.NewRequestError(errors.New("export not ready"), http.StatusConflict)
+ }
+ // Defence in depth: refuse to stream anything that doesn't live under the
+ // caller's group prefix. The repo Get above already enforces ownership;
+ // this catches a stale row whose artifact_path was tampered with, and
+ // path.Clean collapses any traversal segments before the prefix check,
+ // mirroring the delete handler below.
+ cleanPath := path.Clean(out.ArtifactPath)
+ expectedPrefix := ctx.GID.String() + "/exports/"
+ if !strings.HasPrefix(cleanPath, expectedPrefix) {
+ return validate.NewRequestError(errors.New("artifact outside group prefix"), http.StatusForbidden)
+ }
+
+ bucket, err := blob.OpenBucket(r.Context(), ctrl.repo.Attachments.GetConnString())
+ if err != nil {
+ log.Err(err).Msg("export download: open bucket")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ defer func() { _ = bucket.Close() }()
+
+ reader, err := bucket.NewReader(r.Context(), ctrl.repo.Attachments.GetFullPath(cleanPath), nil)
+ if err != nil {
+ log.Err(err).Str("artifact_path", out.ArtifactPath).Msg("export download: open reader")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ defer func() { _ = reader.Close() }()
+
+ w.Header().Set("Content-Type", "application/zip")
+ w.Header().Set("Content-Disposition",
+ fmt.Sprintf(`attachment; filename="homebox-export-%s.zip"`, out.ID.String()))
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", out.SizeBytes))
+ w.WriteHeader(http.StatusOK)
+ _, err = io.Copy(w, reader)
+ return err
+ }
+}
+
+// HandleExportDelete godoc
+//
+// @Summary Delete an Export
+// @Description Deletes the export row and its blob artifact.
+// @Tags Group
+// @Param id path string true "Export ID"
+// @Success 204
+// @Router /v1/group/exports/{id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleExportDelete() errchain.HandlerFunc {
+ fn := func(r *http.Request, id uuid.UUID) (any, error) {
+ ctx := services.NewContext(r.Context())
+ out, err := ctrl.repo.Exports.Get(ctx, ctx.GID, id)
+ if err != nil {
+ // Idempotent: a missing row is already in the desired state.
+ // Swallow here so the adapter writes 204 instead of letting the
+ // error middleware translate ent.IsNotFound into a 404.
+ if ent.IsNotFound(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ if out.ArtifactPath != "" {
+ // Defence in depth: only touch blobs that live under the caller's
+ // group prefix. The repo Get above already enforces ownership; this
+ // catches a stale row whose artifact_path was tampered with, and
+ // path.Clean collapses any traversal segments before the prefix
+ // check so "/exports/../../other" can't slip through.
+ cleanPath := path.Clean(out.ArtifactPath)
+ expectedPrefix := ctx.GID.String() + "/exports/"
+ if strings.HasPrefix(cleanPath, expectedPrefix) {
+ bucket, err := blob.OpenBucket(r.Context(), ctrl.repo.Attachments.GetConnString())
+ if err == nil {
+ _ = bucket.Delete(r.Context(), ctrl.repo.Attachments.GetFullPath(cleanPath))
+ _ = bucket.Close()
+ }
+ }
+ }
+ if _, err := ctrl.repo.Exports.Delete(ctx, ctx.GID, id); err != nil {
+ return nil, err
+ }
+ return nil, nil
+ }
+
+ return adapters.CommandID("id", fn, http.StatusNoContent)
+}
+
+// HandleCollectionImport godoc
+//
+// @Summary Import a Collection Zip
+// @Description Uploads a collection-export zip and enqueues the import job. The destination group must be empty. Returns the tracked import row so clients can poll for progress.
+// @Tags Group
+// @Accept multipart/form-data
+// @Produce json
+// @Param file formData file true "Export zip"
+// @Success 202 {object} repo.ExportOut
+// @Router /v1/group/import [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleCollectionImport() errchain.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) error {
+ if ctrl.isDemo {
+ return validate.NewRequestError(errors.New("import is not allowed in demo mode"), http.StatusForbidden)
+ }
+
+ ctx := services.NewContext(r.Context())
+
+ isOwner, err := ctrl.repo.Groups.IsOwnerOf(ctx, ctx.UID, ctx.GID)
+ if err != nil {
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ if !isOwner {
+ return validate.NewRequestError(errors.New("only group owners can import"), http.StatusForbidden)
+ }
+
+ // Precondition: no items yet. Default seeded locations/tags are fine —
+ // the worker wipes them as part of the restore. Front-loading the
+ // check here gives instant 409 feedback for clearly bad attempts.
+ ready, err := ctrl.svc.Exports.IsGroupReadyForImport(r.Context(), ctx.GID)
+ if err != nil {
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ if !ready {
+ return validate.NewRequestError(
+ errors.New("import requires a collection with no user-created items, tags, templates, notifiers, or custom types"),
+ http.StatusConflict)
+ }
+
+ // maxImportSize is in MB and applies to the whole request body via the
+ // path-aware middleware; here we pass it to ParseMultipartForm as the
+ // memory-vs-disk threshold so larger archives spool gracefully.
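+ // For example, a 100 MB cap becomes 100 << 20 = 104,857,600 bytes.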
+ if err := r.ParseMultipartForm(ctrl.maxImportSize << 20); err != nil {
+ log.Err(err).Msg("import: parse multipart")
+ return validate.NewRequestError(err, http.StatusBadRequest)
+ }
+ // Remove any spooled temp files the multipart parser may have created.
+ // Registered before the file's Close defer below so that, with LIFO
+ // defer ordering, the close runs first — on Windows os.Remove fails
+ // while the handle is still open.
+ defer func() {
+ if r.MultipartForm != nil {
+ _ = r.MultipartForm.RemoveAll()
+ }
+ }()
+ file, _, err := r.FormFile("file")
+ if err != nil {
+ return validate.NewRequestError(err, http.StatusBadRequest)
+ }
+ defer func() { _ = file.Close() }()
+
+ // Stage to {gid}/imports/{uuid}.zip in blob storage. Using the gid
+ // prefix makes it impossible for one tenant's uploads to collide with
+ // another's, and the worker enforces the same prefix as a safety net.
+ uploadID := uuid.New()
+ uploadKey := fmt.Sprintf("%s/imports/%s.zip", ctx.GID.String(), uploadID.String())
+
+ bucket, err := blob.OpenBucket(r.Context(), ctrl.repo.Attachments.GetConnString())
+ if err != nil {
+ log.Err(err).Msg("import: open bucket")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ defer func() { _ = bucket.Close() }()
+
+ bw, err := bucket.NewWriter(r.Context(), ctrl.repo.Attachments.GetFullPath(uploadKey),
+ &blob.WriterOptions{ContentType: "application/zip"})
+ if err != nil {
+ log.Err(err).Msg("import: open writer")
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ // io.Copy returns the staged byte count; we record it on the import
+ // row so the UI can render "X MB queued" before the worker starts.
+ uploadSize, err := io.Copy(bw, file)
+ if err != nil {
+ _ = bw.Close()
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+ if err := bw.Close(); err != nil {
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+
+ row, err := ctrl.svc.Exports.EnqueueImport(r.Context(), ctx.GID, ctx.UID, uploadKey, uploadSize)
+ if err != nil {
+ // Best-effort cleanup of the staged upload if we couldn't enqueue.
+ _ = bucket.Delete(r.Context(), ctrl.repo.Attachments.GetFullPath(uploadKey))
+ return validate.NewRequestError(err, http.StatusInternalServerError)
+ }
+
+ return server.JSON(w, http.StatusAccepted, row)
+ }
+}
diff --git a/backend/app/api/main.go b/backend/app/api/main.go
index 690a64b97..30838a944 100644
--- a/backend/app/api/main.go
+++ b/backend/app/api/main.go
@@ -149,7 +149,7 @@ func run(cfg *config.Config) error {
// =========================================================================
// Initialize Database & Repos
- c, err := setupDatabase(cfg, otelProvider)
+ c, sqlDialect, err := setupDatabase(cfg, otelProvider)
if err != nil {
return err
}
@@ -191,6 +191,7 @@ func run(cfg *config.Config) error {
services.WithAutoIncrementAssetID(cfg.Options.AutoIncrementAssetID),
services.WithCurrencies(currencyData),
services.WithNotifierConfig(&cfg.Notifier),
+ services.WithExportPlumbing(app.bus, app.db, cfg.Storage, cfg.Database.PubSubConnString, sqlDialect),
services.WithMailer(&app.mailer),
)
@@ -217,8 +218,13 @@ func run(cfg *config.Config) error {
middleware.RealIP,
mid.Logger(logger),
mid.SecurityHeaders(),
- // Restrict the max body size to the upload limit + 1MB (for overhead)
- mid.MaxBodySize(cfg.Web.MaxUploadSize+1),
+ // Restrict the max body size to the upload limit + 1MB (for overhead).
+ // Collection-import uploads carry the full inventory zip and have
+ // their own much larger cap; everything else falls through to the
+ // default.
+ mid.MaxBodySizeByPath(cfg.Web.MaxUploadSize+1, map[string]int64{
+ "/api/v1/group/import": cfg.Web.MaxImportSize + 1,
+ }),
middleware.Recoverer,
middleware.StripSlashes,
)
diff --git a/backend/app/api/recurring.go b/backend/app/api/recurring.go
index 7c7e2172f..a8b310561 100644
--- a/backend/app/api/recurring.go
+++ b/backend/app/api/recurring.go
@@ -11,6 +11,8 @@ import (
"github.com/rs/zerolog/log"
"github.com/sysadminsmedia/homebox/backend/internal/sys/config"
"github.com/sysadminsmedia/homebox/backend/pkgs/utils"
+ "gocloud.dev/blob"
+ "gocloud.dev/gcerrors"
"gocloud.dev/pubsub"
)
@@ -50,6 +52,10 @@ func registerRecurringTasks(app *app, cfg *config.Config, runner *graceful.Runne
}
}))
+ runner.AddPlugin(NewTask("purge-stale-exports", 24*time.Hour, func(ctx context.Context) {
+ purgeStaleExports(ctx, app)
+ }))
+
runner.AddPlugin(NewTask("send-notifications", time.Hour, func(ctx context.Context) {
now := time.Now()
if now.Hour() == 8 {
@@ -61,6 +67,43 @@ func registerRecurringTasks(app *app, cfg *config.Config, runner *graceful.Runne
}
}))
+ runner.AddFunc("collection-export-subscription", func(ctx context.Context) error {
+ return runJobSubscription(ctx, cfg, "collection_export", func(ctx context.Context, msg *pubsub.Message) {
+ gid, err := uuid.Parse(msg.Metadata["group_id"])
+ if err != nil {
+ log.Err(err).Str("group_id", msg.Metadata["group_id"]).Msg("export job: bad group_id")
+ return
+ }
+ exportID, err := uuid.Parse(msg.Metadata["export_id"])
+ if err != nil {
+ log.Err(err).Str("export_id", msg.Metadata["export_id"]).Msg("export job: bad export_id")
+ return
+ }
+ app.services.Exports.RunExport(ctx, exportID, gid)
+ })
+ })
+
+ runner.AddFunc("collection-import-subscription", func(ctx context.Context) error {
+ return runJobSubscription(ctx, cfg, "collection_import", func(ctx context.Context, msg *pubsub.Message) {
+ gid, err := uuid.Parse(msg.Metadata["group_id"])
+ if err != nil {
+ log.Err(err).Str("group_id", msg.Metadata["group_id"]).Msg("import job: bad group_id")
+ return
+ }
+ userID, err := uuid.Parse(msg.Metadata["user_id"])
+ if err != nil {
+ log.Err(err).Str("user_id", msg.Metadata["user_id"]).Msg("import job: bad user_id")
+ return
+ }
+ importID, err := uuid.Parse(msg.Metadata["import_id"])
+ if err != nil {
+ log.Err(err).Str("import_id", msg.Metadata["import_id"]).Msg("import job: bad import_id")
+ return
+ }
+ app.services.Exports.RunImport(ctx, gid, userID, importID)
+ })
+ })
+
if cfg.Thumbnail.Enabled {
runner.AddFunc("create-thumbnails-subscription", func(ctx context.Context) error {
pubsubString, err := utils.GenerateSubPubConn(cfg.Database.PubSubConnString, "thumbnails")
@@ -162,3 +205,100 @@ func registerRecurringTasks(app *app, cfg *config.Config, runner *graceful.Runne
cfg.Print()
}
}
+
+// runJobSubscription opens a pubsub topic+subscription pair for the given
+// topic name and runs handler for each received message. Mirrors the
+// thumbnail subscriber's lifecycle: shut down topic and subscription when
+// ctx ends; ack every message regardless of handler outcome (no redelivery).
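+//
+// For orientation, a hypothetical publisher-side sketch (the real Send call
+// lives in the export service and its metadata may differ in detail):
+//
+//	topic, err := pubsub.OpenTopic(ctx, conn)
+//	if err == nil {
+//	    _ = topic.Send(ctx, &pubsub.Message{
+//	        Body: []byte{},
+//	        Metadata: map[string]string{
+//	            "group_id":  gid.String(),
+//	            "export_id": exportID.String(),
+//	        },
+//	    })
+//	}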
+func runJobSubscription(ctx context.Context, cfg *config.Config, topicName string, handler func(context.Context, *pubsub.Message)) error {
+ conn, err := utils.GenerateSubPubConn(cfg.Database.PubSubConnString, topicName)
+ if err != nil {
+ log.Err(err).Str("topic", topicName).Msg("failed to generate pubsub connection string")
+ return err
+ }
+ topic, err := pubsub.OpenTopic(ctx, conn)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := topic.Shutdown(ctx); err != nil {
+ log.Err(err).Str("topic", topicName).Msg("failed to shutdown pubsub topic")
+ }
+ }()
+
+ sub, err := pubsub.OpenSubscription(ctx, conn)
+ if err != nil {
+ log.Err(err).Str("topic", topicName).Msg("failed to open pubsub subscription")
+ return err
+ }
+ defer func() {
+ if err := sub.Shutdown(ctx); err != nil {
+ log.Err(err).Str("topic", topicName).Msg("failed to shutdown pubsub subscription")
+ }
+ }()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ msg, err := sub.Receive(ctx)
+ if err != nil {
+ log.Err(err).Str("topic", topicName).Msg("failed to receive message from pubsub topic")
+ continue
+ }
+ if msg == nil {
+ continue
+ }
+ handler(ctx, msg)
+ msg.Ack()
+ }
+ }
+}
+
+// purgeStaleExports drops export rows and their blob artifacts older than a
+// week — long enough for users to re-download a backup, short enough not to
+// pile up. The blob is deleted before the row because the row holds the only
+// ArtifactPath pointer; dropping the row first would orphan the blob if the
+// bucket is unavailable. Failed rows stay so the next sweep retries.
+func purgeStaleExports(ctx context.Context, app *app) {
+ cutoff := time.Now().Add(-7 * 24 * time.Hour)
+ candidates, err := app.repos.Exports.ListOlderThan(ctx, cutoff)
+ if err != nil {
+ log.Err(err).Msg("failed to list stale exports")
+ return
+ }
+ if len(candidates) == 0 {
+ return
+ }
+ bucket, err := blob.OpenBucket(ctx, app.repos.Attachments.GetConnString())
+ if err != nil {
+ log.Err(err).Msg("export cleanup: failed to open bucket; deferring purge to next sweep")
+ return
+ }
+ defer func() { _ = bucket.Close() }()
+ purged := 0
+ for _, e := range candidates {
+ if e.ArtifactPath != "" {
+ err := bucket.Delete(ctx, app.repos.Attachments.GetFullPath(e.ArtifactPath))
+ if err != nil && gcerrors.Code(err) != gcerrors.NotFound {
+ log.Warn().Err(err).
+ Str("export_id", e.ID.String()).
+ Str("artifact_path", e.ArtifactPath).
+ Msg("export cleanup: blob delete failed; leaving row for next sweep")
+ continue
+ }
+ }
+ if _, err := app.repos.Exports.Delete(ctx, e.GroupID, e.ID); err != nil {
+ log.Warn().Err(err).
+ Str("export_id", e.ID.String()).
+ Msg("export cleanup: row delete failed; leaving for next sweep")
+ continue
+ }
+ purged++
+ }
+ log.Info().
+ Int("purged", purged).
+ Int("candidates", len(candidates)).
+ Msg("purged stale collection exports")
+}
diff --git a/backend/app/api/routes.go b/backend/app/api/routes.go
index 87ba33e4b..218546751 100644
--- a/backend/app/api/routes.go
+++ b/backend/app/api/routes.go
@@ -68,6 +68,7 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
a.bus,
a.conf,
v1.WithMaxUploadSize(a.conf.Web.MaxUploadSize),
+ v1.WithMaxImportSize(a.conf.Web.MaxImportSize),
v1.WithRegistration(a.conf.Options.AllowRegistration),
v1.WithDemoStatus(a.conf.Demo), // Disable Password Change in Demo Mode
v1.WithURL(fmt.Sprintf("%s:%s", a.conf.Web.Host, a.conf.Web.Port)),
@@ -134,6 +135,14 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
r.Delete("/groups/invitations/{id}", chain.ToHandlerFunc(v1Ctrl.HandleGroupInvitationsDelete(), userMW...))
r.Post("/groups/invitations/{id}", chain.ToHandlerFunc(v1Ctrl.HandleGroupInvitationsAccept(), userMW...))
+ // Collection export/import (group-scoped)
+ r.Post("/group/exports", chain.ToHandlerFunc(v1Ctrl.HandleExportsCreate(), userMW...))
+ r.Get("/group/exports", chain.ToHandlerFunc(v1Ctrl.HandleExportsList(), userMW...))
+ r.Get("/group/exports/{id}", chain.ToHandlerFunc(v1Ctrl.HandleExportGet(), userMW...))
+ r.Get("/group/exports/{id}/download", chain.ToHandlerFunc(v1Ctrl.HandleExportDownload(), userMW...))
+ r.Delete("/group/exports/{id}", chain.ToHandlerFunc(v1Ctrl.HandleExportDelete(), userMW...))
+ r.Post("/group/import", chain.ToHandlerFunc(v1Ctrl.HandleCollectionImport(), userMW...))
+
r.Get("/groups/statistics", chain.ToHandlerFunc(v1Ctrl.HandleGroupStatistics(), userMW...))
r.Get("/groups/statistics/purchase-price", chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...))
r.Get("/groups/statistics/locations", chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLocations(), userMW...))
diff --git a/backend/app/api/setup.go b/backend/app/api/setup.go
index 3fb86190d..90437e23e 100644
--- a/backend/app/api/setup.go
+++ b/backend/app/api/setup.go
@@ -20,30 +20,30 @@ import (
// setupDatabase prepares the storage directory, validates the driver/SSL
// config, opens the database (through the OTel provider for tracing), runs
-// any pending goose migrations, and returns an ent client. Caller owns the
-// client and must Close() it. Extracted from run() to keep that function
-// under the gocyclo threshold.
-func setupDatabase(cfg *config.Config, otelProvider *otel.Provider) (*ent.Client, error) {
+// any pending goose migrations, and returns an ent client along with the
+// resolved ent dialect name. Caller owns the client and must Close() it.
+// Extracted from run() to keep that function under the gocyclo threshold.
+func setupDatabase(cfg *config.Config, otelProvider *otel.Provider) (*ent.Client, string, error) {
if err := setupStorageDir(cfg); err != nil {
- return nil, err
+ return nil, "", err
}
driver := strings.ToLower(cfg.Database.Driver)
if driver == config.DriverPostgres {
if !validatePostgresSSLMode(cfg.Database.SslMode) {
log.Error().Str("sslmode", cfg.Database.SslMode).Msg("invalid sslmode")
- return nil, fmt.Errorf("invalid sslmode: %s", cfg.Database.SslMode)
+ return nil, "", fmt.Errorf("invalid sslmode: %s", cfg.Database.SslMode)
}
}
databaseURL, err := setupDatabaseURL(cfg)
if err != nil {
- return nil, err
+ return nil, "", err
}
driverName, dialectName, err := resolveDriver(driver)
if err != nil {
- return nil, err
+ return nil, "", err
}
db, err := otelProvider.OpenDatabase(driverName, databaseURL)
@@ -55,7 +55,7 @@ func setupDatabase(cfg *config.Config, otelProvider *otel.Provider) (*ent.Client
Str("port", cfg.Database.Port).
Str("database", cfg.Database.Database).
Msg("failed opening connection to {driver} database at {host}:{port}/{database}")
- return nil, fmt.Errorf("failed opening connection to %s database at %s:%s/%s: %w",
+ return nil, "", fmt.Errorf("failed opening connection to %s database at %s:%s/%s: %w",
driver, cfg.Database.Host, cfg.Database.Port, cfg.Database.Database, err)
}
@@ -63,9 +63,9 @@ func setupDatabase(cfg *config.Config, otelProvider *otel.Provider) (*ent.Client
if err := runMigrations(c, driver); err != nil {
_ = c.Close()
- return nil, err
+ return nil, "", err
}
- return c, nil
+ return c, dialectName, nil
}
// resolveDriver maps the configured driver name onto the (database/sql, ent
diff --git a/backend/app/api/static/docs/docs.go b/backend/app/api/static/docs/docs.go
index 103f1166a..c6a1a696b 100644
--- a/backend/app/api/static/docs/docs.go
+++ b/backend/app/api/static/docs/docs.go
@@ -1155,6 +1155,183 @@ const docTemplate = `{
}
}
},
+ "/v1/group/exports": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Returns export job rows for the caller's group, newest first.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "List Collection Exports",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.Results-repo_ExportOut"
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Creates a pending export row and enqueues the build job. Poll the listing endpoint or watch the WebSocket for completion.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Start a Collection Export",
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Get an Export",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Deletes the export row and its blob artifact.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Delete an Export",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}/download": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/zip"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Download an Export Artifact",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "file"
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/import": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Uploads a collection-export zip and enqueues the import job. The destination group must be empty. Returns the tracked import row so clients can poll for progress.",
+ "consumes": [
+ "multipart/form-data"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Import a Collection Zip",
+ "parameters": [
+ {
+ "type": "file",
+ "description": "Export zip",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ }
+ },
"/v1/groups": {
"get": {
"security": [
@@ -3708,6 +3885,80 @@ const docTemplate = `{
}
}
},
+ "ent.Export": {
+ "type": "object",
+ "properties": {
+ "artifact_path": {
+ "description": "ArtifactPath holds the value of the \"artifact_path\" field.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "CreatedAt holds the value of the \"created_at\" field.",
+ "type": "string"
+ },
+ "edges": {
+ "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ExportQuery when eager-loading is set.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/ent.ExportEdges"
+ }
+ ]
+ },
+ "error": {
+ "description": "Error holds the value of the \"error\" field.",
+ "type": "string"
+ },
+ "group_id": {
+ "description": "GroupID holds the value of the \"group_id\" field.",
+ "type": "string"
+ },
+ "id": {
+ "description": "ID of the ent.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind holds the value of the \"kind\" field.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/export.Kind"
+ }
+ ]
+ },
+ "progress": {
+ "description": "Progress holds the value of the \"progress\" field.",
+ "type": "integer"
+ },
+ "size_bytes": {
+ "description": "SizeBytes holds the value of the \"size_bytes\" field.",
+ "type": "integer"
+ },
+ "status": {
+ "description": "Status holds the value of the \"status\" field.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/export.Status"
+ }
+ ]
+ },
+ "updated_at": {
+ "description": "UpdatedAt holds the value of the \"updated_at\" field.",
+ "type": "string"
+ }
+ }
+ },
+ "ent.ExportEdges": {
+ "type": "object",
+ "properties": {
+ "group": {
+ "description": "Group holds the value of the group edge.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/ent.Group"
+ }
+ ]
+ }
+ }
+ },
"ent.Group": {
"type": "object",
"properties": {
@@ -3765,6 +4016,13 @@ const docTemplate = `{
"$ref": "#/definitions/ent.EntityType"
}
},
+ "exports": {
+ "description": "Exports holds the value of the exports edge.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ent.Export"
+ }
+ },
"invitation_tokens": {
"description": "InvitationTokens holds the value of the invitation_tokens edge.",
"type": "array",
@@ -4352,6 +4610,36 @@ const docTemplate = `{
"TypeTime"
]
},
+ "export.Kind": {
+ "type": "string",
+ "enum": [
+ "export",
+ "export",
+ "import"
+ ],
+ "x-enum-varnames": [
+ "DefaultKind",
+ "KindExport",
+ "KindImport"
+ ]
+ },
+ "export.Status": {
+ "type": "string",
+ "enum": [
+ "pending",
+ "pending",
+ "running",
+ "completed",
+ "failed"
+ ],
+ "x-enum-varnames": [
+ "DefaultStatus",
+ "StatusPending",
+ "StatusRunning",
+ "StatusCompleted",
+ "StatusFailed"
+ ]
+ },
"repo.APIKeyCreate": {
"type": "object",
"required": [
@@ -5280,6 +5568,42 @@ const docTemplate = `{
}
}
},
+ "repo.ExportOut": {
+ "type": "object",
+ "properties": {
+ "artifactPath": {
+ "type": "string"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "error": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is \"export\" for server-produced backup artifacts, \"import\" for\nuser-uploaded restore zips. The lifecycle fields below behave the\nsame for both.",
+ "type": "string"
+ },
+ "progress": {
+ "type": "integer"
+ },
+ "sizeBytes": {
+ "type": "integer"
+ },
+ "status": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
"repo.Group": {
"type": "object",
"properties": {
@@ -6184,6 +6508,17 @@ const docTemplate = `{
}
}
},
+ "v1.Results-repo_ExportOut": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ },
"v1.TelemetryStatus": {
"type": "object",
"properties": {
diff --git a/backend/app/api/static/docs/openapi-3.json b/backend/app/api/static/docs/openapi-3.json
index d5a19e3df..a16bb34b1 100644
--- a/backend/app/api/static/docs/openapi-3.json
+++ b/backend/app/api/static/docs/openapi-3.json
@@ -1258,6 +1258,203 @@
}
}
},
+ "/v1/group/exports": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Returns export job rows for the caller's group, newest first.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "List Collection Exports",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/v1.Results-repo_ExportOut"
+ }
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Creates a pending export row and enqueues the build job. Poll the listing endpoint or watch the WebSocket for completion.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Start a Collection Export",
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Get an Export",
+ "parameters": [
+ {
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Deletes the export row and its blob artifact.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Delete an Export",
+ "parameters": [
+ {
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}/download": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Download an Export Artifact",
+ "parameters": [
+ {
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/zip": {
+ "schema": {
+ "type": "string",
+ "format": "binary"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/import": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Uploads a collection-export zip and enqueues the import job. The destination group must be empty. Returns the tracked import row so clients can poll for progress.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Import a Collection Zip",
+ "requestBody": {
+ "content": {
+ "multipart/form-data": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "file": {
+ "description": "Export zip",
+ "type": "string",
+ "format": "binary"
+ }
+ },
+ "required": [
+ "file"
+ ]
+ }
+ }
+ },
+ "required": true
+ },
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
"/v1/groups": {
"get": {
"security": [
@@ -3927,6 +4124,80 @@
}
}
},
+ "ent.Export": {
+ "type": "object",
+ "properties": {
+ "artifact_path": {
+ "description": "ArtifactPath holds the value of the \"artifact_path\" field.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "CreatedAt holds the value of the \"created_at\" field.",
+ "type": "string"
+ },
+ "edges": {
+ "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ExportQuery when eager-loading is set.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ent.ExportEdges"
+ }
+ ]
+ },
+ "error": {
+ "description": "Error holds the value of the \"error\" field.",
+ "type": "string"
+ },
+ "group_id": {
+ "description": "GroupID holds the value of the \"group_id\" field.",
+ "type": "string"
+ },
+ "id": {
+ "description": "ID of the ent.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind holds the value of the \"kind\" field.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/export.Kind"
+ }
+ ]
+ },
+ "progress": {
+ "description": "Progress holds the value of the \"progress\" field.",
+ "type": "integer"
+ },
+ "size_bytes": {
+ "description": "SizeBytes holds the value of the \"size_bytes\" field.",
+ "type": "integer"
+ },
+ "status": {
+ "description": "Status holds the value of the \"status\" field.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/export.Status"
+ }
+ ]
+ },
+ "updated_at": {
+ "description": "UpdatedAt holds the value of the \"updated_at\" field.",
+ "type": "string"
+ }
+ }
+ },
+ "ent.ExportEdges": {
+ "type": "object",
+ "properties": {
+ "group": {
+ "description": "Group holds the value of the group edge.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ent.Group"
+ }
+ ]
+ }
+ }
+ },
"ent.Group": {
"type": "object",
"properties": {
@@ -3984,6 +4255,13 @@
"$ref": "#/components/schemas/ent.EntityType"
}
},
+ "exports": {
+ "description": "Exports holds the value of the exports edge.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ent.Export"
+ }
+ },
"invitation_tokens": {
"description": "InvitationTokens holds the value of the invitation_tokens edge.",
"type": "array",
@@ -4571,6 +4849,36 @@
"TypeTime"
]
},
+ "export.Kind": {
+ "type": "string",
+ "enum": [
+ "export",
+ "export",
+ "import"
+ ],
+ "x-enum-varnames": [
+ "DefaultKind",
+ "KindExport",
+ "KindImport"
+ ]
+ },
+ "export.Status": {
+ "type": "string",
+ "enum": [
+ "pending",
+ "pending",
+ "running",
+ "completed",
+ "failed"
+ ],
+ "x-enum-varnames": [
+ "DefaultStatus",
+ "StatusPending",
+ "StatusRunning",
+ "StatusCompleted",
+ "StatusFailed"
+ ]
+ },
"repo.APIKeyCreate": {
"type": "object",
"required": [
@@ -5499,6 +5807,42 @@
}
}
},
+ "repo.ExportOut": {
+ "type": "object",
+ "properties": {
+ "artifactPath": {
+ "type": "string"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "error": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is \"export\" for server-produced backup artifacts, \"import\" for\nuser-uploaded restore zips. The lifecycle fields below behave the\nsame for both.",
+ "type": "string"
+ },
+ "progress": {
+ "type": "integer"
+ },
+ "sizeBytes": {
+ "type": "integer"
+ },
+ "status": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
"repo.Group": {
"type": "object",
"properties": {
@@ -6403,6 +6747,17 @@
}
}
},
+ "v1.Results-repo_ExportOut": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ },
"v1.TelemetryStatus": {
"type": "object",
"properties": {
diff --git a/backend/app/api/static/docs/openapi-3.yaml b/backend/app/api/static/docs/openapi-3.yaml
index 33db3b0a0..867abc4fa 100644
--- a/backend/app/api/static/docs/openapi-3.yaml
+++ b/backend/app/api/static/docs/openapi-3.yaml
@@ -753,6 +753,126 @@ paths:
responses:
"204":
description: No Content
+ /v1/group/exports:
+ get:
+ security:
+ - Bearer: []
+ description: Returns export job rows for the caller's group, newest first.
+ tags:
+ - Group
+ summary: List Collection Exports
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/v1.Results-repo_ExportOut"
+ post:
+ security:
+ - Bearer: []
+ description: Creates a pending export row and enqueues the build job. Poll the
+ listing endpoint or watch the WebSocket for completion.
+ tags:
+ - Group
+ summary: Start a Collection Export
+ responses:
+ "202":
+ description: Accepted
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/repo.ExportOut"
+ "/v1/group/exports/{id}":
+ get:
+ security:
+ - Bearer: []
+ tags:
+ - Group
+ summary: Get an Export
+ parameters:
+ - description: Export ID
+ name: id
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/repo.ExportOut"
+ delete:
+ security:
+ - Bearer: []
+ description: Deletes the export row and its blob artifact.
+ tags:
+ - Group
+ summary: Delete an Export
+ parameters:
+ - description: Export ID
+ name: id
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ "204":
+ description: No Content
+ "/v1/group/exports/{id}/download":
+ get:
+ security:
+ - Bearer: []
+ tags:
+ - Group
+ summary: Download an Export Artifact
+ parameters:
+ - description: Export ID
+ name: id
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: OK
+ content:
+ application/zip:
+ schema:
+ type: string
+ format: binary
+ /v1/group/import:
+ post:
+ security:
+ - Bearer: []
+ description: Uploads a collection-export zip and enqueues the import job. The
+ destination group must be empty. Returns the tracked import row so
+ clients can poll for progress.
+ tags:
+ - Group
+ summary: Import a Collection Zip
+ requestBody:
+ content:
+ multipart/form-data:
+ schema:
+ type: object
+ properties:
+ file:
+ description: Export zip
+ type: string
+ format: binary
+ required:
+ - file
+ required: true
+ responses:
+ "202":
+ description: Accepted
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/repo.ExportOut"
/v1/groups:
get:
security:
@@ -2410,6 +2530,55 @@ components:
description: Group holds the value of the group edge.
allOf:
- $ref: "#/components/schemas/ent.Group"
+ ent.Export:
+ type: object
+ properties:
+ artifact_path:
+ description: ArtifactPath holds the value of the "artifact_path" field.
+ type: string
+ created_at:
+ description: CreatedAt holds the value of the "created_at" field.
+ type: string
+ edges:
+ description: >-
+ Edges holds the relations/edges for other nodes in the graph.
+
+ The values are being populated by the ExportQuery when eager-loading is set.
+ allOf:
+ - $ref: "#/components/schemas/ent.ExportEdges"
+ error:
+ description: Error holds the value of the "error" field.
+ type: string
+ group_id:
+ description: GroupID holds the value of the "group_id" field.
+ type: string
+ id:
+ description: ID of the ent.
+ type: string
+ kind:
+ description: Kind holds the value of the "kind" field.
+ allOf:
+ - $ref: "#/components/schemas/export.Kind"
+ progress:
+ description: Progress holds the value of the "progress" field.
+ type: integer
+ size_bytes:
+ description: SizeBytes holds the value of the "size_bytes" field.
+ type: integer
+ status:
+ description: Status holds the value of the "status" field.
+ allOf:
+ - $ref: "#/components/schemas/export.Status"
+ updated_at:
+ description: UpdatedAt holds the value of the "updated_at" field.
+ type: string
+ ent.ExportEdges:
+ type: object
+ properties:
+ group:
+ description: Group holds the value of the group edge.
+ allOf:
+ - $ref: "#/components/schemas/ent.Group"
ent.Group:
type: object
properties:
@@ -2453,6 +2622,11 @@ components:
type: array
items:
$ref: "#/components/schemas/ent.EntityType"
+ exports:
+ description: Exports holds the value of the exports edge.
+ type: array
+ items:
+ $ref: "#/components/schemas/ent.Export"
invitation_tokens:
description: InvitationTokens holds the value of the invitation_tokens edge.
type: array
@@ -2862,6 +3036,30 @@ components:
- TypeNumber
- TypeBoolean
- TypeTime
+ export.Kind:
+ type: string
+ enum:
+ - export
+ - export
+ - import
+ x-enum-varnames:
+ - DefaultKind
+ - KindExport
+ - KindImport
+ export.Status:
+ type: string
+ enum:
+ - pending
+ - pending
+ - running
+ - completed
+ - failed
+ x-enum-varnames:
+ - DefaultStatus
+ - StatusPending
+ - StatusRunning
+ - StatusCompleted
+ - StatusFailed
repo.APIKeyCreate:
type: object
required:
@@ -3505,6 +3703,33 @@ components:
type: string
warrantyExpires:
type: string
+ repo.ExportOut:
+ type: object
+ properties:
+ artifactPath:
+ type: string
+ createdAt:
+ type: string
+ error:
+ type: string
+ groupId:
+ type: string
+ id:
+ type: string
+ kind:
+ description: |-
+ Kind is "export" for server-produced backup artifacts, "import" for
+ user-uploaded restore zips. The lifecycle fields below behave the
+ same for both.
+ type: string
+ progress:
+ type: integer
+ sizeBytes:
+ type: integer
+ status:
+ type: string
+ updatedAt:
+ type: string
repo.Group:
type: object
properties:
@@ -4108,6 +4333,13 @@ components:
token:
type: string
minLength: 20
+ v1.Results-repo_ExportOut:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: "#/components/schemas/repo.ExportOut"
v1.TelemetryStatus:
type: object
properties:
diff --git a/backend/app/api/static/docs/swagger.json b/backend/app/api/static/docs/swagger.json
index 0852613ec..257fa81d1 100644
--- a/backend/app/api/static/docs/swagger.json
+++ b/backend/app/api/static/docs/swagger.json
@@ -1152,6 +1152,183 @@
}
}
},
+ "/v1/group/exports": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Returns export job rows for the caller's group, newest first.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "List Collection Exports",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.Results-repo_ExportOut"
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Creates a pending export row and enqueues the build job. Poll the listing endpoint or watch the WebSocket for completion.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Start a Collection Export",
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Get an Export",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Deletes the export row and its blob artifact.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Delete an Export",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}/download": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/zip"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Download an Export Artifact",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "file"
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/import": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Uploads a collection-export zip and enqueues the import job. The destination group must be empty. Returns the tracked import row so clients can poll for progress.",
+ "consumes": [
+ "multipart/form-data"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Import a Collection Zip",
+ "parameters": [
+ {
+ "type": "file",
+ "description": "Export zip",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ }
+ },
"/v1/groups": {
"get": {
"security": [
@@ -3705,6 +3882,80 @@
}
}
},
+ "ent.Export": {
+ "type": "object",
+ "properties": {
+ "artifact_path": {
+ "description": "ArtifactPath holds the value of the \"artifact_path\" field.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "CreatedAt holds the value of the \"created_at\" field.",
+ "type": "string"
+ },
+ "edges": {
+ "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ExportQuery when eager-loading is set.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/ent.ExportEdges"
+ }
+ ]
+ },
+ "error": {
+ "description": "Error holds the value of the \"error\" field.",
+ "type": "string"
+ },
+ "group_id": {
+ "description": "GroupID holds the value of the \"group_id\" field.",
+ "type": "string"
+ },
+ "id": {
+ "description": "ID of the ent.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind holds the value of the \"kind\" field.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/export.Kind"
+ }
+ ]
+ },
+ "progress": {
+ "description": "Progress holds the value of the \"progress\" field.",
+ "type": "integer"
+ },
+ "size_bytes": {
+ "description": "SizeBytes holds the value of the \"size_bytes\" field.",
+ "type": "integer"
+ },
+ "status": {
+ "description": "Status holds the value of the \"status\" field.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/export.Status"
+ }
+ ]
+ },
+ "updated_at": {
+ "description": "UpdatedAt holds the value of the \"updated_at\" field.",
+ "type": "string"
+ }
+ }
+ },
+ "ent.ExportEdges": {
+ "type": "object",
+ "properties": {
+ "group": {
+ "description": "Group holds the value of the group edge.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/ent.Group"
+ }
+ ]
+ }
+ }
+ },
"ent.Group": {
"type": "object",
"properties": {
@@ -3762,6 +4013,13 @@
"$ref": "#/definitions/ent.EntityType"
}
},
+ "exports": {
+ "description": "Exports holds the value of the exports edge.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ent.Export"
+ }
+ },
"invitation_tokens": {
"description": "InvitationTokens holds the value of the invitation_tokens edge.",
"type": "array",
@@ -4349,6 +4607,36 @@
"TypeTime"
]
},
+ "export.Kind": {
+ "type": "string",
+ "enum": [
+ "export",
+ "export",
+ "import"
+ ],
+ "x-enum-varnames": [
+ "DefaultKind",
+ "KindExport",
+ "KindImport"
+ ]
+ },
+ "export.Status": {
+ "type": "string",
+ "enum": [
+ "pending",
+ "pending",
+ "running",
+ "completed",
+ "failed"
+ ],
+ "x-enum-varnames": [
+ "DefaultStatus",
+ "StatusPending",
+ "StatusRunning",
+ "StatusCompleted",
+ "StatusFailed"
+ ]
+ },
"repo.APIKeyCreate": {
"type": "object",
"required": [
@@ -5277,6 +5565,42 @@
}
}
},
+ "repo.ExportOut": {
+ "type": "object",
+ "properties": {
+ "artifactPath": {
+ "type": "string"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "error": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is \"export\" for server-produced backup artifacts, \"import\" for\nuser-uploaded restore zips. The lifecycle fields below behave the\nsame for both.",
+ "type": "string"
+ },
+ "progress": {
+ "type": "integer"
+ },
+ "sizeBytes": {
+ "type": "integer"
+ },
+ "status": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
"repo.Group": {
"type": "object",
"properties": {
@@ -6181,6 +6505,17 @@
}
}
},
+ "v1.Results-repo_ExportOut": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ },
"v1.TelemetryStatus": {
"type": "object",
"properties": {
diff --git a/backend/app/api/static/docs/swagger.yaml b/backend/app/api/static/docs/swagger.yaml
index b9e8b30c5..6332c8ec5 100644
--- a/backend/app/api/static/docs/swagger.yaml
+++ b/backend/app/api/static/docs/swagger.yaml
@@ -490,6 +490,54 @@ definitions:
- $ref: '#/definitions/ent.Group'
description: Group holds the value of the group edge.
type: object
+ ent.Export:
+ properties:
+ artifact_path:
+ description: ArtifactPath holds the value of the "artifact_path" field.
+ type: string
+ created_at:
+ description: CreatedAt holds the value of the "created_at" field.
+ type: string
+ edges:
+ allOf:
+ - $ref: '#/definitions/ent.ExportEdges'
+ description: |-
+ Edges holds the relations/edges for other nodes in the graph.
+ The values are being populated by the ExportQuery when eager-loading is set.
+ error:
+ description: Error holds the value of the "error" field.
+ type: string
+ group_id:
+ description: GroupID holds the value of the "group_id" field.
+ type: string
+ id:
+ description: ID of the ent.
+ type: string
+ kind:
+ allOf:
+ - $ref: '#/definitions/export.Kind'
+ description: Kind holds the value of the "kind" field.
+ progress:
+ description: Progress holds the value of the "progress" field.
+ type: integer
+ size_bytes:
+ description: SizeBytes holds the value of the "size_bytes" field.
+ type: integer
+ status:
+ allOf:
+ - $ref: '#/definitions/export.Status'
+ description: Status holds the value of the "status" field.
+ updated_at:
+ description: UpdatedAt holds the value of the "updated_at" field.
+ type: string
+ type: object
+ ent.ExportEdges:
+ properties:
+ group:
+ allOf:
+ - $ref: '#/definitions/ent.Group'
+ description: Group holds the value of the group edge.
+ type: object
ent.Group:
properties:
created_at:
@@ -531,6 +579,11 @@ definitions:
items:
$ref: '#/definitions/ent.EntityType'
type: array
+ exports:
+ description: Exports holds the value of the exports edge.
+ items:
+ $ref: '#/definitions/ent.Export'
+ type: array
invitation_tokens:
description: InvitationTokens holds the value of the invitation_tokens edge.
items:
@@ -933,6 +986,30 @@ definitions:
- TypeNumber
- TypeBoolean
- TypeTime
+ export.Kind:
+ enum:
+ - export
+ - export
+ - import
+ type: string
+ x-enum-varnames:
+ - DefaultKind
+ - KindExport
+ - KindImport
+ export.Status:
+ enum:
+ - pending
+ - pending
+ - running
+ - completed
+ - failed
+ type: string
+ x-enum-varnames:
+ - DefaultStatus
+ - StatusPending
+ - StatusRunning
+ - StatusCompleted
+ - StatusFailed
repo.APIKeyCreate:
properties:
expiresAt:
@@ -1576,6 +1653,33 @@ definitions:
required:
- name
type: object
+ repo.ExportOut:
+ properties:
+ artifactPath:
+ type: string
+ createdAt:
+ type: string
+ error:
+ type: string
+ groupId:
+ type: string
+ id:
+ type: string
+ kind:
+ description: |-
+ Kind is "export" for server-produced backup artifacts, "import" for
+ user-uploaded restore zips. The lifecycle fields below behave the
+ same for both.
+ type: string
+ progress:
+ type: integer
+ sizeBytes:
+ type: integer
+ status:
+ type: string
+ updatedAt:
+ type: string
+ type: object
repo.Group:
properties:
createdAt:
@@ -2179,6 +2283,13 @@ definitions:
- password
- token
type: object
+ v1.Results-repo_ExportOut:
+ properties:
+ items:
+ items:
+ $ref: '#/definitions/repo.ExportOut'
+ type: array
+ type: object
v1.TelemetryStatus:
properties:
enabled:
@@ -2935,6 +3046,117 @@ paths:
summary: Update Entity Type
tags:
- Entity Types
+ /v1/group/exports:
+ get:
+ description: Returns export job rows for the caller's group, newest first.
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/v1.Results-repo_ExportOut'
+ security:
+ - Bearer: []
+ summary: List Collection Exports
+ tags:
+ - Group
+ post:
+ description: Creates a pending export row and enqueues the build job. Poll the
+ listing endpoint or watch the WebSocket for completion.
+ produces:
+ - application/json
+ responses:
+ "202":
+ description: Accepted
+ schema:
+ $ref: '#/definitions/repo.ExportOut'
+ security:
+ - Bearer: []
+ summary: Start a Collection Export
+ tags:
+ - Group
+ /v1/group/exports/{id}:
+ delete:
+ description: Deletes the export row and its blob artifact.
+ parameters:
+ - description: Export ID
+ in: path
+ name: id
+ required: true
+ type: string
+ responses:
+ "204":
+ description: No Content
+ security:
+ - Bearer: []
+ summary: Delete an Export
+ tags:
+ - Group
+ get:
+ parameters:
+ - description: Export ID
+ in: path
+ name: id
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.ExportOut'
+ security:
+ - Bearer: []
+ summary: Get an Export
+ tags:
+ - Group
+ /v1/group/exports/{id}/download:
+ get:
+ parameters:
+ - description: Export ID
+ in: path
+ name: id
+ required: true
+ type: string
+ produces:
+ - application/zip
+ responses:
+ "200":
+ description: OK
+ schema:
+ type: file
+ security:
+ - Bearer: []
+ summary: Download an Export Artifact
+ tags:
+ - Group
+ /v1/group/import:
+ post:
+ consumes:
+ - multipart/form-data
+ description: Uploads a collection-export zip and enqueues the import job. The
+ destination group must be empty. Returns the tracked import row so clients
+ can poll for progress.
+ parameters:
+ - description: Export zip
+ in: formData
+ name: file
+ required: true
+ type: file
+ produces:
+ - application/json
+ responses:
+ "202":
+ description: Accepted
+ schema:
+ $ref: '#/definitions/repo.ExportOut'
+ security:
+ - Bearer: []
+ summary: Import a Collection Zip
+ tags:
+ - Group
/v1/groups:
delete:
produces:
diff --git a/backend/internal/core/services/all.go b/backend/internal/core/services/all.go
index 682cb8a36..2b2a9e0c5 100644
--- a/backend/internal/core/services/all.go
+++ b/backend/internal/core/services/all.go
@@ -3,6 +3,8 @@ package services
import (
"github.com/sysadminsmedia/homebox/backend/internal/core/currencies"
+ "github.com/sysadminsmedia/homebox/backend/internal/core/services/reporting/eventbus"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent"
"github.com/sysadminsmedia/homebox/backend/internal/data/repo"
"github.com/sysadminsmedia/homebox/backend/internal/sys/config"
"github.com/sysadminsmedia/homebox/backend/pkgs/mailer"
@@ -13,6 +15,7 @@ type AllServices struct {
Group *GroupService
Entities *EntityService
BackgroundService *BackgroundService
+ Exports *ExportService
Currencies *currencies.CurrencyRegistry
}
@@ -22,6 +25,11 @@ type options struct {
autoIncrementAssetID bool
currencies []currencies.Currency
notifierConfig *config.NotifierConf
+ bus *eventbus.EventBus
+ db *ent.Client
+ storage config.Storage
+ pubSubConn string
+ dialect string
mailer *mailer.Mailer
}
@@ -45,6 +53,18 @@ func WithNotifierConfig(v *config.NotifierConf) func(*options) {
}
}
+// WithExportPlumbing wires the dependencies the ExportService needs to dump
+// raw SQL through the ent client and to publish job messages.
+func WithExportPlumbing(bus *eventbus.EventBus, db *ent.Client, storage config.Storage, pubSubConn, dialect string) func(*options) {
+ return func(o *options) {
+ o.bus = bus
+ o.db = db
+ o.storage = storage
+ o.pubSubConn = pubSubConn
+ o.dialect = dialect
+ }
+}
+
// WithMailer hands the SMTP mailer to services that send mail (currently only
// password reset). A nil or unconfigured mailer disables those code paths
// rather than panicking.
@@ -97,6 +117,14 @@ func New(repos *repo.AllRepos, opts ...OptionsFunc) *AllServices {
latest: Latest{},
notifierConfig: options.notifierConfig,
},
+ Exports: &ExportService{
+ db: options.db,
+ repos: repos,
+ bus: options.bus,
+ storage: options.storage,
+ pubSubConn: options.pubSubConn,
+ dialect: options.dialect,
+ },
Currencies: currencies.NewCurrencyService(options.currencies),
}
}
diff --git a/backend/internal/core/services/main_test.go b/backend/internal/core/services/main_test.go
index 68e4f577f..ed7611764 100644
--- a/backend/internal/core/services/main_test.go
+++ b/backend/internal/core/services/main_test.go
@@ -82,7 +82,13 @@ func MainNoExit(m *testing.M) int {
currencies.CollectDefaults(),
)
- tSvc = New(tRepos, WithCurrencies(defaults))
+ tSvc = New(tRepos,
+ WithCurrencies(defaults),
+ WithExportPlumbing(tbus, tClient, config.Storage{
+ PrefixPath: "/",
+ ConnString: "file://" + os.TempDir(),
+ }, "mem://{{ .Topic }}", "sqlite3"),
+ )
defer func() { _ = client.Close() }()
bootstrap()
diff --git a/backend/internal/core/services/reporting/eventbus/eventbus.go b/backend/internal/core/services/reporting/eventbus/eventbus.go
index b4b742e5c..f512be489 100644
--- a/backend/internal/core/services/reporting/eventbus/eventbus.go
+++ b/backend/internal/core/services/reporting/eventbus/eventbus.go
@@ -14,6 +14,8 @@ const (
EventTagMutation Event = "tags.mutation"
EventEntityMutation Event = "entity.mutation"
EventUserMutation Event = "user.mutation"
+ EventExportMutation Event = "export.mutation"
+ EventImportMutation Event = "import.mutation"
)
type GroupMutationEvent struct {
@@ -40,6 +42,8 @@ func New() *EventBus {
EventTagMutation: {},
EventEntityMutation: {},
EventUserMutation: {},
+ EventExportMutation: {},
+ EventImportMutation: {},
},
}
}
diff --git a/backend/internal/core/services/service_exports.go b/backend/internal/core/services/service_exports.go
new file mode 100644
index 000000000..ac9cfda3c
--- /dev/null
+++ b/backend/internal/core/services/service_exports.go
@@ -0,0 +1,1324 @@
+package services
+
+import (
+ "archive/zip"
+ "context"
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/rs/zerolog/log"
+ "go.opentelemetry.io/otel"
+ "gocloud.dev/blob"
+ "gocloud.dev/pubsub"
+
+ "github.com/sysadminsmedia/homebox/backend/internal/core/services/reporting/eventbus"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/entity"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/notifier"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/tag"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/repo"
+ "github.com/sysadminsmedia/homebox/backend/internal/sys/config"
+ "github.com/sysadminsmedia/homebox/backend/pkgs/utils"
+)
+
+// ExportSchemaVersion is the on-disk version of the export zip layout.
+// Bump this when manifest/file shapes change in incompatible ways and import
+// can no longer round-trip an older export.
+const ExportSchemaVersion = 1
+
+// entitiesTable is the on-disk name of the entities table. Hoisted out of
+// the exportTables literal so the same string isn't repeated across every
+// FK/scope reference that points back at it.
+const entitiesTable = "entities"
+
+// Pubsub topic names used by the export and import workers.
+const (
+ TopicCollectionExport = "collection_export"
+ TopicCollectionImport = "collection_import"
+)
+
+// manifestFile is the name of the manifest entry inside the zip artifact.
+const manifestFile = "manifest.json"
+
+// attachmentsDir is the prefix inside the zip for attachment blobs.
+const attachmentsDir = "attachments/"
+
+// tableSpec describes how to extract one table's rows scoped to a group, and
+// how to handle foreign keys on import.
+//
+// New fields/columns flow through automatically: export uses SELECT * and
+// import builds INSERT from the JSON keys. Adding a new TABLE still requires
+// editing this list and (probably) the dependency graph; same for adding a
+// new FK column to an existing table that points at another exported table.
+type tableSpec struct {
+ // name is the SQL table name.
+ name string
+ // scope is a SQL WHERE fragment with one ? placeholder for the group ID.
+ // Use "" to fetch every row in the table.
+ scope string
+ // pkCol is the primary-key column name. "" for junction tables that have
+ // no single-column PK (e.g. tag_entities).
+ pkCol string
+ // groupCols are columns whose values are remapped to the destination
+ // group_id on import (the various "group_xxx" FK columns).
+ groupCols []string
+ // userCols are columns whose values are remapped to the importing user
+ // (notifiers being the only example).
+ userCols []string
+ // fkCols are immediate foreign keys: { column → target table }. The
+ // import looks each value up in the id map populated by earlier table
+ // inserts and substitutes the new id.
+ fkCols map[string]string
+ // deferCols are foreign keys whose target row may not exist yet at the
+ // time this row is inserted (self-references and forward-circular refs).
+ // They are nulled on insert and patched in a second pass.
+ deferCols map[string]string
+}
+
+// exportTables defines the export/import schema. Order matters: imports run
+// in this order, so each table's non-deferred FK targets must already be
+// present.
+//
+// Why every PK is remapped on import: a real "fresh server" import would
+// keep original IDs, but if the user re-imports a backup into the same
+// server (or a server that already received this export once), reusing PKs
+// causes UNIQUE-constraint violations. Always remapping keeps the invariant
+// simple.
+//
+// Self-referential FKs (entities.entity_children, tags.tag_children,
+// attachments.attachment_thumbnail) and forward-circular FKs
+// (entity_types.entity_type_default_template,
+// entity_templates.entity_template_location) live in deferCols so the first
+// INSERT pass can succeed; the second pass patches them with remapped IDs.
+//
+// Known gap: entity_templates.default_tag_ids is a JSON list of tag UUIDs.
+// We do not currently rewrite UUIDs nested inside JSON columns, so that
+// reference is lost on import. Templates and tags both still come across
+// individually; only the template→tag default association is dropped.
+var exportTables = []tableSpec{
+ {
+ name: "entity_types",
+ scope: "group_entity_types = ?",
+ pkCol: "id",
+ groupCols: []string{"group_entity_types"},
+ deferCols: map[string]string{"entity_type_default_template": "entity_templates"},
+ },
+ {
+ name: "entity_templates",
+ scope: "group_entity_templates = ?",
+ pkCol: "id",
+ groupCols: []string{"group_entity_templates"},
+ deferCols: map[string]string{"entity_template_location": entitiesTable},
+ },
+ {
+ name: "template_fields",
+ scope: "entity_template_fields IN (SELECT id FROM entity_templates WHERE group_entity_templates = ?)",
+ pkCol: "id",
+ fkCols: map[string]string{"entity_template_fields": "entity_templates"},
+ },
+ {
+ name: "tags",
+ scope: "group_tags = ?",
+ pkCol: "id",
+ groupCols: []string{"group_tags"},
+ deferCols: map[string]string{"tag_children": "tags"},
+ },
+ {
+ name: entitiesTable,
+ scope: "group_entities = ?",
+ pkCol: "id",
+ groupCols: []string{"group_entities"},
+ fkCols: map[string]string{"entity_type_entities": "entity_types"},
+ deferCols: map[string]string{"entity_children": entitiesTable},
+ },
+ {
+ name: "entity_fields",
+ scope: "entity_fields IN (SELECT id FROM entities WHERE group_entities = ?)",
+ pkCol: "id",
+ fkCols: map[string]string{"entity_fields": entitiesTable},
+ },
+ {
+ name: "maintenance_entries",
+ scope: "entity_id IN (SELECT id FROM entities WHERE group_entities = ?)",
+ pkCol: "id",
+ fkCols: map[string]string{"entity_id": entitiesTable},
+ },
+ {
+ // Two-part scope: the regular attachments owned by an entity in this
+ // group, PLUS the thumbnail rows those attachments point at (which
+ // have entity_attachments=NULL and are linked only via
+ // attachment_thumbnail on the parent). Each ? is the same gid;
+ // dumpTable/wipeGroup expand based on placeholder count.
+ name: "attachments",
+ scope: "entity_attachments IN (SELECT id FROM entities WHERE group_entities = ?)" +
+ " OR id IN (SELECT attachment_thumbnail FROM attachments" +
+ " WHERE attachment_thumbnail IS NOT NULL" +
+ " AND entity_attachments IN (SELECT id FROM entities WHERE group_entities = ?))",
+ pkCol: "id",
+ fkCols: map[string]string{"entity_attachments": entitiesTable},
+ deferCols: map[string]string{"attachment_thumbnail": "attachments"},
+ },
+ {
+ name: "tag_entities",
+ scope: "tag_id IN (SELECT id FROM tags WHERE group_tags = ?)",
+ fkCols: map[string]string{"tag_id": "tags", "entity_id": entitiesTable},
+ },
+ {
+ name: "notifiers",
+ scope: "group_id = ?",
+ pkCol: "id",
+ groupCols: []string{"group_id"},
+ userCols: []string{"user_id"},
+ },
+}
+
+// Manifest is the contents of manifest.json inside the export zip.
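+// An illustrative manifest (all values below are invented for the example):
+//
+//	{
+//	  "schemaVersion": 1,
+//	  "exportedAt": "2024-05-01T12:00:00Z",
+//	  "groupId": "00000000-0000-0000-0000-000000000000",
+//	  "counts": {"entities": 42, "tags": 7, "attachments": 3}
+//	}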
+type Manifest struct {
+ SchemaVersion int `json:"schemaVersion"`
+ ExportedAt time.Time `json:"exportedAt"`
+ GroupID uuid.UUID `json:"groupId"`
+ HomeboxVersion string `json:"homeboxVersion,omitempty"`
+ Counts map[string]int `json:"counts"`
+}
+
+// ExportService orchestrates the export and import jobs. It is wired into
+// AllServices and invoked by the pubsub workers in app/api/recurring.go.
+//
+// Every public method takes the requesting tenant's group id and refuses to
+// operate on data that does not belong to that group.
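+//
+// Worker-side sketch (assumes a pubsub subscribe loop like the one in
+// app/api/recurring.go; names here are illustrative, not the actual wiring):
+//
+//	gid, _ := uuid.Parse(msg.Metadata["group_id"])
+//	expID, _ := uuid.Parse(msg.Metadata["export_id"])
+//	svc.Exports.RunExport(ctx, expID, gid)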
+type ExportService struct {
+ db *ent.Client
+ repos *repo.AllRepos
+ bus *eventbus.EventBus
+ storage config.Storage
+ pubSubConn string
+ dialect string // "sqlite3" or "postgres"
+}
+
+// Enqueue creates a pending Export row for gid and publishes a job to the
+// export topic. The actual zip-building happens in the worker.
+func (s *ExportService) Enqueue(ctx context.Context, gid uuid.UUID) (repo.ExportOut, error) {
+ ctx, span := otel.Tracer("services").Start(ctx, "ExportService.Enqueue")
+ defer span.End()
+
+ out, err := s.repos.Exports.Create(ctx, gid)
+ if err != nil {
+ return out, err
+ }
+
+ if err := s.publishExportJob(ctx, gid, out.ID); err != nil {
+ _ = s.repos.Exports.SetFailed(ctx, gid, out.ID, "failed to enqueue: "+err.Error())
+ return out, err
+ }
+
+ s.publishMutation(gid)
+ return out, nil
+}
+
+// EnqueueImport creates a tracked import row pointing at the zip already
+// staged at uploadKey and publishes a job for the worker to pick up. The
+// returned row carries the ID the frontend can poll for progress.
+// uploadKey must live under "{gid}/imports/" — the worker re-validates
+// this before reading.
+func (s *ExportService) EnqueueImport(ctx context.Context, gid uuid.UUID, userID uuid.UUID, uploadKey string, sizeBytes int64) (repo.ExportOut, error) {
+ ctx, span := otel.Tracer("services").Start(ctx, "ExportService.EnqueueImport")
+ defer span.End()
+
+ row, err := s.repos.Exports.CreateImport(ctx, gid, uploadKey, sizeBytes)
+ if err != nil {
+ return row, err
+ }
+
+ if err := s.publishImportJob(ctx, gid, userID, row.ID); err != nil {
+ // Mark the row failed so the user sees what happened instead of a
+ // permanently-pending entry. Best-effort: if the SetFailed also
+ // fails we still return the publish error to the caller.
+ _ = s.repos.Exports.SetFailed(ctx, gid, row.ID, "failed to enqueue: "+err.Error())
+ return row, err
+ }
+ return row, nil
+}
+
+// IsGroupReadyForImport returns true when gid contains no user-created data
+// across any table that wipeGroup will delete. Default locations, tags, and
+// the lazily-created "Item"/"Location" entity_types from registration are
+// tolerated — the import wipes them before restoring. Any extra rows beyond
+// those seed baselines, or any presence in tables that aren't seeded
+// (entity_templates, notifiers), blocks the import so a one-click restore
+// can't silently destroy work.
+//
+// The seed-baseline counts are coarse: a user who deletes some default tags
+// and then adds the same number of custom tags would still pass the gate,
+// and the import would then wipe those custom tags. Acceptable trade-off
+// versus adding a per-row "is_seed" flag.
+//
+// Tables not checked explicitly are covered transitively: template_fields
+// require templates; entity_fields/attachments/maintenance_entries/tag_entities
+// require entities or tags.
+func (s *ExportService) IsGroupReadyForImport(ctx context.Context, gid uuid.UUID) (bool, error) {
+ items, err := s.db.Entity.Query().Where(
+ entity.HasGroupWith(group.ID(gid)),
+ entity.HasEntityTypeWith(entitytype.IsLocation(false)),
+ ).Count(ctx)
+ if err != nil {
+ return false, err
+ }
+ if items > 0 {
+ return false, nil
+ }
+
+ locations, err := s.db.Entity.Query().Where(
+ entity.HasGroupWith(group.ID(gid)),
+ entity.HasEntityTypeWith(entitytype.IsLocation(true)),
+ ).Count(ctx)
+ if err != nil {
+ return false, err
+ }
+ if locations > len(defaultLocations()) {
+ return false, nil
+ }
+
+ tags, err := s.db.Tag.Query().Where(tag.HasGroupWith(group.ID(gid))).Count(ctx)
+ if err != nil {
+ return false, err
+ }
+ if tags > len(defaultTags()) {
+ return false, nil
+ }
+
+ // Entity types are lazily created with names "Item" and "Location" the
+ // first time GetDefault is called for each. Anything beyond those two
+ // implies a user-customized type.
+ const defaultEntityTypeCount = 2
+ entityTypes, err := s.db.EntityType.Query().Where(entitytype.HasGroupWith(group.ID(gid))).Count(ctx)
+ if err != nil {
+ return false, err
+ }
+ if entityTypes > defaultEntityTypeCount {
+ return false, nil
+ }
+
+ templates, err := s.db.EntityTemplate.Query().Where(entitytemplate.HasGroupWith(group.ID(gid))).Count(ctx)
+ if err != nil {
+ return false, err
+ }
+ if templates > 0 {
+ return false, nil
+ }
+
+ notifiers, err := s.db.Notifier.Query().Where(notifier.HasGroupWith(group.ID(gid))).Count(ctx)
+ if err != nil {
+ return false, err
+ }
+ if notifiers > 0 {
+ return false, nil
+ }
+
+ return true, nil
+}
+
+// RunExport is invoked by the pubsub subscriber when an export job message is
+// received. It transitions the row through running → completed/failed and
+// uploads the artifact to blob storage.
+func (s *ExportService) RunExport(ctx context.Context, exportID, gid uuid.UUID) {
+ ctx, span := otel.Tracer("services").Start(ctx, "ExportService.RunExport")
+ defer span.End()
+
+ exp, err := s.repos.Exports.Get(ctx, gid, exportID)
+ if err != nil {
+ log.Err(err).Stringer("export_id", exportID).Stringer("gid", gid).Msg("export job: row not found or wrong group")
+ return
+ }
+ if exp.Status != "pending" {
+ log.Warn().Stringer("export_id", exportID).Str("status", exp.Status).Msg("export job: not pending, skipping")
+ return
+ }
+
+ if err := s.repos.Exports.SetRunning(ctx, gid, exportID); err != nil {
+ log.Err(err).Msg("export job: failed to mark running")
+ return
+ }
+ s.publishMutation(gid)
+
+ artifactPath, sizeBytes, err := s.buildArtifact(ctx, exportID, gid)
+ if err != nil {
+ log.Err(err).Stringer("export_id", exportID).Msg("export job: failed")
+ _ = s.repos.Exports.SetFailed(ctx, gid, exportID, err.Error())
+ s.publishMutation(gid)
+ return
+ }
+
+ if err := s.repos.Exports.SetCompleted(ctx, gid, exportID, artifactPath, sizeBytes); err != nil {
+ log.Err(err).Msg("export job: failed to mark completed")
+ }
+ s.publishMutation(gid)
+}
+
+// buildArtifact does the actual zip generation: dump every group-scoped
+// table to JSON, copy attachment blobs, write manifest, upload to blob
+// storage. Returns the blob key and total size.
+func (s *ExportService) buildArtifact(ctx context.Context, exportID, gid uuid.UUID) (string, int64, error) {
+ tmp, err := os.CreateTemp("", fmt.Sprintf("homebox-export-%s-*.zip", exportID))
+ if err != nil {
+ return "", 0, fmt.Errorf("create temp file: %w", err)
+ }
+ tmpPath := tmp.Name()
+ defer func() {
+ _ = tmp.Close()
+ _ = os.Remove(tmpPath)
+ }()
+
+ zw := zip.NewWriter(tmp)
+
+ counts := make(map[string]int)
+ dbSql := s.db.Sql()
+ for i, spec := range exportTables {
+ rows, err := dumpTable(ctx, dbSql, s.dialect, spec, gid)
+ if err != nil {
+ _ = zw.Close()
+ return "", 0, fmt.Errorf("dump %s: %w", spec.name, err)
+ }
+ counts[spec.name] = len(rows)
+
+ w, err := zw.Create(spec.name + ".json")
+ if err != nil {
+ _ = zw.Close()
+ return "", 0, fmt.Errorf("zip create %s.json: %w", spec.name, err)
+ }
+ enc := json.NewEncoder(w)
+ if err := enc.Encode(rows); err != nil {
+ _ = zw.Close()
+ return "", 0, fmt.Errorf("zip encode %s.json: %w", spec.name, err)
+ }
+
+ // Coarse-grained progress: 0..80% spans the table dumps, 80..95% the
+ // attachment copies, 95..100% the upload.
+ pct := int(float64(i+1) / float64(len(exportTables)) * 80)
+ _ = s.repos.Exports.SetProgress(ctx, gid, exportID, pct)
+ }
+
+ // Copy attachment blobs into the zip.
+ if err := s.copyAttachmentBlobs(ctx, zw, gid); err != nil {
+ _ = zw.Close()
+ return "", 0, fmt.Errorf("copy attachments: %w", err)
+ }
+ _ = s.repos.Exports.SetProgress(ctx, gid, exportID, 95)
+
+ // Manifest last so we know the counts.
+ mf := Manifest{
+ SchemaVersion: ExportSchemaVersion,
+ ExportedAt: time.Now().UTC(),
+ GroupID: gid,
+ Counts: counts,
+ }
+ mw, err := zw.Create(manifestFile)
+ if err != nil {
+ _ = zw.Close()
+ return "", 0, fmt.Errorf("zip create manifest: %w", err)
+ }
+ if err := json.NewEncoder(mw).Encode(mf); err != nil {
+ _ = zw.Close()
+ return "", 0, fmt.Errorf("zip encode manifest: %w", err)
+ }
+
+ if err := zw.Close(); err != nil {
+ return "", 0, fmt.Errorf("zip close: %w", err)
+ }
+
+ // Upload to blob storage.
+ if _, err := tmp.Seek(0, io.SeekStart); err != nil {
+ return "", 0, fmt.Errorf("seek temp: %w", err)
+ }
+ stat, err := tmp.Stat()
+ if err != nil {
+ return "", 0, fmt.Errorf("stat temp: %w", err)
+ }
+ size := stat.Size()
+
+ artifactPath := fmt.Sprintf("%s/exports/%s.zip", gid.String(), exportID.String())
+ bucket, err := blob.OpenBucket(ctx, s.repos.Attachments.GetConnString())
+ if err != nil {
+ return "", 0, fmt.Errorf("open bucket: %w", err)
+ }
+ defer func() { _ = bucket.Close() }()
+
+ bw, err := bucket.NewWriter(ctx, s.repos.Attachments.GetFullPath(artifactPath), &blob.WriterOptions{
+ ContentType: "application/zip",
+ })
+ if err != nil {
+ return "", 0, fmt.Errorf("blob writer: %w", err)
+ }
+ if _, err := io.Copy(bw, tmp); err != nil {
+ _ = bw.Close()
+ return "", 0, fmt.Errorf("blob copy: %w", err)
+ }
+ if err := bw.Close(); err != nil {
+ return "", 0, fmt.Errorf("blob close: %w", err)
+ }
+
+ return artifactPath, size, nil
+}
+
+// copyAttachmentBlobs streams every attachment blob in the group — including
+// thumbnail rows — into the zip under attachments/{attachment_id}. Lookup on
+// the import side uses the file's stem (the attachment UUID) via the id map.
+//
+// Reuses the attachments tableSpec scope so the row dump and the blob copy
+// can never disagree about which attachments belong to the group.
+func (s *ExportService) copyAttachmentBlobs(ctx context.Context, zw *zip.Writer, gid uuid.UUID) error {
+ var spec tableSpec
+ for _, t := range exportTables {
+ if t.name == "attachments" {
+ spec = t
+ break
+ }
+ }
+
+ q := "SELECT id, path FROM attachments WHERE " + rebindPlaceholders(spec.scope, s.dialect)
+ args := make([]any, 0, strings.Count(spec.scope, "?"))
+ for i := 0; i < cap(args); i++ {
+ args = append(args, gid.String())
+ }
+ rows, err := s.db.Sql().QueryContext(ctx, q, args...)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = rows.Close() }()
+ type attRef struct{ id, path string }
+ var refs []attRef
+ for rows.Next() {
+ // blobPath, not "path", so the imported path package isn't shadowed.
+ var id, blobPath string
+ if err := rows.Scan(&id, &blobPath); err != nil {
+ return err
+ }
+ if blobPath == "" {
+ continue
+ }
+ refs = append(refs, attRef{id: id, path: blobPath})
+ }
+ if err := rows.Err(); err != nil {
+ return err
+ }
+
+ bucket, err := blob.OpenBucket(ctx, s.repos.Attachments.GetConnString())
+ if err != nil {
+ return err
+ }
+ defer func() { _ = bucket.Close() }()
+
+ for _, ref := range refs {
+ r, err := bucket.NewReader(ctx, s.repos.Attachments.GetFullPath(ref.path), nil)
+ if err != nil {
+ // Don't fail the whole export for one missing blob; just skip it.
+ // On import the attachment row will exist but the blob won't —
+ // same end state as a thumbnail-generation failure today.
+ log.Warn().Err(err).Str("path", ref.path).Msg("export: attachment blob missing, skipping")
+ continue
+ }
+ w, err := zw.Create(attachmentsDir + ref.id)
+ if err != nil {
+ _ = r.Close()
+ return err
+ }
+ if _, err := io.Copy(w, r); err != nil {
+ _ = r.Close()
+ return err
+ }
+ _ = r.Close()
+ }
+ return nil
+}
+
+// publishExportJob sends a message on the export topic.
+func (s *ExportService) publishExportJob(ctx context.Context, gid, exportID uuid.UUID) error {
+ conn, err := utils.GenerateSubPubConn(s.pubSubConn, TopicCollectionExport)
+ if err != nil {
+ return err
+ }
+ topic, err := pubsub.OpenTopic(ctx, conn)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = topic.Shutdown(ctx) }()
+ return topic.Send(ctx, &pubsub.Message{
+ Body: []byte("collection_export:" + exportID.String()),
+ Metadata: map[string]string{
+ "group_id": gid.String(),
+ "export_id": exportID.String(),
+ },
+ })
+}
+
+// publishImportJob sends a message on the import topic. The worker loads
+// the tracked import row by importID, reads the staged upload from blob
+// storage at the row's artifact_path, unzips, restores into the group
+// identified by gid, then deletes the staged upload.
+func (s *ExportService) publishImportJob(ctx context.Context, gid, userID, importID uuid.UUID) error {
+ conn, err := utils.GenerateSubPubConn(s.pubSubConn, TopicCollectionImport)
+ if err != nil {
+ return err
+ }
+ topic, err := pubsub.OpenTopic(ctx, conn)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = topic.Shutdown(ctx) }()
+ return topic.Send(ctx, &pubsub.Message{
+ Body: []byte("collection_import:" + gid.String()),
+ Metadata: map[string]string{
+ "group_id": gid.String(),
+ "user_id": userID.String(),
+ "import_id": importID.String(),
+ },
+ })
+}
+
+func (s *ExportService) publishMutation(gid uuid.UUID) {
+ if s.bus != nil {
+ s.bus.Publish(eventbus.EventExportMutation, eventbus.GroupMutationEvent{GID: gid})
+ }
+}
+
+// dumpTable runs SELECT * for spec.scope and returns each row as a JSON-
+// friendly map. UUIDs and JSON-blob columns come back from sqlite as []byte;
+// we coerce to string here so json.Marshal does the right thing.
+//
+// Scope clauses may contain multiple ? placeholders (e.g. for an OR-of-
+// subqueries). Each placeholder is filled with the same gid — none of the
+// existing scopes need to vary by placeholder.
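+//
+// Each returned row is a plain map keyed by column name; a tags row, for
+// example, comes back roughly as {"id": "<uuid>", "name": "tools",
+// "group_tags": "<gid>"} (values illustrative).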
+func dumpTable(ctx context.Context, db *sql.DB, dialect string, spec tableSpec, gid uuid.UUID) ([]map[string]any, error) {
+ q := "SELECT * FROM " + spec.name
+ var args []any
+ if spec.scope != "" {
+ q += " WHERE " + rebindPlaceholders(spec.scope, dialect)
+ for i := 0; i < strings.Count(spec.scope, "?"); i++ {
+ args = append(args, gid.String())
+ }
+ }
+ rows, err := db.QueryContext(ctx, q, args...)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = rows.Close() }()
+
+ cols, err := rows.Columns()
+ if err != nil {
+ return nil, err
+ }
+
+ out := make([]map[string]any, 0)
+ for rows.Next() {
+ vals := make([]any, len(cols))
+ ptrs := make([]any, len(cols))
+ for i := range vals {
+ ptrs[i] = &vals[i]
+ }
+ if err := rows.Scan(ptrs...); err != nil {
+ return nil, err
+ }
+ row := make(map[string]any, len(cols))
+ for i, col := range cols {
+ row[col] = normalizeScan(vals[i])
+ }
+ out = append(out, row)
+ }
+ return out, rows.Err()
+}
+
+// normalizeScan converts driver-returned values into JSON-marshallable
+// shapes. The two big ones: []byte (UUIDs and JSON blobs in sqlite) becomes
+// string, and time.Time stays as time.Time so json.Marshal renders RFC3339.
+func normalizeScan(v any) any {
+ switch x := v.(type) {
+ case []byte:
+ return string(x)
+ default:
+ return v
+ }
+}
+
+// rebindPlaceholders rewrites "?" to "$1", "$2", … for postgres. SQLite uses
+// "?" natively. Assumes no "?" appears inside a quoted string literal in the
+// clause; every scope in exportTables satisfies this.
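+//
+// For example, "a = ? OR b = ?" becomes "a = $1 OR b = $2" under postgres.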
+func rebindPlaceholders(s, dialect string) string {
+ if dialect != "postgres" {
+ return s
+ }
+ var b strings.Builder
+ n := 0
+ for _, ch := range s {
+ if ch == '?' {
+ n++
+ fmt.Fprintf(&b, "$%d", n)
+ continue
+ }
+ b.WriteRune(ch)
+ }
+ return b.String()
+}
+
+// =============================================================================
+// Import path
+// =============================================================================
+
+// RunImport is invoked by the pubsub subscriber when an import job message
+// is received. It loads the tracked import row, validates the staged
+// upload, asserts the destination group is empty, and replays every row.
+// Status/progress on the row drives the polling UI on the frontend.
+func (s *ExportService) RunImport(ctx context.Context, gid, userID, importID uuid.UUID) {
+ ctx, span := otel.Tracer("services").Start(ctx, "ExportService.RunImport")
+ defer span.End()
+
+ row, err := s.repos.Exports.Get(ctx, gid, importID)
+ if err != nil {
+ log.Err(err).Stringer("import_id", importID).Stringer("gid", gid).Msg("import job: row not found or wrong group")
+ return
+ }
+ if row.Kind != "import" {
+ log.Error().Stringer("import_id", importID).Str("kind", row.Kind).Msg("import job: row is not an import, refusing")
+ return
+ }
+ if row.Status != "pending" {
+ log.Warn().Stringer("import_id", importID).Str("status", row.Status).Msg("import job: not pending, skipping")
+ return
+ }
+ uploadKey := row.ArtifactPath
+
+ // Hard scope check: refuse anything that doesn't live under the caller's
+ // group prefix. Defence in depth — the handler already enforced this.
+ prefix := gid.String() + "/imports/"
+ if !strings.HasPrefix(uploadKey, prefix) {
+ log.Error().Str("upload_key", uploadKey).Stringer("gid", gid).Msg("import job: upload key outside group prefix, refusing")
+ _ = s.repos.Exports.SetFailed(ctx, gid, importID, "upload outside group prefix")
+ s.publishImportFinished(gid)
+ return
+ }
+
+ if err := s.repos.Exports.SetRunning(ctx, gid, importID); err != nil {
+ log.Err(err).Stringer("import_id", importID).Msg("import job: failed to mark running")
+ return
+ }
+ s.publishImportFinished(gid)
+
+ if err := s.runImport(ctx, gid, userID, importID, uploadKey); err != nil {
+ log.Err(err).Stringer("gid", gid).Msg("import job: failed")
+ _ = s.repos.Exports.SetFailed(ctx, gid, importID, err.Error())
+ } else {
+ // On success the upload zip has been fully restored; keep the row
+ // size_bytes (set when the upload was staged) and just flip status.
+ if err := s.repos.Exports.SetCompleted(ctx, gid, importID, uploadKey, row.SizeBytes); err != nil {
+ log.Err(err).Stringer("import_id", importID).Msg("import job: failed to mark completed")
+ }
+ }
+
+ // Cleanup the staging blob whether the import succeeded or not — keeping
+ // it around just lets a second delivery race against the populated DB.
+ if err := s.deleteUpload(ctx, uploadKey); err != nil {
+ log.Warn().Err(err).Str("upload_key", uploadKey).Msg("import job: failed to clean staging upload")
+ }
+
+ s.publishImportFinished(gid)
+}
+
+func (s *ExportService) runImport(ctx context.Context, gid, userID, importID uuid.UUID, uploadKey string) error {
+ // setProgress is best-effort: a failed status update is logged but never
+ // aborts the import itself — progress is observability, not correctness.
+ setProgress := func(pct int) {
+ if err := s.repos.Exports.SetProgress(ctx, gid, importID, pct); err != nil {
+ log.Warn().Err(err).Stringer("import_id", importID).Int("pct", pct).Msg("import job: failed to update progress")
+ }
+ s.publishImportFinished(gid)
+ }
+
+ // Precondition: no items (non-location entities) in this group. Default
+ // seeded locations/tags/entity_types are fine; we wipe them below before
+ // restoring.
+ ready, err := s.IsGroupReadyForImport(ctx, gid)
+ if err != nil {
+ return fmt.Errorf("import precondition: %w", err)
+ }
+ if !ready {
+ return errors.New("import requires a collection with no items")
+ }
+
+ // Stream the upload to a temp file: zip.NewReader needs an io.ReaderAt
+ // plus the total size, which the blob reader alone can't provide.
+ bucket, err := blob.OpenBucket(ctx, s.repos.Attachments.GetConnString())
+ if err != nil {
+ return fmt.Errorf("open bucket: %w", err)
+ }
+ defer func() { _ = bucket.Close() }()
+
+ r, err := bucket.NewReader(ctx, s.repos.Attachments.GetFullPath(uploadKey), nil)
+ if err != nil {
+ return fmt.Errorf("open upload: %w", err)
+ }
+ defer func() { _ = r.Close() }()
+
+ tmp, err := os.CreateTemp("", "homebox-import-*.zip")
+ if err != nil {
+ return fmt.Errorf("create temp: %w", err)
+ }
+ tmpPath := tmp.Name()
+ defer func() {
+ _ = tmp.Close()
+ _ = os.Remove(tmpPath)
+ }()
+ size, err := io.Copy(tmp, r)
+ if err != nil {
+ return fmt.Errorf("download upload: %w", err)
+ }
+
+ zr, err := zip.NewReader(tmp, size)
+ if err != nil {
+ return fmt.Errorf("open zip: %w", err)
+ }
+
+ if err := enforceZipUncompressedLimit(zr, size); err != nil {
+ return err
+ }
+
+ mf, err := readManifest(zr)
+ if err != nil {
+ return fmt.Errorf("read manifest: %w", err)
+ }
+ if mf.SchemaVersion != ExportSchemaVersion {
+ return fmt.Errorf("unsupported schema version %d (this server expects %d)", mf.SchemaVersion, ExportSchemaVersion)
+ }
+ // Progress budget: 0–5% download + manifest, ~5–80% reserved for the DB
+ // phase (reported once after commit because intermediate setProgress
+ // calls would deadlock on SQLite — the write tx holds the single
+ // writer lock and ent's pool can't take it), 80–95% per-file blob
+ // restore, 95–100% finalization.
+ setProgress(5)
+
+ // All DB work — the seed wipe, every row insert, and the deferred FK
+ // patches — runs in a single tx so the group never sits in a half-imported
+ // state. If anything below fails, the deferred Rollback unwinds the wipe
+ // too. Blob uploads and bus notifications run only after Commit because
+ // (a) blobs are not transactional, and (b) restoreAttachmentBlobs needs to
+ // look up rows via the ent client, which uses its own pool and would not
+ // see uncommitted writes under Postgres READ COMMITTED.
+ tx, err := s.db.Sql().BeginTx(ctx, nil)
+ if err != nil {
+ return fmt.Errorf("begin import tx: %w", err)
+ }
+ defer func() { _ = tx.Rollback() }()
+
+ // Wipe the seeded defaults (locations, tags, entity_types, notifiers,
+ // etc.) so the imported collection isn't mixed with the auto-created
+ // starter content. The empty-group precondition above guarantees this is
+ // safe — there are no user-created items to lose.
+ if err := wipeGroup(ctx, tx, s.dialect, gid); err != nil {
+ return fmt.Errorf("wipe before import: %w", err)
+ }
+
+ idMap, err := s.replayImportRows(ctx, tx, zr, gid, userID, mf.GroupID)
+ if err != nil {
+ return err
+ }
+
+ if err := tx.Commit(); err != nil {
+ return fmt.Errorf("commit import: %w", err)
+ }
+ setProgress(80)
+
+ // Restore attachment blobs. The zip names them attachments/{old_uuid};
+ // look up the new attachment row through the id map. Must run post-commit
+ // because the lookup goes through the ent client, which uses a different
+ // connection than our tx.
+ blobProgress := func(done, total int) {
+ if total <= 0 {
+ return
+ }
+ setProgress(80 + int(float64(done)/float64(total)*15))
+ }
+ if err := s.restoreAttachmentBlobs(ctx, zr, idMap["attachments"], blobProgress); err != nil {
+ // Compensating cleanup. The tx is already committed, so a partial blob
+ // restore leaves rows pointing at blobs that don't exist on disk and —
+ // because IsGroupReadyForImport rejects non-empty groups — blocks any
+ // retry. Wipe the freshly-imported rows so the group goes back to its
+ // pre-import (empty) state. Successfully uploaded blobs are left on
+ // disk; on retry the same content hashes will write to the same paths.
+ if werr := wipeGroup(ctx, s.db.Sql(), s.dialect, gid); werr != nil {
+ log.Err(werr).Stringer("gid", gid).Msg("import job: blob restore failed and rollback wipe also failed; group left in partially imported state")
+ }
+ return fmt.Errorf("restore attachments: %w", err)
+ }
+ setProgress(95)
+
+ // Notify the frontend that lots of things just appeared.
+ if s.bus != nil {
+ s.bus.Publish(eventbus.EventEntityMutation, eventbus.GroupMutationEvent{GID: gid})
+ s.bus.Publish(eventbus.EventTagMutation, eventbus.GroupMutationEvent{GID: gid})
+ }
+ return nil
+}
+
+// restoreAttachmentBlobs iterates attachments/* in the zip and writes each
+// file to blob storage at the path recorded on the matching attachment row.
+// Filenames in the zip use the source-side attachment UUID; idMap translates
+// to the new UUID assigned during the row import. The optional onProgress
+// callback is invoked after each blob is written so the import row's
+// progress field stays current during what can be the slowest phase of a
+// restore.
+func (s *ExportService) restoreAttachmentBlobs(ctx context.Context, zr *zip.Reader, idMap map[string]string, onProgress func(done, total int)) error {
+ bucket, err := blob.OpenBucket(ctx, s.repos.Attachments.GetConnString())
+ if err != nil {
+ return err
+ }
+ defer func() { _ = bucket.Close() }()
+
+ // Pre-count blob entries so onProgress can report a meaningful ratio.
+ total := 0
+ for _, f := range zr.File {
+ if strings.HasPrefix(f.Name, attachmentsDir) && !f.FileInfo().IsDir() {
+ total++
+ }
+ }
+ done := 0
+
+ for _, f := range zr.File {
+ if !strings.HasPrefix(f.Name, attachmentsDir) || f.FileInfo().IsDir() {
+ continue
+ }
+ oldIDStr := strings.TrimPrefix(f.Name, attachmentsDir)
+ newIDStr, ok := idMap[oldIDStr]
+ if !ok {
+ log.Warn().Str("name", f.Name).Msg("import: no attachment row matches blob, skipping")
+ continue
+ }
+ id, err := uuid.Parse(newIDStr)
+ if err != nil {
+ log.Warn().Str("name", f.Name).Msg("import: remapped attachment id is not a uuid")
+ continue
+ }
+ att, err := s.db.Attachment.Get(ctx, id)
+ if err != nil {
+ log.Warn().Err(err).Stringer("attachment_id", id).Msg("import: attachment row missing for blob")
+ continue
+ }
+ zf, err := f.Open()
+ if err != nil {
+ return err
+ }
+ w, err := bucket.NewWriter(ctx, s.repos.Attachments.GetFullPath(att.Path), &blob.WriterOptions{
+ ContentType: att.MimeType,
+ })
+ if err != nil {
+ _ = zf.Close()
+ return err
+ }
+ if _, err := io.Copy(w, zf); err != nil {
+ _ = w.Close()
+ _ = zf.Close()
+ return err
+ }
+ if err := w.Close(); err != nil {
+ _ = zf.Close()
+ return err
+ }
+ _ = zf.Close()
+ done++
+ if onProgress != nil {
+ onProgress(done, total)
+ }
+ }
+ return nil
+}
+
+// deleteUpload removes the staged import zip from blob storage.
+func (s *ExportService) deleteUpload(ctx context.Context, uploadKey string) error {
+ bucket, err := blob.OpenBucket(ctx, s.repos.Attachments.GetConnString())
+ if err != nil {
+ return err
+ }
+ defer func() { _ = bucket.Close() }()
+ return bucket.Delete(ctx, s.repos.Attachments.GetFullPath(uploadKey))
+}
+
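+// publishImportFinished publishes the import mutation event on the bus.
+// Despite the name it fires on every state change (running, progress ticks,
+// and terminal states) so WebSocket subscribers can refresh.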
+func (s *ExportService) publishImportFinished(gid uuid.UUID) {
+ if s.bus != nil {
+ s.bus.Publish(eventbus.EventImportMutation, eventbus.GroupMutationEvent{GID: gid})
+ }
+}
+
+// readManifest pulls and parses manifest.json out of the zip.
+func readManifest(zr *zip.Reader) (Manifest, error) {
+ var mf Manifest
+ for _, f := range zr.File {
+ if f.Name != manifestFile {
+ continue
+ }
+ r, err := f.Open()
+ if err != nil {
+ return mf, err
+ }
+ defer func() { _ = r.Close() }()
+ return mf, json.NewDecoder(r).Decode(&mf)
+ }
+ return mf, errors.New("manifest.json missing from zip")
+}
+
+// readTableJSON loads a single table file from the zip, tolerating its
+// absence (returns a nil slice — exports may legitimately omit a table
+// with zero rows in future versions).
+func readTableJSON(zr *zip.Reader, name string) ([]map[string]any, error) {
+ for _, f := range zr.File {
+ if f.Name != name {
+ continue
+ }
+ r, err := f.Open()
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = r.Close() }()
+ var out []map[string]any
+ if err := json.NewDecoder(r).Decode(&out); err != nil {
+ return nil, err
+ }
+ return out, nil
+ }
+ return nil, nil
+}
+
+// sqlExecer is the minimal interface used by the import path so the same
+// helpers work against a *sql.DB (auto-commit) and a *sql.Tx (transactional
+// import). Both stdlib types implement ExecContext with this signature.
+type sqlExecer interface {
+ ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
+}
+
+// insertRow builds and runs an INSERT for one row's worth of column-value
+// pairs. Self-maintaining: every JSON key becomes a column.
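+//
+// For a hypothetical row {"id": "abc", "name": "tools"} on table "tags" this
+// generates, under sqlite: INSERT INTO "tags" ("id", "name") VALUES (?, ?).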
+func insertRow(ctx context.Context, db sqlExecer, dialect, table string, row map[string]any) error {
+ if len(row) == 0 {
+ return nil
+ }
+ // Reject any attacker-shaped identifiers before they reach the SQL
+ // builder. Column names flow from JSON keys in an attacker-controlled
+ // zip; quoteIdent also escapes embedded quotes, but rejecting up front
+ // gives a clear error and keeps the SQL we generate trivial to audit.
+ if !isValidSQLIdent(table) {
+ return fmt.Errorf("invalid table identifier %q", table)
+ }
+ cols := make([]string, 0, len(row))
+ for k := range row {
+ if !isValidSQLIdent(k) {
+ return fmt.Errorf("invalid column identifier %q on table %q", k, table)
+ }
+ cols = append(cols, k)
+ }
+ // Stable column order so generated SQL is deterministic in tests/logs.
+ sortStrings(cols)
+
+ args := make([]any, 0, len(cols))
+ placeholders := make([]string, 0, len(cols))
+ for i, c := range cols {
+ args = append(args, row[c])
+ placeholders = append(placeholders, placeholder(dialect, i+1))
+ }
+
+ q := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)",
+ quoteIdent(dialect, table),
+ joinQuoted(dialect, cols),
+ strings.Join(placeholders, ", "),
+ )
+ _, err := db.ExecContext(ctx, q, args...)
+ return err
+}
+
+// placeholder returns the dialect-specific positional placeholder.
+func placeholder(dialect string, n int) string {
+ if dialect == "postgres" {
+ return fmt.Sprintf("$%d", n)
+ }
+ return "?"
+}
+
+// quoteIdent quotes an identifier. Both supported dialects accept double
+// quotes around identifiers — including sqlite for reserved words like
+// "primary" on the attachments table. Any embedded double-quote is escaped
+// per the SQL standard (and shared dialect behavior) by doubling it, so a
+// stray quote can never close the identifier and inject SQL. Callers should
+// still validate identifiers via isValidSQLIdent for attacker-supplied input;
+// this escape is defence-in-depth, not the primary gate.
+func quoteIdent(_ string, ident string) string {
+ return `"` + strings.ReplaceAll(ident, `"`, `""`) + `"`
+}
+
+// isValidSQLIdent returns true if s is a syntactically conservative SQL
+// identifier: an ASCII letter or underscore followed by letters, digits, or
+// underscores. The import path runs JSON map keys through this before they
+// are interpolated as column names, so a hostile export zip cannot smuggle
+// SQL into a table name or column list. dumpTable populates these keys from
+// rows.Columns(), which only ever returns plain identifiers, so every legit
+// key satisfies this check.
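+//
+// e.g. "group_tags" and "_v2" pass; "1col", "drop table", and any name
+// containing quotes or punctuation do not.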
+func isValidSQLIdent(s string) bool {
+ if s == "" {
+ return false
+ }
+ for i, r := range s {
+ switch {
+ case r >= 'a' && r <= 'z',
+ r >= 'A' && r <= 'Z',
+ r == '_':
+ // always allowed
+ case (r >= '0' && r <= '9') && i > 0:
+ // digits allowed anywhere except the first character
+ default:
+ return false
+ }
+ }
+ return true
+}
+
+func joinQuoted(dialect string, cols []string) string {
+ out := make([]string, len(cols))
+ for i, c := range cols {
+ out[i] = quoteIdent(dialect, c)
+ }
+ return strings.Join(out, ", ")
+}
+
+// rewriteBlobPath swaps the leading "{srcGid}/" segment of an attachment's
+// blob key for "{dstGid}/". Anything else (including paths without that
+// prefix) is returned unchanged so we never mangle data that happens to
+// already point at the destination, or paths from a future scheme that
+// doesn't lead with the gid.
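+//
+// e.g. "<srcGid>/documents/abc" becomes "<dstGid>/documents/abc", while
+// "some/other/path" is returned untouched.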
+func rewriteBlobPath(path string, srcGid, dstGid uuid.UUID) string {
+ prefix := srcGid.String() + "/"
+ if !strings.HasPrefix(path, prefix) {
+ return path
+ }
+ return dstGid.String() + "/" + strings.TrimPrefix(path, prefix)
+}
+
+// enforceZipUncompressedLimit rejects zip bombs before any member is opened.
+// Legitimate exports compress ~3-10x (JSON tables compress well, attachment
+// binaries barely at all); 100x the compressed upload is a generous ceiling
+// that still flags any pathological expansion ratio. Both per-entry and
+// cumulative caps are checked since either alone is bypassable. The declared
+// uncompressed size in the central directory is what attackers control, but
+// typical bombs declare accurate-but-tiny per-entry sizes that sum to a huge
+// total — the cumulative check is what stops them.
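+//
+// Concretely: a 10 MiB upload may declare at most 1000 MiB uncompressed,
+// both per entry and summed across all entries.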
+func enforceZipUncompressedLimit(zr *zip.Reader, uploadSize int64) error {
+ const maxZipExpansionRatio = 100
+ maxUncompressed := uint64(uploadSize) * maxZipExpansionRatio
+ var total uint64
+ for _, f := range zr.File {
+ if f.UncompressedSize64 > maxUncompressed {
+ return fmt.Errorf("import rejected: zip entry %q declares uncompressed size %d, exceeds limit %d", f.Name, f.UncompressedSize64, maxUncompressed)
+ }
+ if f.UncompressedSize64 > maxUncompressed-total {
+ return fmt.Errorf("import rejected: zip cumulative uncompressed size exceeds limit %d", maxUncompressed)
+ }
+ total += f.UncompressedSize64
+ }
+ return nil
+}
+
+// replayImportRows reads each table file from the zip, regenerates every PK,
+// remaps group/user/FK columns, rewrites attachment blob paths from the source
+// gid prefix to the destination, and inserts the row into tx. Self-referential
+// and forward-circular FKs are stashed and patched in a second pass so the
+// first INSERT can succeed before the referenced row exists. Returns
+// idMap[table][oldID]=newID so the post-commit blob restore can resolve
+// attachment file names back to the just-inserted rows.
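+//
+// e.g. after the tags pass, idMap["tags"]["<old-uuid>"] == "<new-uuid>".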
+func (s *ExportService) replayImportRows(ctx context.Context, tx *sql.Tx, zr *zip.Reader, gid, userID, srcGroupID uuid.UUID) (map[string]map[string]string, error) {
+ idMap := make(map[string]map[string]string)
+ rememberID := func(table, oldID, newID string) {
+ if _, ok := idMap[table]; !ok {
+ idMap[table] = make(map[string]string)
+ }
+ idMap[table][oldID] = newID
+ }
+
+ // remapFK substitutes an old FK value with its remapped new value, or
+ // returns the original if unknown (which surfaces as a FK violation on
+ // insert — better to fail loud than silently null it out).
+ remapFK := func(target string, v any) any {
+ if v == nil {
+ return nil
+ }
+ // key, not "s", so the ExportService receiver isn't shadowed.
+ key := fmt.Sprint(v)
+ if key == "" {
+ return nil
+ }
+ if mapping, ok := idMap[target]; ok {
+ if newID, found := mapping[key]; found {
+ return newID
+ }
+ }
+ return v
+ }
+
+ type deferredUpdate struct {
+ table, col, newID, oldFKValue, targetTable string
+ }
+ var deferred []deferredUpdate
+
+ for _, spec := range exportTables {
+ rows, err := readTableJSON(zr, spec.name+".json")
+ if err != nil {
+ return nil, fmt.Errorf("read %s.json: %w", spec.name, err)
+ }
+ for _, row := range rows {
+ newID, err := remapImportRow(row, spec, gid, userID, srcGroupID, remapFK, rememberID)
+ if err != nil {
+ return nil, err
+ }
+ for col, target := range spec.deferCols {
+ if v, ok := row[col]; ok && v != nil && v != "" {
+ if newID != "" {
+ deferred = append(deferred, deferredUpdate{
+ table: spec.name,
+ col: col,
+ newID: newID,
+ oldFKValue: fmt.Sprint(v),
+ targetTable: target,
+ })
+ }
+ row[col] = nil
+ }
+ }
+ if err := insertRow(ctx, tx, s.dialect, spec.name, row); err != nil {
+ return nil, fmt.Errorf("insert %s: %w", spec.name, err)
+ }
+ }
+ }
+
+ // Apply deferred updates (self-referential and forward-circular FKs).
+ for _, d := range deferred {
+ newFK := remapFK(d.targetTable, d.oldFKValue)
+ // d.table/d.col come from the hardcoded exportTables specs, but quote
+ // them anyway to match insertRow's conservatism.
+ q := fmt.Sprintf("UPDATE %s SET %s = %s WHERE id = %s",
+ quoteIdent(s.dialect, d.table), quoteIdent(s.dialect, d.col),
+ placeholder(s.dialect, 1), placeholder(s.dialect, 2))
+ if _, err := tx.ExecContext(ctx, q, newFK, d.newID); err != nil {
+ return nil, fmt.Errorf("deferred update %s.%s: %w", d.table, d.col, err)
+ }
+ }
+
+ return idMap, nil
+}
+
+// remapImportRow rewrites a single row in place: regenerates its PK, swaps
+// group/user/FK columns, and validates+rewrites attachment blob paths from
+// the source gid prefix to the destination gid. Returns the new PK (empty
+// for junction tables with no pkCol) so the caller can record deferred FK
+// updates against it.
+func remapImportRow(
+ row map[string]any,
+ spec tableSpec,
+ gid, userID, srcGroupID uuid.UUID,
+ remapFK func(target string, v any) any,
+ rememberID func(table, oldID, newID string),
+) (string, error) {
+ var newID string
+ if spec.pkCol != "" {
+ if v, ok := row[spec.pkCol]; ok && v != nil {
+ old := fmt.Sprint(v)
+ newID = uuid.NewString()
+ row[spec.pkCol] = newID
+ rememberID(spec.name, old, newID)
+ }
+ }
+ for _, col := range spec.groupCols {
+ if _, ok := row[col]; ok {
+ row[col] = gid.String()
+ }
+ }
+ for _, col := range spec.userCols {
+ if _, ok := row[col]; ok {
+ row[col] = userID.String()
+ }
+ }
+ for col, target := range spec.fkCols {
+ if v, ok := row[col]; ok {
+ row[col] = remapFK(target, v)
+ }
+ }
+ // Attachment paths are "{group_id}/documents/{hash}"; rewrite the source
+ // gid prefix to the destination so the row points at where we will
+ // actually upload the blob and so cascade-cleanup on group delete sweeps
+ // it correctly.
+ //
+ // The zip is attacker-controlled (an admin imports a file they
+ // uploaded). Without validation, a crafted path like
+ // "{srcGid}/documents/../../etc/foo" would survive rewriteBlobPath
+ // (it only swaps the gid prefix) and reach the blob writer; the
+ // fileblob backend doesn't resolve ".." segments. Validate the
+ // source shape strictly, then re-validate the result.
+ if spec.name == "attachments" {
+ if err := rewriteAttachmentPath(row, srcGroupID, gid); err != nil {
+ return "", err
+ }
+ }
+ return newID, nil
+}
+
+// rewriteAttachmentPath validates the attachment row's path column, swaps
+// the source gid prefix for the destination gid, and re-validates the
+// result. Mutates row in place.
+func rewriteAttachmentPath(row map[string]any, srcGroupID, dstGroupID uuid.UUID) error {
+ v, ok := row["path"]
+ if !ok {
+ return fmt.Errorf("attachment row missing path column")
+ }
+ str, ok := v.(string)
+ if !ok || str == "" {
+ return fmt.Errorf("attachment row has empty/non-string path")
+ }
+ cleanPath := path.Clean(str)
+ srcPrefix := srcGroupID.String() + "/documents/"
+ if !strings.HasPrefix(cleanPath, srcPrefix) {
+ return fmt.Errorf("attachment path %q does not live under source group's documents prefix", str)
+ }
+ newPath := rewriteBlobPath(cleanPath, srcGroupID, dstGroupID)
+ dstPrefix := dstGroupID.String() + "/documents/"
+ if !strings.HasPrefix(newPath, dstPrefix) {
+ return fmt.Errorf("rewritten attachment path %q escapes destination group's documents prefix", newPath)
+ }
+ row["path"] = newPath
+ return nil
+}
+
+// wipeGroup deletes every group-scoped row in the export table list, in
+// reverse dependency order. Used before an import so the seeded
+// defaults don't pollute the restored collection.
+//
+// Reusing exportTables means new tables are wiped automatically once they're
+// added to the export schema — no separate list to keep in sync.
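+//
+// e.g. for notifiers this issues DELETE FROM "notifiers" WHERE group_id = ?
+// (rebound to $1 under postgres), with the gid bound to each placeholder.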
+func wipeGroup(ctx context.Context, db sqlExecer, dialect string, gid uuid.UUID) error {
+ for i := len(exportTables) - 1; i >= 0; i-- {
+ spec := exportTables[i]
+ if spec.scope == "" {
+ continue
+ }
+ q := "DELETE FROM " + quoteIdent(dialect, spec.name) +
+ " WHERE " + rebindPlaceholders(spec.scope, dialect)
+ args := make([]any, 0, strings.Count(spec.scope, "?"))
+ for j := 0; j < cap(args); j++ {
+ args = append(args, gid.String())
+ }
+ if _, err := db.ExecContext(ctx, q, args...); err != nil {
+ return fmt.Errorf("wipe %s: %w", spec.name, err)
+ }
+ }
+ return nil
+}
+
+// sortStrings is a tiny inlined sort to keep the file dependency-light.
+func sortStrings(s []string) {
+ for i := 1; i < len(s); i++ {
+ for j := i; j > 0 && s[j-1] > s[j]; j-- {
+ s[j], s[j-1] = s[j-1], s[j]
+ }
+ }
+}
diff --git a/backend/internal/core/services/service_exports_test.go b/backend/internal/core/services/service_exports_test.go
new file mode 100644
index 000000000..567c9e3bf
--- /dev/null
+++ b/backend/internal/core/services/service_exports_test.go
@@ -0,0 +1,342 @@
+package services
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "strings"
+ "testing"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gocloud.dev/blob"
+
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/attachment"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/entity"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/predicate"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/tag"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/repo"
+)
+
+func tagInGroup(gid uuid.UUID) predicate.Tag {
+ return tag.HasGroupWith(group.ID(gid))
+}
+
+// TestExportRoundTrip writes some entities into a fresh source group, runs
+// the export to produce a zip artifact, and then replays that artifact into
+// a separate empty destination group. Counts and selected fields are
+// asserted on the destination side.
+//
+// This is the load-bearing integration test for the raw-SQL dump/restore
+// path: anything that doesn't round-trip cleanly (timestamps, UUIDs, JSON
+// columns, self-referential FKs) shows up here.
+func TestExportRoundTrip(t *testing.T) {
+ ctx := context.Background()
+
+ // --- Source group with data ----------------------------------------
+ src, err := tRepos.Groups.GroupCreate(ctx, "export-src-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+
+ containerET, err := tRepos.EntityTypes.GetDefault(ctx, src.ID, true)
+ require.NoError(t, err)
+ itemET, err := tRepos.EntityTypes.GetDefault(ctx, src.ID, false)
+ require.NoError(t, err)
+
+ // One location, one item nested in it.
+ loc, err := tRepos.Entities.Create(ctx, src.ID, repo.EntityCreate{
+ Name: "Garage",
+ Description: "primary",
+ EntityTypeID: containerET.ID,
+ })
+ require.NoError(t, err)
+
+ item, err := tRepos.Entities.Create(ctx, src.ID, repo.EntityCreate{
+ Name: "Drill",
+ Description: "cordless",
+ ParentID: loc.ID,
+ EntityTypeID: itemET.ID,
+ })
+ require.NoError(t, err)
+
+ // Tag and link to the item (exercises the tag_entities junction).
+ tg, err := tRepos.Tags.Create(ctx, src.ID, repo.TagCreate{
+ Name: "tools",
+ Description: "stuff that hits other stuff",
+ })
+ require.NoError(t, err)
+ _, err = tClient.Entity.UpdateOneID(item.ID).AddTagIDs(tg.ID).Save(ctx)
+ require.NoError(t, err)
+
+ // Real attachment + a fabricated thumbnail row pointing at it.
+ // This is the scenario that broke before: the thumbnail row has
+ // entity_attachments=NULL and is reachable only via the parent's
+ // attachment_thumbnail FK, so the original entity-only scope missed it.
+ parentAtt, err := tRepos.Attachments.Create(ctx, item.ID,
+ repo.ItemCreateAttachment{
+ Title: "manual.pdf",
+ Content: bytes.NewReader([]byte("dummy pdf body")),
+ },
+ attachment.TypeManual, false)
+ require.NoError(t, err)
+
+ srcGroup, err := tClient.Group.Get(ctx, src.ID)
+ require.NoError(t, err)
+ thumbUpload, err := tRepos.Attachments.UploadFile(ctx, srcGroup,
+ repo.ItemCreateAttachment{
+ Title: "manual-thumb",
+ Content: bytes.NewReader([]byte("dummy thumbnail body")),
+ })
+ require.NoError(t, err)
+ thumbAtt, err := tClient.Attachment.Create().
+ SetType(attachment.TypeThumbnail).
+ SetTitle("manual-thumb").
+ SetPath(thumbUpload.Path).
+ SetMimeType("image/webp").
+ Save(ctx)
+ require.NoError(t, err)
+ _, err = tClient.Attachment.UpdateOneID(parentAtt.ID).SetThumbnailID(thumbAtt.ID).Save(ctx)
+ require.NoError(t, err)
+
+ // --- Export --------------------------------------------------------
+ expRow, err := tRepos.Exports.Create(ctx, src.ID)
+ require.NoError(t, err)
+
+ artifactPath, sizeBytes, err := tSvc.Exports.buildArtifact(ctx, expRow.ID, src.ID)
+ require.NoError(t, err)
+ require.NotEmpty(t, artifactPath)
+ require.Positive(t, sizeBytes)
+
+ // Artifact must live under the source group's prefix.
+ assert.True(t, strings.HasPrefix(artifactPath, src.ID.String()+"/exports/"),
+ "artifact path %q must be scoped to source group", artifactPath)
+
+ // --- Destination: fresh group with seeded defaults -----------------
+ // Mirror what real registration does: a new group has default locations
+ // and tags but no items. The import must tolerate this and wipe them.
+ dst, err := tRepos.Groups.GroupCreate(ctx, "export-dst-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+
+ dstContainerET, err := tRepos.EntityTypes.GetDefault(ctx, dst.ID, true)
+ require.NoError(t, err)
+ for _, name := range []string{"Living Room", "Garage", "Kitchen"} {
+ _, err := tRepos.Entities.Create(ctx, dst.ID, repo.EntityCreate{
+ Name: name,
+ EntityTypeID: dstContainerET.ID,
+ })
+ require.NoError(t, err)
+ }
+ for _, name := range []string{"Appliances", "Electronics"} {
+ _, err := tRepos.Tags.Create(ctx, dst.ID, repo.TagCreate{Name: name})
+ require.NoError(t, err)
+ }
+
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, dst.ID)
+ require.NoError(t, err)
+ require.True(t, ready, "dst group with only seeded defaults must be importable")
+
+ // Stage the just-built artifact as if it had been uploaded for import.
+ // We re-publish it under the destination's import prefix to satisfy the
+ // worker's scope check.
+ importKey := dst.ID.String() + "/imports/" + uuid.New().String() + ".zip"
+ require.NoError(t, copyBlobUnderTest(ctx, tSvc.Exports, artifactPath, importKey))
+
+ // Create the tracked import row the worker reads to find the upload key
+ // and to report status/progress against.
+ impRow, err := tRepos.Exports.CreateImport(ctx, dst.ID, importKey, sizeBytes)
+ require.NoError(t, err)
+ tSvc.Exports.RunImport(ctx, dst.ID, tUser.ID, impRow.ID)
+
+ // --- Assertions ----------------------------------------------------
+ dstEntities, err := tClient.Entity.Query().Where(entity.HasGroupWith(group.ID(dst.ID))).All(ctx)
+ require.NoError(t, err)
+ require.Len(t, dstEntities, 2, "exactly the location and the item should remain — seeded defaults wiped, source data restored")
+
+ gotItem, err := tClient.Entity.Query().
+ Where(entity.HasGroupWith(group.ID(dst.ID)), entity.Name("Drill")).
+ Only(ctx)
+ require.NoError(t, err)
+
+ parent, err := gotItem.QueryParent().Only(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, "Garage", parent.Name, "parent FK must be restored on second pass")
+
+ tags, err := gotItem.QueryTag().All(ctx)
+ require.NoError(t, err)
+ require.Len(t, tags, 1, "tag_entities junction must round-trip")
+ assert.Equal(t, "tools", tags[0].Name)
+
+ // Seeded tags must be gone — only the imported "tools" tag should remain.
+ allTags, err := tClient.Tag.Query().Where(tagInGroup(dst.ID)).All(ctx)
+ require.NoError(t, err)
+ require.Len(t, allTags, 1, "seeded tags should have been wiped")
+ assert.Equal(t, "tools", allTags[0].Name)
+
+ // IDs are intentionally regenerated on import, so re-importing the same
+ // archive into a server that already holds the data cannot conflict on
+ // primary keys. Only names and relationship structure need to survive.
+ assert.NotEqual(t, item.ID, gotItem.ID, "import should remap PKs")
+ assert.NotEqual(t, tg.ID, tags[0].ID, "import should remap PKs")
+
+ // Attachment + thumbnail must both round-trip with the parent→thumbnail
+ // link intact and both blobs present at their new on-disk paths.
+ gotAtts, err := gotItem.QueryAttachments().All(ctx)
+ require.NoError(t, err)
+ require.Len(t, gotAtts, 1, "parent attachment row must round-trip")
+
+ gotThumb, err := gotAtts[0].QueryThumbnail().Only(ctx)
+ require.NoError(t, err, "parent attachment must have its thumbnail edge restored")
+ assert.Equal(t, "image/webp", gotThumb.MimeType)
+
+ // Imported paths must be rewritten to the destination group's prefix —
+ // otherwise the DB would point at the source group and on-delete cascade
+ // would leak blobs.
+ dstPrefix := dst.ID.String() + "/"
+ assert.True(t, strings.HasPrefix(gotAtts[0].Path, dstPrefix),
+ "parent attachment path must point at dst group (got %q)", gotAtts[0].Path)
+ assert.True(t, strings.HasPrefix(gotThumb.Path, dstPrefix),
+ "thumbnail path must point at dst group (got %q)", gotThumb.Path)
+ assert.NotContains(t, gotAtts[0].Path, src.ID.String(),
+ "source gid must not appear anywhere in the imported path")
+
+ bk, err := blob.OpenBucket(ctx, tRepos.Attachments.GetConnString())
+ require.NoError(t, err)
+ defer func() { _ = bk.Close() }()
+
+ parentBlob, err := bk.ReadAll(ctx, tRepos.Attachments.GetFullPath(gotAtts[0].Path))
+ require.NoError(t, err, "parent attachment blob must be present at the rewritten path")
+ assert.Equal(t, "dummy pdf body", string(parentBlob))
+
+ thumbBlob, err := bk.ReadAll(ctx, tRepos.Attachments.GetFullPath(gotThumb.Path))
+ require.NoError(t, err, "thumbnail blob must be present at the rewritten path")
+ assert.Equal(t, "dummy thumbnail body", string(thumbBlob))
+}
+
+// TestIsGroupReadyForImport_BlocksUserCreatedRows asserts that the import
+// gate blocks not just on items but on user-created rows in any table the
+// import would wipe (tags, entity_templates, notifiers, custom entity_types,
+// and custom locations beyond the seeded baseline). The pure-seed and
+// pure-empty cases must still pass.
+func TestIsGroupReadyForImport_BlocksUserCreatedRows(t *testing.T) {
+ ctx := context.Background()
+
+ t.Run("empty group passes", func(t *testing.T) {
+ g, err := tRepos.Groups.GroupCreate(ctx, "ready-empty-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, g.ID)
+ require.NoError(t, err)
+ assert.True(t, ready, "empty group must be importable")
+ })
+
+ t.Run("only seeded defaults passes", func(t *testing.T) {
+ g, err := tRepos.Groups.GroupCreate(ctx, "ready-seed-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+ locET, err := tRepos.EntityTypes.GetDefault(ctx, g.ID, true)
+ require.NoError(t, err)
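+ // These names mirror the registration seed baseline (cf. the
+ // defaultLocations/defaultTags helpers used in the blocking subtests).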
+ for _, name := range []string{"Living Room", "Garage", "Kitchen", "Bedroom", "Bathroom", "Office", "Attic", "Basement"} {
+ _, err := tRepos.Entities.Create(ctx, g.ID, repo.EntityCreate{Name: name, EntityTypeID: locET.ID})
+ require.NoError(t, err)
+ }
+ for _, name := range []string{"Appliances", "IOT", "Electronics", "Servers", "General", "Important"} {
+ _, err := tRepos.Tags.Create(ctx, g.ID, repo.TagCreate{Name: name})
+ require.NoError(t, err)
+ }
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, g.ID)
+ require.NoError(t, err)
+ assert.True(t, ready, "full seed baseline must be importable")
+ })
+
+ t.Run("extra tag blocks", func(t *testing.T) {
+ g, err := tRepos.Groups.GroupCreate(ctx, "ready-tag-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
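+ // Create one more tag than the seed baseline so the count check trips.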
+ for i := 0; i <= len(defaultTags()); i++ {
+ _, err := tRepos.Tags.Create(ctx, g.ID, repo.TagCreate{Name: fk.Str(8)})
+ require.NoError(t, err)
+ }
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, g.ID)
+ require.NoError(t, err)
+ assert.False(t, ready, "tag count beyond seed baseline must block")
+ })
+
+ t.Run("extra location blocks", func(t *testing.T) {
+ g, err := tRepos.Groups.GroupCreate(ctx, "ready-loc-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+ locET, err := tRepos.EntityTypes.GetDefault(ctx, g.ID, true)
+ require.NoError(t, err)
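+ // Create one more location than the seed baseline so the count check trips.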
+ for i := 0; i <= len(defaultLocations()); i++ {
+ _, err := tRepos.Entities.Create(ctx, g.ID, repo.EntityCreate{Name: fk.Str(8), EntityTypeID: locET.ID})
+ require.NoError(t, err)
+ }
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, g.ID)
+ require.NoError(t, err)
+ assert.False(t, ready, "location count beyond seed baseline must block")
+ })
+
+ t.Run("notifier blocks", func(t *testing.T) {
+ g, err := tRepos.Groups.GroupCreate(ctx, "ready-not-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+ _, err = tRepos.Notifiers.Create(ctx, g.ID, tUser.ID, repo.NotifierCreate{
+ Name: "n",
+ URL: "ntfy://x/topic",
+ IsActive: true,
+ })
+ require.NoError(t, err)
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, g.ID)
+ require.NoError(t, err)
+ assert.False(t, ready, "any notifier must block")
+ })
+
+ t.Run("template blocks", func(t *testing.T) {
+ g, err := tRepos.Groups.GroupCreate(ctx, "ready-tpl-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+ _, err = tRepos.EntityTemplates.Create(ctx, g.ID, repo.EntityTemplateCreate{Name: "t"})
+ require.NoError(t, err)
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, g.ID)
+ require.NoError(t, err)
+ assert.False(t, ready, "any entity template must block")
+ })
+
+ t.Run("custom entity_type blocks", func(t *testing.T) {
+ g, err := tRepos.Groups.GroupCreate(ctx, "ready-et-"+fk.Str(4), uuid.Nil)
+ require.NoError(t, err)
+ // Trigger lazy creation of both defaults, then add a third custom type.
+ _, err = tRepos.EntityTypes.GetDefault(ctx, g.ID, true)
+ require.NoError(t, err)
+ _, err = tRepos.EntityTypes.GetDefault(ctx, g.ID, false)
+ require.NoError(t, err)
+ _, err = tRepos.EntityTypes.Create(ctx, g.ID, repo.EntityTypeCreate{Name: "Custom", IsLocation: false})
+ require.NoError(t, err)
+ ready, err := tSvc.Exports.IsGroupReadyForImport(ctx, g.ID)
+ require.NoError(t, err)
+ assert.False(t, ready, "entity_type beyond Item/Location defaults must block")
+ })
+}
+
+// copyBlobUnderTest reuses the export service's bucket plumbing to copy a
+// blob from one key to another in the same backing store. Used to "stage"
+// the just-produced export under the destination group's import prefix.
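+// (blob.Bucket.Copy exists as well; the manual reader/writer copy below
+// avoids assuming driver-side copy support.)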
+func copyBlobUnderTest(ctx context.Context, svc *ExportService, srcKey, dstKey string) error {
+ att := svc.repos.Attachments
+ bk, err := blob.OpenBucket(ctx, att.GetConnString())
+ if err != nil {
+ return err
+ }
+ defer func() { _ = bk.Close() }()
+
+ r, err := bk.NewReader(ctx, att.GetFullPath(srcKey), nil)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = r.Close() }()
+
+ w, err := bk.NewWriter(ctx, att.GetFullPath(dstKey), nil)
+ if err != nil {
+ return err
+ }
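+ // On copy failure the writer is still closed; its close error is
+ // discarded in favor of the copy error.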
+ if _, err := io.Copy(w, r); err != nil {
+ _ = w.Close()
+ return err
+ }
+ return w.Close()
+}
diff --git a/backend/internal/core/services/service_user_session_test.go b/backend/internal/core/services/service_user_session_test.go
index ae2cf5e26..9e0407df5 100644
--- a/backend/internal/core/services/service_user_session_test.go
+++ b/backend/internal/core/services/service_user_session_test.go
@@ -50,7 +50,7 @@ func TestRenewToken_InvalidatesPriorToken(t *testing.T) {
// The new token authenticates.
_, err = tRepos.AuthTokens.GetUserFromToken(ctx, hasher.HashToken(renewed.Raw))
- assert.NoError(t, err)
+ require.NoError(t, err)
// The old token does NOT authenticate.
_, err = tRepos.AuthTokens.GetUserFromToken(ctx, hasher.HashToken(old.Raw))
@@ -85,7 +85,7 @@ func TestChangePassword_RevokesOtherSessions_KeepsCurrent(t *testing.T) {
// Current token still authenticates.
_, err = tRepos.AuthTokens.GetUserFromToken(ctx, hasher.HashToken(current.Raw))
- assert.NoError(t, err, "current session must remain valid")
+ require.NoError(t, err, "current session must remain valid")
// Login works with the new password and not the old.
_, err = tSvc.User.Login(ctx, usr.Email, "new-cp-pw", false)
diff --git a/backend/internal/data/ent/client.go b/backend/internal/data/ent/client.go
index d0352d22b..8d7d57c0f 100644
--- a/backend/internal/data/ent/client.go
+++ b/backend/internal/data/ent/client.go
@@ -24,6 +24,7 @@ import (
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entityfield"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/maintenanceentry"
@@ -56,6 +57,8 @@ type Client struct {
EntityTemplate *EntityTemplateClient
// EntityType is the client for interacting with the EntityType builders.
EntityType *EntityTypeClient
+ // Export is the client for interacting with the Export builders.
+ Export *ExportClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// GroupInvitationToken is the client for interacting with the GroupInvitationToken builders.
@@ -93,6 +96,7 @@ func (c *Client) init() {
c.EntityField = NewEntityFieldClient(c.config)
c.EntityTemplate = NewEntityTemplateClient(c.config)
c.EntityType = NewEntityTypeClient(c.config)
+ c.Export = NewExportClient(c.config)
c.Group = NewGroupClient(c.config)
c.GroupInvitationToken = NewGroupInvitationTokenClient(c.config)
c.MaintenanceEntry = NewMaintenanceEntryClient(c.config)
@@ -202,6 +206,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
EntityField: NewEntityFieldClient(cfg),
EntityTemplate: NewEntityTemplateClient(cfg),
EntityType: NewEntityTypeClient(cfg),
+ Export: NewExportClient(cfg),
Group: NewGroupClient(cfg),
GroupInvitationToken: NewGroupInvitationTokenClient(cfg),
MaintenanceEntry: NewMaintenanceEntryClient(cfg),
@@ -238,6 +243,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
EntityField: NewEntityFieldClient(cfg),
EntityTemplate: NewEntityTemplateClient(cfg),
EntityType: NewEntityTypeClient(cfg),
+ Export: NewExportClient(cfg),
Group: NewGroupClient(cfg),
GroupInvitationToken: NewGroupInvitationTokenClient(cfg),
MaintenanceEntry: NewMaintenanceEntryClient(cfg),
@@ -277,7 +283,7 @@ func (c *Client) Close() error {
func (c *Client) Use(hooks ...Hook) {
for _, n := range []interface{ Use(...Hook) }{
c.APIKey, c.Attachment, c.AuthRoles, c.AuthTokens, c.Entity, c.EntityField,
- c.EntityTemplate, c.EntityType, c.Group, c.GroupInvitationToken,
+ c.EntityTemplate, c.EntityType, c.Export, c.Group, c.GroupInvitationToken,
c.MaintenanceEntry, c.Notifier, c.PasswordResetTokens, c.Tag, c.TemplateField,
c.User, c.UserGroup,
} {
@@ -290,7 +296,7 @@ func (c *Client) Use(hooks ...Hook) {
func (c *Client) Intercept(interceptors ...Interceptor) {
for _, n := range []interface{ Intercept(...Interceptor) }{
c.APIKey, c.Attachment, c.AuthRoles, c.AuthTokens, c.Entity, c.EntityField,
- c.EntityTemplate, c.EntityType, c.Group, c.GroupInvitationToken,
+ c.EntityTemplate, c.EntityType, c.Export, c.Group, c.GroupInvitationToken,
c.MaintenanceEntry, c.Notifier, c.PasswordResetTokens, c.Tag, c.TemplateField,
c.User, c.UserGroup,
} {
@@ -317,6 +323,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
return c.EntityTemplate.mutate(ctx, m)
case *EntityTypeMutation:
return c.EntityType.mutate(ctx, m)
+ case *ExportMutation:
+ return c.Export.mutate(ctx, m)
case *GroupMutation:
return c.Group.mutate(ctx, m)
case *GroupInvitationTokenMutation:
@@ -1740,6 +1748,155 @@ func (c *EntityTypeClient) mutate(ctx context.Context, m *EntityTypeMutation) (V
}
}
+// ExportClient is a client for the Export schema.
+type ExportClient struct {
+ config
+}
+
+// NewExportClient returns a client for the Export from the given config.
+func NewExportClient(c config) *ExportClient {
+ return &ExportClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `export.Hooks(f(g(h())))`.
+func (c *ExportClient) Use(hooks ...Hook) {
+ c.hooks.Export = append(c.hooks.Export, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `export.Intercept(f(g(h())))`.
+func (c *ExportClient) Intercept(interceptors ...Interceptor) {
+ c.inters.Export = append(c.inters.Export, interceptors...)
+}
+
+// Create returns a builder for creating a Export entity.
+func (c *ExportClient) Create() *ExportCreate {
+ mutation := newExportMutation(c.config, OpCreate)
+ return &ExportCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Export entities.
+func (c *ExportClient) CreateBulk(builders ...*ExportCreate) *ExportCreateBulk {
+ return &ExportCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *ExportClient) MapCreateBulk(slice any, setFunc func(*ExportCreate, int)) *ExportCreateBulk {
+ rv := reflect.ValueOf(slice)
+ if rv.Kind() != reflect.Slice {
+ return &ExportCreateBulk{err: fmt.Errorf("calling to ExportClient.MapCreateBulk with wrong type %T, need slice", slice)}
+ }
+ builders := make([]*ExportCreate, rv.Len())
+ for i := 0; i < rv.Len(); i++ {
+ builders[i] = c.Create()
+ setFunc(builders[i], i)
+ }
+ return &ExportCreateBulk{config: c.config, builders: builders}
+}
+
+// Update returns an update builder for Export.
+func (c *ExportClient) Update() *ExportUpdate {
+ mutation := newExportMutation(c.config, OpUpdate)
+ return &ExportUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOne returns an update builder for the given entity.
+func (c *ExportClient) UpdateOne(_m *Export) *ExportUpdateOne {
+ mutation := newExportMutation(c.config, OpUpdateOne, withExport(_m))
+ return &ExportUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// UpdateOneID returns an update builder for the given id.
+func (c *ExportClient) UpdateOneID(id uuid.UUID) *ExportUpdateOne {
+ mutation := newExportMutation(c.config, OpUpdateOne, withExportID(id))
+ return &ExportUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// Delete returns a delete builder for Export.
+func (c *ExportClient) Delete() *ExportDelete {
+ mutation := newExportMutation(c.config, OpDelete)
+ return &ExportDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// DeleteOne returns a builder for deleting the given entity.
+func (c *ExportClient) DeleteOne(_m *Export) *ExportDeleteOne {
+ return c.DeleteOneID(_m.ID)
+}
+
+// DeleteOneID returns a builder for deleting the given entity by its id.
+func (c *ExportClient) DeleteOneID(id uuid.UUID) *ExportDeleteOne {
+ builder := c.Delete().Where(export.ID(id))
+ builder.mutation.id = &id
+ builder.mutation.op = OpDeleteOne
+ return &ExportDeleteOne{builder}
+}
+
+// Query returns a query builder for Export.
+func (c *ExportClient) Query() *ExportQuery {
+ return &ExportQuery{
+ config: c.config,
+ ctx: &QueryContext{Type: TypeExport},
+ inters: c.Interceptors(),
+ }
+}
+
+// Get returns a Export entity by its id.
+func (c *ExportClient) Get(ctx context.Context, id uuid.UUID) (*Export, error) {
+ return c.Query().Where(export.ID(id)).Only(ctx)
+}
+
+// GetX is like Get, but panics if an error occurs.
+func (c *ExportClient) GetX(ctx context.Context, id uuid.UUID) *Export {
+ obj, err := c.Get(ctx, id)
+ if err != nil {
+ panic(err)
+ }
+ return obj
+}
+
+// QueryGroup queries the group edge of a Export.
+func (c *ExportClient) QueryGroup(_m *Export) *GroupQuery {
+ query := (&GroupClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := _m.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(export.Table, export.FieldID, id),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, export.GroupTable, export.GroupColumn),
+ )
+ fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
+// Hooks returns the client hooks.
+func (c *ExportClient) Hooks() []Hook {
+ return c.hooks.Export
+}
+
+// Interceptors returns the client interceptors.
+func (c *ExportClient) Interceptors() []Interceptor {
+ return c.inters.Export
+}
+
+func (c *ExportClient) mutate(ctx context.Context, m *ExportMutation) (Value, error) {
+ switch m.Op() {
+ case OpCreate:
+ return (&ExportCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdate:
+ return (&ExportUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpUpdateOne:
+ return (&ExportUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+ case OpDelete, OpDeleteOne:
+ return (&ExportDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+ default:
+ return nil, fmt.Errorf("ent: unknown Export mutation op: %q", m.Op())
+ }
+}
+
// GroupClient is a client for the Group schema.
type GroupClient struct {
config
@@ -1960,6 +2117,22 @@ func (c *GroupClient) QueryEntityTemplates(_m *Group) *EntityTemplateQuery {
return query
}
+// QueryExports queries the exports edge of a Group.
+func (c *GroupClient) QueryExports(_m *Group) *ExportQuery {
+ query := (&ExportClient{config: c.config}).Query()
+ query.path = func(context.Context) (fromV *sql.Selector, _ error) {
+ id := _m.ID
+ step := sqlgraph.NewStep(
+ sqlgraph.From(group.Table, group.FieldID, id),
+ sqlgraph.To(export.Table, export.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, group.ExportsTable, group.ExportsColumn),
+ )
+ fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step)
+ return fromV, nil
+ }
+ return query
+}
+
// QueryUserGroups queries the user_groups edge of a Group.
func (c *GroupClient) QueryUserGroups(_m *Group) *UserGroupQuery {
query := (&UserGroupClient{config: c.config}).Query()
@@ -3308,12 +3481,12 @@ func (c *UserGroupClient) mutate(ctx context.Context, m *UserGroupMutation) (Val
type (
hooks struct {
APIKey, Attachment, AuthRoles, AuthTokens, Entity, EntityField, EntityTemplate,
- EntityType, Group, GroupInvitationToken, MaintenanceEntry, Notifier,
+ EntityType, Export, Group, GroupInvitationToken, MaintenanceEntry, Notifier,
PasswordResetTokens, Tag, TemplateField, User, UserGroup []ent.Hook
}
inters struct {
APIKey, Attachment, AuthRoles, AuthTokens, Entity, EntityField, EntityTemplate,
- EntityType, Group, GroupInvitationToken, MaintenanceEntry, Notifier,
+ EntityType, Export, Group, GroupInvitationToken, MaintenanceEntry, Notifier,
PasswordResetTokens, Tag, TemplateField, User, UserGroup []ent.Interceptor
}
)
diff --git a/backend/internal/data/ent/ent.go b/backend/internal/data/ent/ent.go
index d574ec3b6..8539764b8 100644
--- a/backend/internal/data/ent/ent.go
+++ b/backend/internal/data/ent/ent.go
@@ -20,6 +20,7 @@ import (
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entityfield"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/maintenanceentry"
@@ -97,6 +98,7 @@ func checkColumn(t, c string) error {
entityfield.Table: entityfield.ValidColumn,
entitytemplate.Table: entitytemplate.ValidColumn,
entitytype.Table: entitytype.ValidColumn,
+ export.Table: export.ValidColumn,
group.Table: group.ValidColumn,
groupinvitationtoken.Table: groupinvitationtoken.ValidColumn,
maintenanceentry.Table: maintenanceentry.ValidColumn,
diff --git a/backend/internal/data/ent/export.go b/backend/internal/data/ent/export.go
new file mode 100644
index 000000000..021048412
--- /dev/null
+++ b/backend/internal/data/ent/export.go
@@ -0,0 +1,226 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "entgo.io/ent"
+ "entgo.io/ent/dialect/sql"
+ "github.com/google/uuid"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
+)
+
+// Export is the model entity for the Export schema.
+type Export struct {
+ config `json:"-"`
+ // ID of the ent.
+ ID uuid.UUID `json:"id,omitempty"`
+ // CreatedAt holds the value of the "created_at" field.
+ CreatedAt time.Time `json:"created_at,omitempty"`
+ // UpdatedAt holds the value of the "updated_at" field.
+ UpdatedAt time.Time `json:"updated_at,omitempty"`
+ // GroupID holds the value of the "group_id" field.
+ GroupID uuid.UUID `json:"group_id,omitempty"`
+ // Kind holds the value of the "kind" field.
+ Kind export.Kind `json:"kind,omitempty"`
+ // Status holds the value of the "status" field.
+ Status export.Status `json:"status,omitempty"`
+ // Progress holds the value of the "progress" field.
+ Progress int `json:"progress,omitempty"`
+ // ArtifactPath holds the value of the "artifact_path" field.
+ ArtifactPath string `json:"artifact_path,omitempty"`
+ // SizeBytes holds the value of the "size_bytes" field.
+ SizeBytes int64 `json:"size_bytes,omitempty"`
+ // Error holds the value of the "error" field.
+ Error string `json:"error,omitempty"`
+ // Edges holds the relations/edges for other nodes in the graph.
+ // The values are being populated by the ExportQuery when eager-loading is set.
+ Edges ExportEdges `json:"edges"`
+ selectValues sql.SelectValues
+}
+
+// ExportEdges holds the relations/edges for other nodes in the graph.
+type ExportEdges struct {
+ // Group holds the value of the group edge.
+ Group *Group `json:"group,omitempty"`
+ // loadedTypes holds the information for reporting if a
+ // type was loaded (or requested) in eager-loading or not.
+ loadedTypes [1]bool
+}
+
+// GroupOrErr returns the Group value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e ExportEdges) GroupOrErr() (*Group, error) {
+ if e.Group != nil {
+ return e.Group, nil
+ } else if e.loadedTypes[0] {
+ return nil, &NotFoundError{label: group.Label}
+ }
+ return nil, &NotLoadedError{edge: "group"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Export) scanValues(columns []string) ([]any, error) {
+ values := make([]any, len(columns))
+ for i := range columns {
+ switch columns[i] {
+ case export.FieldProgress, export.FieldSizeBytes:
+ values[i] = new(sql.NullInt64)
+ case export.FieldKind, export.FieldStatus, export.FieldArtifactPath, export.FieldError:
+ values[i] = new(sql.NullString)
+ case export.FieldCreatedAt, export.FieldUpdatedAt:
+ values[i] = new(sql.NullTime)
+ case export.FieldID, export.FieldGroupID:
+ values[i] = new(uuid.UUID)
+ default:
+ values[i] = new(sql.UnknownType)
+ }
+ }
+ return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Export fields.
+func (_m *Export) assignValues(columns []string, values []any) error {
+ if m, n := len(values), len(columns); m < n {
+ return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+ }
+ for i := range columns {
+ switch columns[i] {
+ case export.FieldID:
+ if value, ok := values[i].(*uuid.UUID); !ok {
+ return fmt.Errorf("unexpected type %T for field id", values[i])
+ } else if value != nil {
+ _m.ID = *value
+ }
+ case export.FieldCreatedAt:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field created_at", values[i])
+ } else if value.Valid {
+ _m.CreatedAt = value.Time
+ }
+ case export.FieldUpdatedAt:
+ if value, ok := values[i].(*sql.NullTime); !ok {
+ return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+ } else if value.Valid {
+ _m.UpdatedAt = value.Time
+ }
+ case export.FieldGroupID:
+ if value, ok := values[i].(*uuid.UUID); !ok {
+ return fmt.Errorf("unexpected type %T for field group_id", values[i])
+ } else if value != nil {
+ _m.GroupID = *value
+ }
+ case export.FieldKind:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field kind", values[i])
+ } else if value.Valid {
+ _m.Kind = export.Kind(value.String)
+ }
+ case export.FieldStatus:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field status", values[i])
+ } else if value.Valid {
+ _m.Status = export.Status(value.String)
+ }
+ case export.FieldProgress:
+ if value, ok := values[i].(*sql.NullInt64); !ok {
+ return fmt.Errorf("unexpected type %T for field progress", values[i])
+ } else if value.Valid {
+ _m.Progress = int(value.Int64)
+ }
+ case export.FieldArtifactPath:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field artifact_path", values[i])
+ } else if value.Valid {
+ _m.ArtifactPath = value.String
+ }
+ case export.FieldSizeBytes:
+ if value, ok := values[i].(*sql.NullInt64); !ok {
+ return fmt.Errorf("unexpected type %T for field size_bytes", values[i])
+ } else if value.Valid {
+ _m.SizeBytes = value.Int64
+ }
+ case export.FieldError:
+ if value, ok := values[i].(*sql.NullString); !ok {
+ return fmt.Errorf("unexpected type %T for field error", values[i])
+ } else if value.Valid {
+ _m.Error = value.String
+ }
+ default:
+ _m.selectValues.Set(columns[i], values[i])
+ }
+ }
+ return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the Export.
+// This includes values selected through modifiers, order, etc.
+func (_m *Export) Value(name string) (ent.Value, error) {
+ return _m.selectValues.Get(name)
+}
+
+// QueryGroup queries the "group" edge of the Export entity.
+func (_m *Export) QueryGroup() *GroupQuery {
+ return NewExportClient(_m.config).QueryGroup(_m)
+}
+
+// Update returns a builder for updating this Export.
+// Note that you need to call Export.Unwrap() before calling this method if this Export
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (_m *Export) Update() *ExportUpdateOne {
+ return NewExportClient(_m.config).UpdateOne(_m)
+}
+
+// Unwrap unwraps the Export entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (_m *Export) Unwrap() *Export {
+ _tx, ok := _m.config.driver.(*txDriver)
+ if !ok {
+ panic("ent: Export is not a transactional entity")
+ }
+ _m.config.driver = _tx.drv
+ return _m
+}
+
+// String implements the fmt.Stringer.
+func (_m *Export) String() string {
+ var builder strings.Builder
+ builder.WriteString("Export(")
+ builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
+ builder.WriteString("created_at=")
+ builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("updated_at=")
+ builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
+ builder.WriteString(", ")
+ builder.WriteString("group_id=")
+ builder.WriteString(fmt.Sprintf("%v", _m.GroupID))
+ builder.WriteString(", ")
+ builder.WriteString("kind=")
+ builder.WriteString(fmt.Sprintf("%v", _m.Kind))
+ builder.WriteString(", ")
+ builder.WriteString("status=")
+ builder.WriteString(fmt.Sprintf("%v", _m.Status))
+ builder.WriteString(", ")
+ builder.WriteString("progress=")
+ builder.WriteString(fmt.Sprintf("%v", _m.Progress))
+ builder.WriteString(", ")
+ builder.WriteString("artifact_path=")
+ builder.WriteString(_m.ArtifactPath)
+ builder.WriteString(", ")
+ builder.WriteString("size_bytes=")
+ builder.WriteString(fmt.Sprintf("%v", _m.SizeBytes))
+ builder.WriteString(", ")
+ builder.WriteString("error=")
+ builder.WriteString(_m.Error)
+ builder.WriteByte(')')
+ return builder.String()
+}
+
+// Exports is a parsable slice of Export.
+type Exports []*Export
diff --git a/backend/internal/data/ent/export/export.go b/backend/internal/data/ent/export/export.go
new file mode 100644
index 000000000..d4a0b1561
--- /dev/null
+++ b/backend/internal/data/ent/export/export.go
@@ -0,0 +1,210 @@
+// Code generated by ent, DO NOT EDIT.
+
+package export
+
+import (
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "github.com/google/uuid"
+)
+
+const (
+ // Label holds the string label denoting the export type in the database.
+ Label = "export"
+ // FieldID holds the string denoting the id field in the database.
+ FieldID = "id"
+ // FieldCreatedAt holds the string denoting the created_at field in the database.
+ FieldCreatedAt = "created_at"
+ // FieldUpdatedAt holds the string denoting the updated_at field in the database.
+ FieldUpdatedAt = "updated_at"
+ // FieldGroupID holds the string denoting the group_id field in the database.
+ FieldGroupID = "group_id"
+ // FieldKind holds the string denoting the kind field in the database.
+ FieldKind = "kind"
+ // FieldStatus holds the string denoting the status field in the database.
+ FieldStatus = "status"
+ // FieldProgress holds the string denoting the progress field in the database.
+ FieldProgress = "progress"
+ // FieldArtifactPath holds the string denoting the artifact_path field in the database.
+ FieldArtifactPath = "artifact_path"
+ // FieldSizeBytes holds the string denoting the size_bytes field in the database.
+ FieldSizeBytes = "size_bytes"
+ // FieldError holds the string denoting the error field in the database.
+ FieldError = "error"
+ // EdgeGroup holds the string denoting the group edge name in mutations.
+ EdgeGroup = "group"
+ // Table holds the table name of the export in the database.
+ Table = "exports"
+ // GroupTable is the table that holds the group relation/edge.
+ GroupTable = "exports"
+ // GroupInverseTable is the table name for the Group entity.
+ // It exists in this package in order to avoid circular dependency with the "group" package.
+ GroupInverseTable = "groups"
+ // GroupColumn is the table column denoting the group relation/edge.
+ GroupColumn = "group_id"
+)
+
+// Columns holds all SQL columns for export fields.
+var Columns = []string{
+ FieldID,
+ FieldCreatedAt,
+ FieldUpdatedAt,
+ FieldGroupID,
+ FieldKind,
+ FieldStatus,
+ FieldProgress,
+ FieldArtifactPath,
+ FieldSizeBytes,
+ FieldError,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+ for i := range Columns {
+ if column == Columns[i] {
+ return true
+ }
+ }
+ return false
+}
+
+var (
+ // DefaultCreatedAt holds the default value on creation for the "created_at" field.
+ DefaultCreatedAt func() time.Time
+ // DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+ DefaultUpdatedAt func() time.Time
+ // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+ UpdateDefaultUpdatedAt func() time.Time
+ // DefaultProgress holds the default value on creation for the "progress" field.
+ DefaultProgress int
+ // DefaultSizeBytes holds the default value on creation for the "size_bytes" field.
+ DefaultSizeBytes int64
+ // ErrorValidator is a validator for the "error" field. It is called by the builders before save.
+ ErrorValidator func(string) error
+ // DefaultID holds the default value on creation for the "id" field.
+ DefaultID func() uuid.UUID
+)
+
+// Kind defines the type for the "kind" enum field.
+type Kind string
+
+// KindExport is the default value of the Kind enum.
+const DefaultKind = KindExport
+
+// Kind values.
+const (
+ KindExport Kind = "export"
+ KindImport Kind = "import"
+)
+
+func (k Kind) String() string {
+ return string(k)
+}
+
+// KindValidator is a validator for the "kind" field enum values. It is called by the builders before save.
+func KindValidator(k Kind) error {
+ switch k {
+ case KindExport, KindImport:
+ return nil
+ default:
+ return fmt.Errorf("export: invalid enum value for kind field: %q", k)
+ }
+}
+
+// Status defines the type for the "status" enum field.
+type Status string
+
+// StatusPending is the default value of the Status enum.
+const DefaultStatus = StatusPending
+
+// Status values.
+const (
+ StatusPending Status = "pending"
+ StatusRunning Status = "running"
+ StatusCompleted Status = "completed"
+ StatusFailed Status = "failed"
+)
+
+func (s Status) String() string {
+ return string(s)
+}
+
+// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
+func StatusValidator(s Status) error {
+ switch s {
+ case StatusPending, StatusRunning, StatusCompleted, StatusFailed:
+ return nil
+ default:
+ return fmt.Errorf("export: invalid enum value for status field: %q", s)
+ }
+}
+
+// OrderOption defines the ordering options for the Export queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByGroupID orders the results by the group_id field.
+func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldGroupID, opts...).ToFunc()
+}
+
+// ByKind orders the results by the kind field.
+func ByKind(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldKind, opts...).ToFunc()
+}
+
+// ByStatus orders the results by the status field.
+func ByStatus(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldStatus, opts...).ToFunc()
+}
+
+// ByProgress orders the results by the progress field.
+func ByProgress(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldProgress, opts...).ToFunc()
+}
+
+// ByArtifactPath orders the results by the artifact_path field.
+func ByArtifactPath(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldArtifactPath, opts...).ToFunc()
+}
+
+// BySizeBytes orders the results by the size_bytes field.
+func BySizeBytes(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldSizeBytes, opts...).ToFunc()
+}
+
+// ByError orders the results by the error field.
+func ByError(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldError, opts...).ToFunc()
+}
+
+// ByGroupField orders the results by group field.
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
+ }
+}
+func newGroupStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(GroupInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+}
diff --git a/backend/internal/data/ent/export/where.go b/backend/internal/data/ent/export/where.go
new file mode 100644
index 000000000..351db585f
--- /dev/null
+++ b/backend/internal/data/ent/export/where.go
@@ -0,0 +1,500 @@
+// Code generated by ent, DO NOT EDIT.
+
+package export
+
+import (
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "github.com/google/uuid"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldLTE(FieldID, id))
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
+func GroupID(v uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldGroupID, v))
+}
+
+// Progress applies equality check predicate on the "progress" field. It's identical to ProgressEQ.
+func Progress(v int) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldProgress, v))
+}
+
+// ArtifactPath applies equality check predicate on the "artifact_path" field. It's identical to ArtifactPathEQ.
+func ArtifactPath(v string) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldArtifactPath, v))
+}
+
+// SizeBytes applies equality check predicate on the "size_bytes" field. It's identical to SizeBytesEQ.
+func SizeBytes(v int64) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldSizeBytes, v))
+}
+
+// Error applies equality check predicate on the "error" field. It's identical to ErrorEQ.
+func Error(v string) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldError, v))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.Export {
+ return predicate.Export(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// GroupIDEQ applies the EQ predicate on the "group_id" field.
+func GroupIDEQ(v uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldGroupID, v))
+}
+
+// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
+func GroupIDNEQ(v uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldGroupID, v))
+}
+
+// GroupIDIn applies the In predicate on the "group_id" field.
+func GroupIDIn(vs ...uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldGroupID, vs...))
+}
+
+// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
+func GroupIDNotIn(vs ...uuid.UUID) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldGroupID, vs...))
+}
+
+// KindEQ applies the EQ predicate on the "kind" field.
+func KindEQ(v Kind) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldKind, v))
+}
+
+// KindNEQ applies the NEQ predicate on the "kind" field.
+func KindNEQ(v Kind) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldKind, v))
+}
+
+// KindIn applies the In predicate on the "kind" field.
+func KindIn(vs ...Kind) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldKind, vs...))
+}
+
+// KindNotIn applies the NotIn predicate on the "kind" field.
+func KindNotIn(vs ...Kind) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldKind, vs...))
+}
+
+// StatusEQ applies the EQ predicate on the "status" field.
+func StatusEQ(v Status) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldStatus, v))
+}
+
+// StatusNEQ applies the NEQ predicate on the "status" field.
+func StatusNEQ(v Status) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldStatus, v))
+}
+
+// StatusIn applies the In predicate on the "status" field.
+func StatusIn(vs ...Status) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldStatus, vs...))
+}
+
+// StatusNotIn applies the NotIn predicate on the "status" field.
+func StatusNotIn(vs ...Status) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldStatus, vs...))
+}
+
+// ProgressEQ applies the EQ predicate on the "progress" field.
+func ProgressEQ(v int) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldProgress, v))
+}
+
+// ProgressNEQ applies the NEQ predicate on the "progress" field.
+func ProgressNEQ(v int) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldProgress, v))
+}
+
+// ProgressIn applies the In predicate on the "progress" field.
+func ProgressIn(vs ...int) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldProgress, vs...))
+}
+
+// ProgressNotIn applies the NotIn predicate on the "progress" field.
+func ProgressNotIn(vs ...int) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldProgress, vs...))
+}
+
+// ProgressGT applies the GT predicate on the "progress" field.
+func ProgressGT(v int) predicate.Export {
+ return predicate.Export(sql.FieldGT(FieldProgress, v))
+}
+
+// ProgressGTE applies the GTE predicate on the "progress" field.
+func ProgressGTE(v int) predicate.Export {
+ return predicate.Export(sql.FieldGTE(FieldProgress, v))
+}
+
+// ProgressLT applies the LT predicate on the "progress" field.
+func ProgressLT(v int) predicate.Export {
+ return predicate.Export(sql.FieldLT(FieldProgress, v))
+}
+
+// ProgressLTE applies the LTE predicate on the "progress" field.
+func ProgressLTE(v int) predicate.Export {
+ return predicate.Export(sql.FieldLTE(FieldProgress, v))
+}
+
+// ArtifactPathEQ applies the EQ predicate on the "artifact_path" field.
+func ArtifactPathEQ(v string) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldArtifactPath, v))
+}
+
+// ArtifactPathNEQ applies the NEQ predicate on the "artifact_path" field.
+func ArtifactPathNEQ(v string) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldArtifactPath, v))
+}
+
+// ArtifactPathIn applies the In predicate on the "artifact_path" field.
+func ArtifactPathIn(vs ...string) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldArtifactPath, vs...))
+}
+
+// ArtifactPathNotIn applies the NotIn predicate on the "artifact_path" field.
+func ArtifactPathNotIn(vs ...string) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldArtifactPath, vs...))
+}
+
+// ArtifactPathGT applies the GT predicate on the "artifact_path" field.
+func ArtifactPathGT(v string) predicate.Export {
+ return predicate.Export(sql.FieldGT(FieldArtifactPath, v))
+}
+
+// ArtifactPathGTE applies the GTE predicate on the "artifact_path" field.
+func ArtifactPathGTE(v string) predicate.Export {
+ return predicate.Export(sql.FieldGTE(FieldArtifactPath, v))
+}
+
+// ArtifactPathLT applies the LT predicate on the "artifact_path" field.
+func ArtifactPathLT(v string) predicate.Export {
+ return predicate.Export(sql.FieldLT(FieldArtifactPath, v))
+}
+
+// ArtifactPathLTE applies the LTE predicate on the "artifact_path" field.
+func ArtifactPathLTE(v string) predicate.Export {
+ return predicate.Export(sql.FieldLTE(FieldArtifactPath, v))
+}
+
+// ArtifactPathContains applies the Contains predicate on the "artifact_path" field.
+func ArtifactPathContains(v string) predicate.Export {
+ return predicate.Export(sql.FieldContains(FieldArtifactPath, v))
+}
+
+// ArtifactPathHasPrefix applies the HasPrefix predicate on the "artifact_path" field.
+func ArtifactPathHasPrefix(v string) predicate.Export {
+ return predicate.Export(sql.FieldHasPrefix(FieldArtifactPath, v))
+}
+
+// ArtifactPathHasSuffix applies the HasSuffix predicate on the "artifact_path" field.
+func ArtifactPathHasSuffix(v string) predicate.Export {
+ return predicate.Export(sql.FieldHasSuffix(FieldArtifactPath, v))
+}
+
+// ArtifactPathIsNil applies the IsNil predicate on the "artifact_path" field.
+func ArtifactPathIsNil() predicate.Export {
+ return predicate.Export(sql.FieldIsNull(FieldArtifactPath))
+}
+
+// ArtifactPathNotNil applies the NotNil predicate on the "artifact_path" field.
+func ArtifactPathNotNil() predicate.Export {
+ return predicate.Export(sql.FieldNotNull(FieldArtifactPath))
+}
+
+// ArtifactPathEqualFold applies the EqualFold predicate on the "artifact_path" field.
+func ArtifactPathEqualFold(v string) predicate.Export {
+ return predicate.Export(sql.FieldEqualFold(FieldArtifactPath, v))
+}
+
+// ArtifactPathContainsFold applies the ContainsFold predicate on the "artifact_path" field.
+func ArtifactPathContainsFold(v string) predicate.Export {
+ return predicate.Export(sql.FieldContainsFold(FieldArtifactPath, v))
+}
+
+// SizeBytesEQ applies the EQ predicate on the "size_bytes" field.
+func SizeBytesEQ(v int64) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldSizeBytes, v))
+}
+
+// SizeBytesNEQ applies the NEQ predicate on the "size_bytes" field.
+func SizeBytesNEQ(v int64) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldSizeBytes, v))
+}
+
+// SizeBytesIn applies the In predicate on the "size_bytes" field.
+func SizeBytesIn(vs ...int64) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldSizeBytes, vs...))
+}
+
+// SizeBytesNotIn applies the NotIn predicate on the "size_bytes" field.
+func SizeBytesNotIn(vs ...int64) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldSizeBytes, vs...))
+}
+
+// SizeBytesGT applies the GT predicate on the "size_bytes" field.
+func SizeBytesGT(v int64) predicate.Export {
+ return predicate.Export(sql.FieldGT(FieldSizeBytes, v))
+}
+
+// SizeBytesGTE applies the GTE predicate on the "size_bytes" field.
+func SizeBytesGTE(v int64) predicate.Export {
+ return predicate.Export(sql.FieldGTE(FieldSizeBytes, v))
+}
+
+// SizeBytesLT applies the LT predicate on the "size_bytes" field.
+func SizeBytesLT(v int64) predicate.Export {
+ return predicate.Export(sql.FieldLT(FieldSizeBytes, v))
+}
+
+// SizeBytesLTE applies the LTE predicate on the "size_bytes" field.
+func SizeBytesLTE(v int64) predicate.Export {
+ return predicate.Export(sql.FieldLTE(FieldSizeBytes, v))
+}
+
+// ErrorEQ applies the EQ predicate on the "error" field.
+func ErrorEQ(v string) predicate.Export {
+ return predicate.Export(sql.FieldEQ(FieldError, v))
+}
+
+// ErrorNEQ applies the NEQ predicate on the "error" field.
+func ErrorNEQ(v string) predicate.Export {
+ return predicate.Export(sql.FieldNEQ(FieldError, v))
+}
+
+// ErrorIn applies the In predicate on the "error" field.
+func ErrorIn(vs ...string) predicate.Export {
+ return predicate.Export(sql.FieldIn(FieldError, vs...))
+}
+
+// ErrorNotIn applies the NotIn predicate on the "error" field.
+func ErrorNotIn(vs ...string) predicate.Export {
+ return predicate.Export(sql.FieldNotIn(FieldError, vs...))
+}
+
+// ErrorGT applies the GT predicate on the "error" field.
+func ErrorGT(v string) predicate.Export {
+ return predicate.Export(sql.FieldGT(FieldError, v))
+}
+
+// ErrorGTE applies the GTE predicate on the "error" field.
+func ErrorGTE(v string) predicate.Export {
+ return predicate.Export(sql.FieldGTE(FieldError, v))
+}
+
+// ErrorLT applies the LT predicate on the "error" field.
+func ErrorLT(v string) predicate.Export {
+ return predicate.Export(sql.FieldLT(FieldError, v))
+}
+
+// ErrorLTE applies the LTE predicate on the "error" field.
+func ErrorLTE(v string) predicate.Export {
+ return predicate.Export(sql.FieldLTE(FieldError, v))
+}
+
+// ErrorContains applies the Contains predicate on the "error" field.
+func ErrorContains(v string) predicate.Export {
+ return predicate.Export(sql.FieldContains(FieldError, v))
+}
+
+// ErrorHasPrefix applies the HasPrefix predicate on the "error" field.
+func ErrorHasPrefix(v string) predicate.Export {
+ return predicate.Export(sql.FieldHasPrefix(FieldError, v))
+}
+
+// ErrorHasSuffix applies the HasSuffix predicate on the "error" field.
+func ErrorHasSuffix(v string) predicate.Export {
+ return predicate.Export(sql.FieldHasSuffix(FieldError, v))
+}
+
+// ErrorIsNil applies the IsNil predicate on the "error" field.
+func ErrorIsNil() predicate.Export {
+ return predicate.Export(sql.FieldIsNull(FieldError))
+}
+
+// ErrorNotNil applies the NotNil predicate on the "error" field.
+func ErrorNotNil() predicate.Export {
+ return predicate.Export(sql.FieldNotNull(FieldError))
+}
+
+// ErrorEqualFold applies the EqualFold predicate on the "error" field.
+func ErrorEqualFold(v string) predicate.Export {
+ return predicate.Export(sql.FieldEqualFold(FieldError, v))
+}
+
+// ErrorContainsFold applies the ContainsFold predicate on the "error" field.
+func ErrorContainsFold(v string) predicate.Export {
+ return predicate.Export(sql.FieldContainsFold(FieldError, v))
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.Export {
+ return predicate.Export(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
+func HasGroupWith(preds ...predicate.Group) predicate.Export {
+ return predicate.Export(func(s *sql.Selector) {
+ step := newGroupStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Export) predicate.Export {
+ return predicate.Export(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Export) predicate.Export {
+ return predicate.Export(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Export) predicate.Export {
+ return predicate.Export(sql.NotPredicates(p))
+}
diff --git a/backend/internal/data/ent/export_create.go b/backend/internal/data/ent/export_create.go
new file mode 100644
index 000000000..753d35490
--- /dev/null
+++ b/backend/internal/data/ent/export_create.go
@@ -0,0 +1,437 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
+)
+
+// ExportCreate is the builder for creating a Export entity.
+type ExportCreate struct {
+ config
+ mutation *ExportMutation
+ hooks []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (_c *ExportCreate) SetCreatedAt(v time.Time) *ExportCreate {
+ _c.mutation.SetCreatedAt(v)
+ return _c
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableCreatedAt(v *time.Time) *ExportCreate {
+ if v != nil {
+ _c.SetCreatedAt(*v)
+ }
+ return _c
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_c *ExportCreate) SetUpdatedAt(v time.Time) *ExportCreate {
+ _c.mutation.SetUpdatedAt(v)
+ return _c
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableUpdatedAt(v *time.Time) *ExportCreate {
+ if v != nil {
+ _c.SetUpdatedAt(*v)
+ }
+ return _c
+}
+
+// SetGroupID sets the "group_id" field.
+func (_c *ExportCreate) SetGroupID(v uuid.UUID) *ExportCreate {
+ _c.mutation.SetGroupID(v)
+ return _c
+}
+
+// SetKind sets the "kind" field.
+func (_c *ExportCreate) SetKind(v export.Kind) *ExportCreate {
+ _c.mutation.SetKind(v)
+ return _c
+}
+
+// SetNillableKind sets the "kind" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableKind(v *export.Kind) *ExportCreate {
+ if v != nil {
+ _c.SetKind(*v)
+ }
+ return _c
+}
+
+// SetStatus sets the "status" field.
+func (_c *ExportCreate) SetStatus(v export.Status) *ExportCreate {
+ _c.mutation.SetStatus(v)
+ return _c
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableStatus(v *export.Status) *ExportCreate {
+ if v != nil {
+ _c.SetStatus(*v)
+ }
+ return _c
+}
+
+// SetProgress sets the "progress" field.
+func (_c *ExportCreate) SetProgress(v int) *ExportCreate {
+ _c.mutation.SetProgress(v)
+ return _c
+}
+
+// SetNillableProgress sets the "progress" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableProgress(v *int) *ExportCreate {
+ if v != nil {
+ _c.SetProgress(*v)
+ }
+ return _c
+}
+
+// SetArtifactPath sets the "artifact_path" field.
+func (_c *ExportCreate) SetArtifactPath(v string) *ExportCreate {
+ _c.mutation.SetArtifactPath(v)
+ return _c
+}
+
+// SetNillableArtifactPath sets the "artifact_path" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableArtifactPath(v *string) *ExportCreate {
+ if v != nil {
+ _c.SetArtifactPath(*v)
+ }
+ return _c
+}
+
+// SetSizeBytes sets the "size_bytes" field.
+func (_c *ExportCreate) SetSizeBytes(v int64) *ExportCreate {
+ _c.mutation.SetSizeBytes(v)
+ return _c
+}
+
+// SetNillableSizeBytes sets the "size_bytes" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableSizeBytes(v *int64) *ExportCreate {
+ if v != nil {
+ _c.SetSizeBytes(*v)
+ }
+ return _c
+}
+
+// SetError sets the "error" field.
+func (_c *ExportCreate) SetError(v string) *ExportCreate {
+ _c.mutation.SetError(v)
+ return _c
+}
+
+// SetNillableError sets the "error" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableError(v *string) *ExportCreate {
+ if v != nil {
+ _c.SetError(*v)
+ }
+ return _c
+}
+
+// SetID sets the "id" field.
+func (_c *ExportCreate) SetID(v uuid.UUID) *ExportCreate {
+ _c.mutation.SetID(v)
+ return _c
+}
+
+// SetNillableID sets the "id" field if the given value is not nil.
+func (_c *ExportCreate) SetNillableID(v *uuid.UUID) *ExportCreate {
+ if v != nil {
+ _c.SetID(*v)
+ }
+ return _c
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (_c *ExportCreate) SetGroup(v *Group) *ExportCreate {
+ return _c.SetGroupID(v.ID)
+}
+
+// Mutation returns the ExportMutation object of the builder.
+func (_c *ExportCreate) Mutation() *ExportMutation {
+ return _c.mutation
+}
+
+// Save creates the Export in the database.
+func (_c *ExportCreate) Save(ctx context.Context) (*Export, error) {
+ _c.defaults()
+ return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (_c *ExportCreate) SaveX(ctx context.Context) *Export {
+ v, err := _c.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (_c *ExportCreate) Exec(ctx context.Context) error {
+ _, err := _c.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *ExportCreate) ExecX(ctx context.Context) {
+ if err := _c.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (_c *ExportCreate) defaults() {
+ if _, ok := _c.mutation.CreatedAt(); !ok {
+ v := export.DefaultCreatedAt()
+ _c.mutation.SetCreatedAt(v)
+ }
+ if _, ok := _c.mutation.UpdatedAt(); !ok {
+ v := export.DefaultUpdatedAt()
+ _c.mutation.SetUpdatedAt(v)
+ }
+ if _, ok := _c.mutation.Kind(); !ok {
+ v := export.DefaultKind
+ _c.mutation.SetKind(v)
+ }
+ if _, ok := _c.mutation.Status(); !ok {
+ v := export.DefaultStatus
+ _c.mutation.SetStatus(v)
+ }
+ if _, ok := _c.mutation.Progress(); !ok {
+ v := export.DefaultProgress
+ _c.mutation.SetProgress(v)
+ }
+ if _, ok := _c.mutation.SizeBytes(); !ok {
+ v := export.DefaultSizeBytes
+ _c.mutation.SetSizeBytes(v)
+ }
+ if _, ok := _c.mutation.ID(); !ok {
+ v := export.DefaultID()
+ _c.mutation.SetID(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (_c *ExportCreate) check() error {
+ if _, ok := _c.mutation.CreatedAt(); !ok {
+ return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Export.created_at"`)}
+ }
+ if _, ok := _c.mutation.UpdatedAt(); !ok {
+ return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Export.updated_at"`)}
+ }
+ if _, ok := _c.mutation.GroupID(); !ok {
+ return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "Export.group_id"`)}
+ }
+ if _, ok := _c.mutation.Kind(); !ok {
+ return &ValidationError{Name: "kind", err: errors.New(`ent: missing required field "Export.kind"`)}
+ }
+ if v, ok := _c.mutation.Kind(); ok {
+ if err := export.KindValidator(v); err != nil {
+ return &ValidationError{Name: "kind", err: fmt.Errorf(`ent: validator failed for field "Export.kind": %w`, err)}
+ }
+ }
+ if _, ok := _c.mutation.Status(); !ok {
+ return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Export.status"`)}
+ }
+ if v, ok := _c.mutation.Status(); ok {
+ if err := export.StatusValidator(v); err != nil {
+ return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Export.status": %w`, err)}
+ }
+ }
+ if _, ok := _c.mutation.Progress(); !ok {
+ return &ValidationError{Name: "progress", err: errors.New(`ent: missing required field "Export.progress"`)}
+ }
+ if _, ok := _c.mutation.SizeBytes(); !ok {
+ return &ValidationError{Name: "size_bytes", err: errors.New(`ent: missing required field "Export.size_bytes"`)}
+ }
+ if v, ok := _c.mutation.Error(); ok {
+ if err := export.ErrorValidator(v); err != nil {
+ return &ValidationError{Name: "error", err: fmt.Errorf(`ent: validator failed for field "Export.error": %w`, err)}
+ }
+ }
+ if len(_c.mutation.GroupIDs()) == 0 {
+ return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "Export.group"`)}
+ }
+ return nil
+}
+
+func (_c *ExportCreate) sqlSave(ctx context.Context) (*Export, error) {
+ if err := _c.check(); err != nil {
+ return nil, err
+ }
+ _node, _spec := _c.createSpec()
+ if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ if _spec.ID.Value != nil {
+ if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
+ _node.ID = *id
+ } else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
+ return nil, err
+ }
+ }
+ _c.mutation.id = &_node.ID
+ _c.mutation.done = true
+ return _node, nil
+}
+
+func (_c *ExportCreate) createSpec() (*Export, *sqlgraph.CreateSpec) {
+ var (
+ _node = &Export{config: _c.config}
+ _spec = sqlgraph.NewCreateSpec(export.Table, sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID))
+ )
+ if id, ok := _c.mutation.ID(); ok {
+ _node.ID = id
+ _spec.ID.Value = &id
+ }
+ if value, ok := _c.mutation.CreatedAt(); ok {
+ _spec.SetField(export.FieldCreatedAt, field.TypeTime, value)
+ _node.CreatedAt = value
+ }
+ if value, ok := _c.mutation.UpdatedAt(); ok {
+ _spec.SetField(export.FieldUpdatedAt, field.TypeTime, value)
+ _node.UpdatedAt = value
+ }
+ if value, ok := _c.mutation.Kind(); ok {
+ _spec.SetField(export.FieldKind, field.TypeEnum, value)
+ _node.Kind = value
+ }
+ if value, ok := _c.mutation.Status(); ok {
+ _spec.SetField(export.FieldStatus, field.TypeEnum, value)
+ _node.Status = value
+ }
+ if value, ok := _c.mutation.Progress(); ok {
+ _spec.SetField(export.FieldProgress, field.TypeInt, value)
+ _node.Progress = value
+ }
+ if value, ok := _c.mutation.ArtifactPath(); ok {
+ _spec.SetField(export.FieldArtifactPath, field.TypeString, value)
+ _node.ArtifactPath = value
+ }
+ if value, ok := _c.mutation.SizeBytes(); ok {
+ _spec.SetField(export.FieldSizeBytes, field.TypeInt64, value)
+ _node.SizeBytes = value
+ }
+ if value, ok := _c.mutation.Error(); ok {
+ _spec.SetField(export.FieldError, field.TypeString, value)
+ _node.Error = value
+ }
+ if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: export.GroupTable,
+ Columns: []string{export.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _node.GroupID = nodes[0]
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// ExportCreateBulk is the builder for creating many Export entities in bulk.
+type ExportCreateBulk struct {
+ config
+ err error
+ builders []*ExportCreate
+}
+
+// Save creates the Export entities in the database.
+func (_c *ExportCreateBulk) Save(ctx context.Context) ([]*Export, error) {
+ if _c.err != nil {
+ return nil, _c.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+ nodes := make([]*Export, len(_c.builders))
+ mutators := make([]Mutator, len(_c.builders))
+ for i := range _c.builders {
+ func(i int, root context.Context) {
+ builder := _c.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*ExportMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ // Invoke the actual operation on the latest mutation in the chain.
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *ExportCreateBulk) SaveX(ctx context.Context) []*Export {
+ v, err := _c.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (_c *ExportCreateBulk) Exec(ctx context.Context) error {
+ _, err := _c.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *ExportCreateBulk) ExecX(ctx context.Context) {
+ if err := _c.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
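
A minimal creation sketch against the builder above, assuming an initialized *ent.Client. Only the required group edge is set; defaults() backfills created_at, updated_at, kind, status, progress, size_bytes, and id before check() runs:

package examples // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/google/uuid"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
)

// newExport inserts an Export row for the given group, relying on the
// generated defaults for every other field.
func newExport(ctx context.Context, client *ent.Client, gid uuid.UUID) (*ent.Export, error) {
	return client.Export.Create().
		SetGroupID(gid). // required: check() rejects a missing group edge
		Save(ctx)
}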
diff --git a/backend/internal/data/ent/export_delete.go b/backend/internal/data/ent/export_delete.go
new file mode 100644
index 000000000..5d95f6599
--- /dev/null
+++ b/backend/internal/data/ent/export_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/predicate"
+)
+
+// ExportDelete is the builder for deleting an Export entity.
+type ExportDelete struct {
+ config
+ hooks []Hook
+ mutation *ExportMutation
+}
+
+// Where appends a list of predicates to the ExportDelete builder.
+func (_d *ExportDelete) Where(ps ...predicate.Export) *ExportDelete {
+ _d.mutation.Where(ps...)
+ return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *ExportDelete) Exec(ctx context.Context) (int, error) {
+ return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *ExportDelete) ExecX(ctx context.Context) int {
+ n, err := _d.Exec(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (_d *ExportDelete) sqlExec(ctx context.Context) (int, error) {
+ _spec := sqlgraph.NewDeleteSpec(export.Table, sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID))
+ if ps := _d.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+ if err != nil && sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ _d.mutation.done = true
+ return affected, err
+}
+
+// ExportDeleteOne is the builder for deleting a single Export entity.
+type ExportDeleteOne struct {
+ _d *ExportDelete
+}
+
+// Where appends a list of predicates to the underlying ExportDelete builder.
+func (_d *ExportDeleteOne) Where(ps ...predicate.Export) *ExportDeleteOne {
+ _d._d.mutation.Where(ps...)
+ return _d
+}
+
+// Exec executes the deletion query.
+func (_d *ExportDeleteOne) Exec(ctx context.Context) error {
+ n, err := _d._d.Exec(ctx)
+ switch {
+ case err != nil:
+ return err
+ case n == 0:
+ return &NotFoundError{export.Label}
+ default:
+ return nil
+ }
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *ExportDeleteOne) ExecX(ctx context.Context) {
+ if err := _d.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
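
A bulk-deletion sketch for the builder above; export.GroupIDEQ is the standard ent-generated field predicate, assumed here rather than shown in this diff. Exec reports how many rows were removed:

package examples // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/google/uuid"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
)

// purgeGroupExports removes every export row belonging to one group and
// returns the affected-row count.
func purgeGroupExports(ctx context.Context, client *ent.Client, gid uuid.UUID) (int, error) {
	return client.Export.Delete().
		Where(export.GroupIDEQ(gid)).
		Exec(ctx)
}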
diff --git a/backend/internal/data/ent/export_query.go b/backend/internal/data/ent/export_query.go
new file mode 100644
index 000000000..5c4dac54f
--- /dev/null
+++ b/backend/internal/data/ent/export_query.go
@@ -0,0 +1,607 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "fmt"
+ "math"
+
+ "entgo.io/ent"
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/predicate"
+)
+
+// ExportQuery is the builder for querying Export entities.
+type ExportQuery struct {
+ config
+ ctx *QueryContext
+ order []export.OrderOption
+ inters []Interceptor
+ predicates []predicate.Export
+ withGroup *GroupQuery
+ // intermediate query (i.e. traversal path).
+ sql *sql.Selector
+ path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the ExportQuery builder.
+func (_q *ExportQuery) Where(ps ...predicate.Export) *ExportQuery {
+ _q.predicates = append(_q.predicates, ps...)
+ return _q
+}
+
+// Limit the number of records to be returned by this query.
+func (_q *ExportQuery) Limit(limit int) *ExportQuery {
+ _q.ctx.Limit = &limit
+ return _q
+}
+
+// Offset to start from.
+func (_q *ExportQuery) Offset(offset int) *ExportQuery {
+ _q.ctx.Offset = &offset
+ return _q
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (_q *ExportQuery) Unique(unique bool) *ExportQuery {
+ _q.ctx.Unique = &unique
+ return _q
+}
+
+// Order specifies how the records should be ordered.
+func (_q *ExportQuery) Order(o ...export.OrderOption) *ExportQuery {
+ _q.order = append(_q.order, o...)
+ return _q
+}
+
+// QueryGroup chains the current query on the "group" edge.
+func (_q *ExportQuery) QueryGroup() *GroupQuery {
+ query := (&GroupClient{config: _q.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := _q.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := _q.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(export.Table, export.FieldID, selector),
+ sqlgraph.To(group.Table, group.FieldID),
+ sqlgraph.Edge(sqlgraph.M2O, true, export.GroupTable, export.GroupColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
+// First returns the first Export entity from the query.
+// Returns a *NotFoundError when no Export was found.
+func (_q *ExportQuery) First(ctx context.Context) (*Export, error) {
+ nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
+ if err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nil, &NotFoundError{export.Label}
+ }
+ return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (_q *ExportQuery) FirstX(ctx context.Context) *Export {
+ node, err := _q.First(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return node
+}
+
+// FirstID returns the first Export ID from the query.
+// Returns a *NotFoundError when no Export ID was found.
+func (_q *ExportQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
+ var ids []uuid.UUID
+ if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
+ return
+ }
+ if len(ids) == 0 {
+ err = &NotFoundError{export.Label}
+ return
+ }
+ return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (_q *ExportQuery) FirstIDX(ctx context.Context) uuid.UUID {
+ id, err := _q.FirstID(ctx)
+ if err != nil && !IsNotFound(err) {
+ panic(err)
+ }
+ return id
+}
+
+// Only returns a single Export entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one Export entity is found.
+// Returns a *NotFoundError when no Export entities are found.
+func (_q *ExportQuery) Only(ctx context.Context) (*Export, error) {
+ nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
+ if err != nil {
+ return nil, err
+ }
+ switch len(nodes) {
+ case 1:
+ return nodes[0], nil
+ case 0:
+ return nil, &NotFoundError{export.Label}
+ default:
+ return nil, &NotSingularError{export.Label}
+ }
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (_q *ExportQuery) OnlyX(ctx context.Context) *Export {
+ node, err := _q.Only(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// OnlyID is like Only, but returns the only Export ID in the query.
+// Returns a *NotSingularError when more than one Export ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (_q *ExportQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
+ var ids []uuid.UUID
+ if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
+ return
+ }
+ switch len(ids) {
+ case 1:
+ id = ids[0]
+ case 0:
+ err = &NotFoundError{export.Label}
+ default:
+ err = &NotSingularError{export.Label}
+ }
+ return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (_q *ExportQuery) OnlyIDX(ctx context.Context) uuid.UUID {
+ id, err := _q.OnlyID(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// All executes the query and returns a list of Exports.
+func (_q *ExportQuery) All(ctx context.Context) ([]*Export, error) {
+ ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
+ if err := _q.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ qr := querierAll[[]*Export, *ExportQuery]()
+ return withInterceptors[[]*Export](ctx, _q, qr, _q.inters)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (_q *ExportQuery) AllX(ctx context.Context) []*Export {
+ nodes, err := _q.All(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return nodes
+}
+
+// IDs executes the query and returns a list of Export IDs.
+func (_q *ExportQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
+ if _q.ctx.Unique == nil && _q.path != nil {
+ _q.Unique(true)
+ }
+ ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
+ if err = _q.Select(export.FieldID).Scan(ctx, &ids); err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (_q *ExportQuery) IDsX(ctx context.Context) []uuid.UUID {
+ ids, err := _q.IDs(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return ids
+}
+
+// Count returns the count of the given query.
+func (_q *ExportQuery) Count(ctx context.Context) (int, error) {
+ ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
+ if err := _q.prepareQuery(ctx); err != nil {
+ return 0, err
+ }
+ return withInterceptors[int](ctx, _q, querierCount[*ExportQuery](), _q.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (_q *ExportQuery) CountX(ctx context.Context) int {
+ count, err := _q.Count(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (_q *ExportQuery) Exist(ctx context.Context) (bool, error) {
+ ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
+ switch _, err := _q.FirstID(ctx); {
+ case IsNotFound(err):
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("ent: check existence: %w", err)
+ default:
+ return true, nil
+ }
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (_q *ExportQuery) ExistX(ctx context.Context) bool {
+ exist, err := _q.Exist(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return exist
+}
+
+// Clone returns a duplicate of the ExportQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (_q *ExportQuery) Clone() *ExportQuery {
+ if _q == nil {
+ return nil
+ }
+ return &ExportQuery{
+ config: _q.config,
+ ctx: _q.ctx.Clone(),
+ order: append([]export.OrderOption{}, _q.order...),
+ inters: append([]Interceptor{}, _q.inters...),
+ predicates: append([]predicate.Export{}, _q.predicates...),
+ withGroup: _q.withGroup.Clone(),
+ // clone intermediate query.
+ sql: _q.sql.Clone(),
+ path: _q.path,
+ }
+}
+
+// WithGroup tells the query-builder to eager-load the nodes that are connected to
+// the "group" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *ExportQuery) WithGroup(opts ...func(*GroupQuery)) *ExportQuery {
+ query := (&GroupClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withGroup = query
+ return _q
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// Count int `json:"count,omitempty"`
+// }
+//
+// client.Export.Query().
+// GroupBy(export.FieldCreatedAt).
+// Aggregate(ent.Count()).
+// Scan(ctx, &v)
+func (_q *ExportQuery) GroupBy(field string, fields ...string) *ExportGroupBy {
+ _q.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &ExportGroupBy{build: _q}
+ grbuild.flds = &_q.ctx.Fields
+ grbuild.label = export.Label
+ grbuild.scan = grbuild.Scan
+ return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// }
+//
+// client.Export.Query().
+// Select(export.FieldCreatedAt).
+// Scan(ctx, &v)
+func (_q *ExportQuery) Select(fields ...string) *ExportSelect {
+ _q.ctx.Fields = append(_q.ctx.Fields, fields...)
+ sbuild := &ExportSelect{ExportQuery: _q}
+ sbuild.label = export.Label
+ sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+ return sbuild
+}
+
+// Aggregate returns a ExportSelect configured with the given aggregations.
+func (_q *ExportQuery) Aggregate(fns ...AggregateFunc) *ExportSelect {
+ return _q.Select().Aggregate(fns...)
+}
+
+func (_q *ExportQuery) prepareQuery(ctx context.Context) error {
+ for _, inter := range _q.inters {
+ if inter == nil {
+ return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+ }
+ if trv, ok := inter.(Traverser); ok {
+ if err := trv.Traverse(ctx, _q); err != nil {
+ return err
+ }
+ }
+ }
+ for _, f := range _q.ctx.Fields {
+ if !export.ValidColumn(f) {
+ return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ }
+ if _q.path != nil {
+ prev, err := _q.path(ctx)
+ if err != nil {
+ return err
+ }
+ _q.sql = prev
+ }
+ return nil
+}
+
+func (_q *ExportQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Export, error) {
+ var (
+ nodes = []*Export{}
+ _spec = _q.querySpec()
+ loadedTypes = [1]bool{
+ _q.withGroup != nil,
+ }
+ )
+ _spec.ScanValues = func(columns []string) ([]any, error) {
+ return (*Export).scanValues(nil, columns)
+ }
+ _spec.Assign = func(columns []string, values []any) error {
+ node := &Export{config: _q.config}
+ nodes = append(nodes, node)
+ node.Edges.loadedTypes = loadedTypes
+ return node.assignValues(columns, values)
+ }
+ for i := range hooks {
+ hooks[i](ctx, _spec)
+ }
+ if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
+ return nil, err
+ }
+ if len(nodes) == 0 {
+ return nodes, nil
+ }
+ if query := _q.withGroup; query != nil {
+ if err := _q.loadGroup(ctx, query, nodes, nil,
+ func(n *Export, e *Group) { n.Edges.Group = e }); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+func (_q *ExportQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Export, init func(*Export), assign func(*Export, *Group)) error {
+ ids := make([]uuid.UUID, 0, len(nodes))
+ nodeids := make(map[uuid.UUID][]*Export)
+ for i := range nodes {
+ fk := nodes[i].GroupID
+ if _, ok := nodeids[fk]; !ok {
+ ids = append(ids, fk)
+ }
+ nodeids[fk] = append(nodeids[fk], nodes[i])
+ }
+ if len(ids) == 0 {
+ return nil
+ }
+ query.Where(group.IDIn(ids...))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ nodes, ok := nodeids[n.ID]
+ if !ok {
+ return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
+ }
+ for i := range nodes {
+ assign(nodes[i], n)
+ }
+ }
+ return nil
+}
+
+func (_q *ExportQuery) sqlCount(ctx context.Context) (int, error) {
+ _spec := _q.querySpec()
+ _spec.Node.Columns = _q.ctx.Fields
+ if len(_q.ctx.Fields) > 0 {
+ _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
+ }
+ return sqlgraph.CountNodes(ctx, _q.driver, _spec)
+}
+
+func (_q *ExportQuery) querySpec() *sqlgraph.QuerySpec {
+ _spec := sqlgraph.NewQuerySpec(export.Table, export.Columns, sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID))
+ _spec.From = _q.sql
+ if unique := _q.ctx.Unique; unique != nil {
+ _spec.Unique = *unique
+ } else if _q.path != nil {
+ _spec.Unique = true
+ }
+ if fields := _q.ctx.Fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, export.FieldID)
+ for i := range fields {
+ if fields[i] != export.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+ }
+ }
+ if _q.withGroup != nil {
+ _spec.Node.AddColumnOnce(export.FieldGroupID)
+ }
+ }
+ if ps := _q.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if limit := _q.ctx.Limit; limit != nil {
+ _spec.Limit = *limit
+ }
+ if offset := _q.ctx.Offset; offset != nil {
+ _spec.Offset = *offset
+ }
+ if ps := _q.order; len(ps) > 0 {
+ _spec.Order = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ return _spec
+}
+
+func (_q *ExportQuery) sqlQuery(ctx context.Context) *sql.Selector {
+ builder := sql.Dialect(_q.driver.Dialect())
+ t1 := builder.Table(export.Table)
+ columns := _q.ctx.Fields
+ if len(columns) == 0 {
+ columns = export.Columns
+ }
+ selector := builder.Select(t1.Columns(columns...)...).From(t1)
+ if _q.sql != nil {
+ selector = _q.sql
+ selector.Select(selector.Columns(columns...)...)
+ }
+ if _q.ctx.Unique != nil && *_q.ctx.Unique {
+ selector.Distinct()
+ }
+ for _, p := range _q.predicates {
+ p(selector)
+ }
+ for _, p := range _q.order {
+ p(selector)
+ }
+ if offset := _q.ctx.Offset; offset != nil {
+ // limit is mandatory for offset clause. We start
+ // with default value, and override it below if needed.
+ selector.Offset(*offset).Limit(math.MaxInt32)
+ }
+ if limit := _q.ctx.Limit; limit != nil {
+ selector.Limit(*limit)
+ }
+ return selector
+}
+
+// ExportGroupBy is the group-by builder for Export entities.
+type ExportGroupBy struct {
+ selector
+ build *ExportQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (_g *ExportGroupBy) Aggregate(fns ...AggregateFunc) *ExportGroupBy {
+ _g.fns = append(_g.fns, fns...)
+ return _g
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (_g *ExportGroupBy) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
+ if err := _g.build.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*ExportQuery, *ExportGroupBy](ctx, _g.build, _g, _g.build.inters, v)
+}
+
+func (_g *ExportGroupBy) sqlScan(ctx context.Context, root *ExportQuery, v any) error {
+ selector := root.sqlQuery(ctx).Select()
+ aggregation := make([]string, 0, len(_g.fns))
+ for _, fn := range _g.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ if len(selector.SelectedColumns()) == 0 {
+ columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
+ for _, f := range *_g.flds {
+ columns = append(columns, selector.C(f))
+ }
+ columns = append(columns, aggregation...)
+ selector.Select(columns...)
+ }
+ selector.GroupBy(selector.Columns(*_g.flds...)...)
+ if err := selector.Err(); err != nil {
+ return err
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
+
+// ExportSelect is the builder for selecting fields of Export entities.
+type ExportSelect struct {
+ *ExportQuery
+ selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (_s *ExportSelect) Aggregate(fns ...AggregateFunc) *ExportSelect {
+ _s.fns = append(_s.fns, fns...)
+ return _s
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (_s *ExportSelect) Scan(ctx context.Context, v any) error {
+ ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
+ if err := _s.prepareQuery(ctx); err != nil {
+ return err
+ }
+ return scanWithInterceptors[*ExportQuery, *ExportSelect](ctx, _s.ExportQuery, _s, _s.inters, v)
+}
+
+func (_s *ExportSelect) sqlScan(ctx context.Context, root *ExportQuery, v any) error {
+ selector := root.sqlQuery(ctx)
+ aggregation := make([]string, 0, len(_s.fns))
+ for _, fn := range _s.fns {
+ aggregation = append(aggregation, fn(selector))
+ }
+ switch n := len(*_s.selector.flds); {
+ case n == 0 && len(aggregation) > 0:
+ selector.Select(aggregation...)
+ case n != 0 && len(aggregation) > 0:
+ selector.AppendSelect(aggregation...)
+ }
+ rows := &sql.Rows{}
+ query, args := selector.Query()
+ if err := _s.driver.Query(ctx, query, args, rows); err != nil {
+ return err
+ }
+ defer rows.Close()
+ return sql.ScanSlice(rows, v)
+}
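
A sketch that ties the query builder together: group-scoped filtering, newest-first ordering over the generated column constant, and eager-loading of the group edge so Edges.Group is populated on each node. export.GroupIDEQ is again an assumed generated predicate:

package examples // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/google/uuid"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
)

// listGroupExports returns a group's export jobs newest first, with the
// owning Group row attached to each node via Edges.Group.
func listGroupExports(ctx context.Context, client *ent.Client, gid uuid.UUID) ([]*ent.Export, error) {
	return client.Export.Query().
		Where(export.GroupIDEQ(gid)).
		Order(ent.Desc(export.FieldCreatedAt)).
		WithGroup().
		All(ctx)
}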
diff --git a/backend/internal/data/ent/export_update.go b/backend/internal/data/ent/export_update.go
new file mode 100644
index 000000000..ac14259ca
--- /dev/null
+++ b/backend/internal/data/ent/export_update.go
@@ -0,0 +1,654 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/google/uuid"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/predicate"
+)
+
+// ExportUpdate is the builder for updating Export entities.
+type ExportUpdate struct {
+ config
+ hooks []Hook
+ mutation *ExportMutation
+}
+
+// Where appends a list of predicates to the ExportUpdate builder.
+func (_u *ExportUpdate) Where(ps ...predicate.Export) *ExportUpdate {
+ _u.mutation.Where(ps...)
+ return _u
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_u *ExportUpdate) SetUpdatedAt(v time.Time) *ExportUpdate {
+ _u.mutation.SetUpdatedAt(v)
+ return _u
+}
+
+// SetGroupID sets the "group_id" field.
+func (_u *ExportUpdate) SetGroupID(v uuid.UUID) *ExportUpdate {
+ _u.mutation.SetGroupID(v)
+ return _u
+}
+
+// SetNillableGroupID sets the "group_id" field if the given value is not nil.
+func (_u *ExportUpdate) SetNillableGroupID(v *uuid.UUID) *ExportUpdate {
+ if v != nil {
+ _u.SetGroupID(*v)
+ }
+ return _u
+}
+
+// SetKind sets the "kind" field.
+func (_u *ExportUpdate) SetKind(v export.Kind) *ExportUpdate {
+ _u.mutation.SetKind(v)
+ return _u
+}
+
+// SetNillableKind sets the "kind" field if the given value is not nil.
+func (_u *ExportUpdate) SetNillableKind(v *export.Kind) *ExportUpdate {
+ if v != nil {
+ _u.SetKind(*v)
+ }
+ return _u
+}
+
+// SetStatus sets the "status" field.
+func (_u *ExportUpdate) SetStatus(v export.Status) *ExportUpdate {
+ _u.mutation.SetStatus(v)
+ return _u
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (_u *ExportUpdate) SetNillableStatus(v *export.Status) *ExportUpdate {
+ if v != nil {
+ _u.SetStatus(*v)
+ }
+ return _u
+}
+
+// SetProgress sets the "progress" field.
+func (_u *ExportUpdate) SetProgress(v int) *ExportUpdate {
+ _u.mutation.ResetProgress()
+ _u.mutation.SetProgress(v)
+ return _u
+}
+
+// SetNillableProgress sets the "progress" field if the given value is not nil.
+func (_u *ExportUpdate) SetNillableProgress(v *int) *ExportUpdate {
+ if v != nil {
+ _u.SetProgress(*v)
+ }
+ return _u
+}
+
+// AddProgress adds value to the "progress" field.
+func (_u *ExportUpdate) AddProgress(v int) *ExportUpdate {
+ _u.mutation.AddProgress(v)
+ return _u
+}
+
+// SetArtifactPath sets the "artifact_path" field.
+func (_u *ExportUpdate) SetArtifactPath(v string) *ExportUpdate {
+ _u.mutation.SetArtifactPath(v)
+ return _u
+}
+
+// SetNillableArtifactPath sets the "artifact_path" field if the given value is not nil.
+func (_u *ExportUpdate) SetNillableArtifactPath(v *string) *ExportUpdate {
+ if v != nil {
+ _u.SetArtifactPath(*v)
+ }
+ return _u
+}
+
+// ClearArtifactPath clears the value of the "artifact_path" field.
+func (_u *ExportUpdate) ClearArtifactPath() *ExportUpdate {
+ _u.mutation.ClearArtifactPath()
+ return _u
+}
+
+// SetSizeBytes sets the "size_bytes" field.
+func (_u *ExportUpdate) SetSizeBytes(v int64) *ExportUpdate {
+ _u.mutation.ResetSizeBytes()
+ _u.mutation.SetSizeBytes(v)
+ return _u
+}
+
+// SetNillableSizeBytes sets the "size_bytes" field if the given value is not nil.
+func (_u *ExportUpdate) SetNillableSizeBytes(v *int64) *ExportUpdate {
+ if v != nil {
+ _u.SetSizeBytes(*v)
+ }
+ return _u
+}
+
+// AddSizeBytes adds value to the "size_bytes" field.
+func (_u *ExportUpdate) AddSizeBytes(v int64) *ExportUpdate {
+ _u.mutation.AddSizeBytes(v)
+ return _u
+}
+
+// SetError sets the "error" field.
+func (_u *ExportUpdate) SetError(v string) *ExportUpdate {
+ _u.mutation.SetError(v)
+ return _u
+}
+
+// SetNillableError sets the "error" field if the given value is not nil.
+func (_u *ExportUpdate) SetNillableError(v *string) *ExportUpdate {
+ if v != nil {
+ _u.SetError(*v)
+ }
+ return _u
+}
+
+// ClearError clears the value of the "error" field.
+func (_u *ExportUpdate) ClearError() *ExportUpdate {
+ _u.mutation.ClearError()
+ return _u
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (_u *ExportUpdate) SetGroup(v *Group) *ExportUpdate {
+ return _u.SetGroupID(v.ID)
+}
+
+// Mutation returns the ExportMutation object of the builder.
+func (_u *ExportUpdate) Mutation() *ExportMutation {
+ return _u.mutation
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (_u *ExportUpdate) ClearGroup() *ExportUpdate {
+ _u.mutation.ClearGroup()
+ return _u
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (_u *ExportUpdate) Save(ctx context.Context) (int, error) {
+ _u.defaults()
+ return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_u *ExportUpdate) SaveX(ctx context.Context) int {
+ affected, err := _u.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return affected
+}
+
+// Exec executes the query.
+func (_u *ExportUpdate) Exec(ctx context.Context) error {
+ _, err := _u.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_u *ExportUpdate) ExecX(ctx context.Context) {
+ if err := _u.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (_u *ExportUpdate) defaults() {
+ if _, ok := _u.mutation.UpdatedAt(); !ok {
+ v := export.UpdateDefaultUpdatedAt()
+ _u.mutation.SetUpdatedAt(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (_u *ExportUpdate) check() error {
+ if v, ok := _u.mutation.Kind(); ok {
+ if err := export.KindValidator(v); err != nil {
+ return &ValidationError{Name: "kind", err: fmt.Errorf(`ent: validator failed for field "Export.kind": %w`, err)}
+ }
+ }
+ if v, ok := _u.mutation.Status(); ok {
+ if err := export.StatusValidator(v); err != nil {
+ return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Export.status": %w`, err)}
+ }
+ }
+ if v, ok := _u.mutation.Error(); ok {
+ if err := export.ErrorValidator(v); err != nil {
+ return &ValidationError{Name: "error", err: fmt.Errorf(`ent: validator failed for field "Export.error": %w`, err)}
+ }
+ }
+ if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 {
+ return errors.New(`ent: clearing a required unique edge "Export.group"`)
+ }
+ return nil
+}
+
+func (_u *ExportUpdate) sqlSave(ctx context.Context) (_node int, err error) {
+ if err := _u.check(); err != nil {
+ return _node, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(export.Table, export.Columns, sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID))
+ if ps := _u.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := _u.mutation.UpdatedAt(); ok {
+ _spec.SetField(export.FieldUpdatedAt, field.TypeTime, value)
+ }
+ if value, ok := _u.mutation.Kind(); ok {
+ _spec.SetField(export.FieldKind, field.TypeEnum, value)
+ }
+ if value, ok := _u.mutation.Status(); ok {
+ _spec.SetField(export.FieldStatus, field.TypeEnum, value)
+ }
+ if value, ok := _u.mutation.Progress(); ok {
+ _spec.SetField(export.FieldProgress, field.TypeInt, value)
+ }
+ if value, ok := _u.mutation.AddedProgress(); ok {
+ _spec.AddField(export.FieldProgress, field.TypeInt, value)
+ }
+ if value, ok := _u.mutation.ArtifactPath(); ok {
+ _spec.SetField(export.FieldArtifactPath, field.TypeString, value)
+ }
+ if _u.mutation.ArtifactPathCleared() {
+ _spec.ClearField(export.FieldArtifactPath, field.TypeString)
+ }
+ if value, ok := _u.mutation.SizeBytes(); ok {
+ _spec.SetField(export.FieldSizeBytes, field.TypeInt64, value)
+ }
+ if value, ok := _u.mutation.AddedSizeBytes(); ok {
+ _spec.AddField(export.FieldSizeBytes, field.TypeInt64, value)
+ }
+ if value, ok := _u.mutation.Error(); ok {
+ _spec.SetField(export.FieldError, field.TypeString, value)
+ }
+ if _u.mutation.ErrorCleared() {
+ _spec.ClearField(export.FieldError, field.TypeString)
+ }
+ if _u.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: export.GroupTable,
+ Columns: []string{export.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: export.GroupTable,
+ Columns: []string{export.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{export.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return 0, err
+ }
+ _u.mutation.done = true
+ return _node, nil
+}
+
+// ExportUpdateOne is the builder for updating a single Export entity.
+type ExportUpdateOne struct {
+ config
+ fields []string
+ hooks []Hook
+ mutation *ExportMutation
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_u *ExportUpdateOne) SetUpdatedAt(v time.Time) *ExportUpdateOne {
+ _u.mutation.SetUpdatedAt(v)
+ return _u
+}
+
+// SetGroupID sets the "group_id" field.
+func (_u *ExportUpdateOne) SetGroupID(v uuid.UUID) *ExportUpdateOne {
+ _u.mutation.SetGroupID(v)
+ return _u
+}
+
+// SetNillableGroupID sets the "group_id" field if the given value is not nil.
+func (_u *ExportUpdateOne) SetNillableGroupID(v *uuid.UUID) *ExportUpdateOne {
+ if v != nil {
+ _u.SetGroupID(*v)
+ }
+ return _u
+}
+
+// SetKind sets the "kind" field.
+func (_u *ExportUpdateOne) SetKind(v export.Kind) *ExportUpdateOne {
+ _u.mutation.SetKind(v)
+ return _u
+}
+
+// SetNillableKind sets the "kind" field if the given value is not nil.
+func (_u *ExportUpdateOne) SetNillableKind(v *export.Kind) *ExportUpdateOne {
+ if v != nil {
+ _u.SetKind(*v)
+ }
+ return _u
+}
+
+// SetStatus sets the "status" field.
+func (_u *ExportUpdateOne) SetStatus(v export.Status) *ExportUpdateOne {
+ _u.mutation.SetStatus(v)
+ return _u
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (_u *ExportUpdateOne) SetNillableStatus(v *export.Status) *ExportUpdateOne {
+ if v != nil {
+ _u.SetStatus(*v)
+ }
+ return _u
+}
+
+// SetProgress sets the "progress" field.
+func (_u *ExportUpdateOne) SetProgress(v int) *ExportUpdateOne {
+ _u.mutation.ResetProgress()
+ _u.mutation.SetProgress(v)
+ return _u
+}
+
+// SetNillableProgress sets the "progress" field if the given value is not nil.
+func (_u *ExportUpdateOne) SetNillableProgress(v *int) *ExportUpdateOne {
+ if v != nil {
+ _u.SetProgress(*v)
+ }
+ return _u
+}
+
+// AddProgress adds value to the "progress" field.
+func (_u *ExportUpdateOne) AddProgress(v int) *ExportUpdateOne {
+ _u.mutation.AddProgress(v)
+ return _u
+}
+
+// SetArtifactPath sets the "artifact_path" field.
+func (_u *ExportUpdateOne) SetArtifactPath(v string) *ExportUpdateOne {
+ _u.mutation.SetArtifactPath(v)
+ return _u
+}
+
+// SetNillableArtifactPath sets the "artifact_path" field if the given value is not nil.
+func (_u *ExportUpdateOne) SetNillableArtifactPath(v *string) *ExportUpdateOne {
+ if v != nil {
+ _u.SetArtifactPath(*v)
+ }
+ return _u
+}
+
+// ClearArtifactPath clears the value of the "artifact_path" field.
+func (_u *ExportUpdateOne) ClearArtifactPath() *ExportUpdateOne {
+ _u.mutation.ClearArtifactPath()
+ return _u
+}
+
+// SetSizeBytes sets the "size_bytes" field.
+func (_u *ExportUpdateOne) SetSizeBytes(v int64) *ExportUpdateOne {
+ _u.mutation.ResetSizeBytes()
+ _u.mutation.SetSizeBytes(v)
+ return _u
+}
+
+// SetNillableSizeBytes sets the "size_bytes" field if the given value is not nil.
+func (_u *ExportUpdateOne) SetNillableSizeBytes(v *int64) *ExportUpdateOne {
+ if v != nil {
+ _u.SetSizeBytes(*v)
+ }
+ return _u
+}
+
+// AddSizeBytes adds value to the "size_bytes" field.
+func (_u *ExportUpdateOne) AddSizeBytes(v int64) *ExportUpdateOne {
+ _u.mutation.AddSizeBytes(v)
+ return _u
+}
+
+// SetError sets the "error" field.
+func (_u *ExportUpdateOne) SetError(v string) *ExportUpdateOne {
+ _u.mutation.SetError(v)
+ return _u
+}
+
+// SetNillableError sets the "error" field if the given value is not nil.
+func (_u *ExportUpdateOne) SetNillableError(v *string) *ExportUpdateOne {
+ if v != nil {
+ _u.SetError(*v)
+ }
+ return _u
+}
+
+// ClearError clears the value of the "error" field.
+func (_u *ExportUpdateOne) ClearError() *ExportUpdateOne {
+ _u.mutation.ClearError()
+ return _u
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (_u *ExportUpdateOne) SetGroup(v *Group) *ExportUpdateOne {
+ return _u.SetGroupID(v.ID)
+}
+
+// Mutation returns the ExportMutation object of the builder.
+func (_u *ExportUpdateOne) Mutation() *ExportMutation {
+ return _u.mutation
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (_u *ExportUpdateOne) ClearGroup() *ExportUpdateOne {
+ _u.mutation.ClearGroup()
+ return _u
+}
+
+// Where appends a list of predicates to the ExportUpdateOne builder.
+func (_u *ExportUpdateOne) Where(ps ...predicate.Export) *ExportUpdateOne {
+ _u.mutation.Where(ps...)
+ return _u
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (_u *ExportUpdateOne) Select(field string, fields ...string) *ExportUpdateOne {
+ _u.fields = append([]string{field}, fields...)
+ return _u
+}
+
+// Save executes the query and returns the updated Export entity.
+func (_u *ExportUpdateOne) Save(ctx context.Context) (*Export, error) {
+ _u.defaults()
+ return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_u *ExportUpdateOne) SaveX(ctx context.Context) *Export {
+ node, err := _u.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// Exec executes the query on the entity.
+func (_u *ExportUpdateOne) Exec(ctx context.Context) error {
+ _, err := _u.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_u *ExportUpdateOne) ExecX(ctx context.Context) {
+ if err := _u.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// defaults sets the default values of the builder before save.
+func (_u *ExportUpdateOne) defaults() {
+ if _, ok := _u.mutation.UpdatedAt(); !ok {
+ v := export.UpdateDefaultUpdatedAt()
+ _u.mutation.SetUpdatedAt(v)
+ }
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (_u *ExportUpdateOne) check() error {
+ if v, ok := _u.mutation.Kind(); ok {
+ if err := export.KindValidator(v); err != nil {
+ return &ValidationError{Name: "kind", err: fmt.Errorf(`ent: validator failed for field "Export.kind": %w`, err)}
+ }
+ }
+ if v, ok := _u.mutation.Status(); ok {
+ if err := export.StatusValidator(v); err != nil {
+ return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Export.status": %w`, err)}
+ }
+ }
+ if v, ok := _u.mutation.Error(); ok {
+ if err := export.ErrorValidator(v); err != nil {
+ return &ValidationError{Name: "error", err: fmt.Errorf(`ent: validator failed for field "Export.error": %w`, err)}
+ }
+ }
+ if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 {
+ return errors.New(`ent: clearing a required unique edge "Export.group"`)
+ }
+ return nil
+}
+
+func (_u *ExportUpdateOne) sqlSave(ctx context.Context) (_node *Export, err error) {
+ if err := _u.check(); err != nil {
+ return _node, err
+ }
+ _spec := sqlgraph.NewUpdateSpec(export.Table, export.Columns, sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID))
+ id, ok := _u.mutation.ID()
+ if !ok {
+ return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Export.id" for update`)}
+ }
+ _spec.Node.ID.Value = id
+ if fields := _u.fields; len(fields) > 0 {
+ _spec.Node.Columns = make([]string, 0, len(fields))
+ _spec.Node.Columns = append(_spec.Node.Columns, export.FieldID)
+ for _, f := range fields {
+ if !export.ValidColumn(f) {
+ return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+ }
+ if f != export.FieldID {
+ _spec.Node.Columns = append(_spec.Node.Columns, f)
+ }
+ }
+ }
+ if ps := _u.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ if value, ok := _u.mutation.UpdatedAt(); ok {
+ _spec.SetField(export.FieldUpdatedAt, field.TypeTime, value)
+ }
+ if value, ok := _u.mutation.Kind(); ok {
+ _spec.SetField(export.FieldKind, field.TypeEnum, value)
+ }
+ if value, ok := _u.mutation.Status(); ok {
+ _spec.SetField(export.FieldStatus, field.TypeEnum, value)
+ }
+ if value, ok := _u.mutation.Progress(); ok {
+ _spec.SetField(export.FieldProgress, field.TypeInt, value)
+ }
+ if value, ok := _u.mutation.AddedProgress(); ok {
+ _spec.AddField(export.FieldProgress, field.TypeInt, value)
+ }
+ if value, ok := _u.mutation.ArtifactPath(); ok {
+ _spec.SetField(export.FieldArtifactPath, field.TypeString, value)
+ }
+ if _u.mutation.ArtifactPathCleared() {
+ _spec.ClearField(export.FieldArtifactPath, field.TypeString)
+ }
+ if value, ok := _u.mutation.SizeBytes(); ok {
+ _spec.SetField(export.FieldSizeBytes, field.TypeInt64, value)
+ }
+ if value, ok := _u.mutation.AddedSizeBytes(); ok {
+ _spec.AddField(export.FieldSizeBytes, field.TypeInt64, value)
+ }
+ if value, ok := _u.mutation.Error(); ok {
+ _spec.SetField(export.FieldError, field.TypeString, value)
+ }
+ if _u.mutation.ErrorCleared() {
+ _spec.ClearField(export.FieldError, field.TypeString)
+ }
+ if _u.mutation.GroupCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: export.GroupTable,
+ Columns: []string{export.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2O,
+ Inverse: true,
+ Table: export.GroupTable,
+ Columns: []string{export.GroupColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
+ _node = &Export{config: _u.config}
+ _spec.Assign = _node.assignValues
+ _spec.ScanValues = _node.scanValues
+ if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
+ if _, ok := err.(*sqlgraph.NotFoundError); ok {
+ err = &NotFoundError{export.Label}
+ } else if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ return nil, err
+ }
+ _u.mutation.done = true
+ return _node, nil
+}
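
A single-row update sketch using the client's generated UpdateOneID helper (standard ent codegen, not shown in this diff). Because defaults() stamps updated_at on every save, callers set only what changed:

package examples // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/google/uuid"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
)

// markProgress advances one export job and clears any stale error text;
// updated_at is bumped automatically by the builder's defaults().
func markProgress(ctx context.Context, client *ent.Client, id uuid.UUID, pct int) (*ent.Export, error) {
	return client.Export.UpdateOneID(id).
		SetProgress(pct).
		ClearError().
		Save(ctx)
}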
diff --git a/backend/internal/data/ent/group.go b/backend/internal/data/ent/group.go
index 2a59ead4f..2dfa1523c 100644
--- a/backend/internal/data/ent/group.go
+++ b/backend/internal/data/ent/group.go
@@ -48,11 +48,13 @@ type GroupEdges struct {
Notifiers []*Notifier `json:"notifiers,omitempty"`
// EntityTemplates holds the value of the entity_templates edge.
EntityTemplates []*EntityTemplate `json:"entity_templates,omitempty"`
+ // Exports holds the value of the exports edge.
+ Exports []*Export `json:"exports,omitempty"`
// UserGroups holds the value of the user_groups edge.
UserGroups []*UserGroup `json:"user_groups,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
- loadedTypes [8]bool
+ loadedTypes [9]bool
}
// UsersOrErr returns the Users value or an error if the edge
@@ -118,10 +120,19 @@ func (e GroupEdges) EntityTemplatesOrErr() ([]*EntityTemplate, error) {
return nil, &NotLoadedError{edge: "entity_templates"}
}
+// ExportsOrErr returns the Exports value or an error if the edge
+// was not loaded in eager-loading.
+func (e GroupEdges) ExportsOrErr() ([]*Export, error) {
+ if e.loadedTypes[7] {
+ return e.Exports, nil
+ }
+ return nil, &NotLoadedError{edge: "exports"}
+}
+
// UserGroupsOrErr returns the UserGroups value or an error if the edge
// was not loaded in eager-loading.
func (e GroupEdges) UserGroupsOrErr() ([]*UserGroup, error) {
- if e.loadedTypes[7] {
+ if e.loadedTypes[8] {
return e.UserGroups, nil
}
return nil, &NotLoadedError{edge: "user_groups"}
@@ -231,6 +242,11 @@ func (_m *Group) QueryEntityTemplates() *EntityTemplateQuery {
return NewGroupClient(_m.config).QueryEntityTemplates(_m)
}
+// QueryExports queries the "exports" edge of the Group entity.
+func (_m *Group) QueryExports() *ExportQuery {
+ return NewGroupClient(_m.config).QueryExports(_m)
+}
+
// QueryUserGroups queries the "user_groups" edge of the Group entity.
func (_m *Group) QueryUserGroups() *UserGroupQuery {
return NewGroupClient(_m.config).QueryUserGroups(_m)
diff --git a/backend/internal/data/ent/group/group.go b/backend/internal/data/ent/group/group.go
index 2be8540c1..ed2244985 100644
--- a/backend/internal/data/ent/group/group.go
+++ b/backend/internal/data/ent/group/group.go
@@ -37,6 +37,8 @@ const (
EdgeNotifiers = "notifiers"
// EdgeEntityTemplates holds the string denoting the entity_templates edge name in mutations.
EdgeEntityTemplates = "entity_templates"
+ // EdgeExports holds the string denoting the exports edge name in mutations.
+ EdgeExports = "exports"
// EdgeUserGroups holds the string denoting the user_groups edge name in mutations.
EdgeUserGroups = "user_groups"
// Table holds the table name of the group in the database.
@@ -88,6 +90,13 @@ const (
EntityTemplatesInverseTable = "entity_templates"
// EntityTemplatesColumn is the table column denoting the entity_templates relation/edge.
EntityTemplatesColumn = "group_entity_templates"
+ // ExportsTable is the table that holds the exports relation/edge.
+ ExportsTable = "exports"
+ // ExportsInverseTable is the table name for the Export entity.
+ // It exists in this package in order to avoid circular dependency with the "export" package.
+ ExportsInverseTable = "exports"
+ // ExportsColumn is the table column denoting the exports relation/edge.
+ ExportsColumn = "group_id"
// UserGroupsTable is the table that holds the user_groups relation/edge.
UserGroupsTable = "user_groups"
// UserGroupsInverseTable is the table name for the UserGroup entity.
@@ -263,6 +272,20 @@ func ByEntityTemplates(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
}
}
+// ByExportsCount orders the results by exports count.
+func ByExportsCount(opts ...sql.OrderTermOption) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborsCount(s, newExportsStep(), opts...)
+ }
+}
+
+// ByExports orders the results by exports terms.
+func ByExports(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+ return func(s *sql.Selector) {
+ sqlgraph.OrderByNeighborTerms(s, newExportsStep(), append([]sql.OrderTerm{term}, terms...)...)
+ }
+}
+
// ByUserGroupsCount orders the results by user_groups count.
func ByUserGroupsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
@@ -325,6 +348,13 @@ func newEntityTemplatesStep() *sqlgraph.Step {
sqlgraph.Edge(sqlgraph.O2M, false, EntityTemplatesTable, EntityTemplatesColumn),
)
}
+func newExportsStep() *sqlgraph.Step {
+ return sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.To(ExportsInverseTable, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, ExportsTable, ExportsColumn),
+ )
+}
func newUserGroupsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
diff --git a/backend/internal/data/ent/group/where.go b/backend/internal/data/ent/group/where.go
index a7f28d997..facbd0add 100644
--- a/backend/internal/data/ent/group/where.go
+++ b/backend/internal/data/ent/group/where.go
@@ -447,6 +447,29 @@ func HasEntityTemplatesWith(preds ...predicate.EntityTemplate) predicate.Group {
})
}
+// HasExports applies the HasEdge predicate on the "exports" edge.
+func HasExports() predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, ExportsTable, ExportsColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasExportsWith applies the HasEdge predicate on the "exports" edge with the given conditions (other predicates).
+func HasExportsWith(preds ...predicate.Export) predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := newExportsStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
// HasUserGroups applies the HasEdge predicate on the "user_groups" edge.
func HasUserGroups() predicate.Group {
return predicate.Group(func(s *sql.Selector) {
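
The new HasExports/HasExportsWith predicates make export state queryable from the Group side. A sketch counting groups that still own unfinished jobs, assuming the generated export.ProgressLT predicate:

package examples // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
)

// groupsWithPendingExports counts groups holding at least one export job
// below 100% progress.
func groupsWithPendingExports(ctx context.Context, client *ent.Client) (int, error) {
	return client.Group.Query().
		Where(group.HasExportsWith(export.ProgressLT(100))).
		Count(ctx)
}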
diff --git a/backend/internal/data/ent/group_create.go b/backend/internal/data/ent/group_create.go
index 30b6ffbfb..023fbfa38 100644
--- a/backend/internal/data/ent/group_create.go
+++ b/backend/internal/data/ent/group_create.go
@@ -14,6 +14,7 @@ import (
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entity"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/notifier"
@@ -195,6 +196,21 @@ func (_c *GroupCreate) AddEntityTemplates(v ...*EntityTemplate) *GroupCreate {
return _c.AddEntityTemplateIDs(ids...)
}
+// AddExportIDs adds the "exports" edge to the Export entity by IDs.
+func (_c *GroupCreate) AddExportIDs(ids ...uuid.UUID) *GroupCreate {
+ _c.mutation.AddExportIDs(ids...)
+ return _c
+}
+
+// AddExports adds the "exports" edges to the Export entity.
+func (_c *GroupCreate) AddExports(v ...*Export) *GroupCreate {
+ ids := make([]uuid.UUID, len(v))
+ for i := range v {
+ ids[i] = v[i].ID
+ }
+ return _c.AddExportIDs(ids...)
+}
+
// Mutation returns the GroupMutation object of the builder.
func (_c *GroupCreate) Mutation() *GroupMutation {
return _c.mutation
@@ -434,6 +450,22 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
}
_spec.Edges = append(_spec.Edges, edge)
}
+ if nodes := _c.mutation.ExportsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.ExportsTable,
+ Columns: []string{group.ExportsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
return _node, _spec
}
diff --git a/backend/internal/data/ent/group_query.go b/backend/internal/data/ent/group_query.go
index e22780fce..546268273 100644
--- a/backend/internal/data/ent/group_query.go
+++ b/backend/internal/data/ent/group_query.go
@@ -16,6 +16,7 @@ import (
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entity"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/notifier"
@@ -39,6 +40,7 @@ type GroupQuery struct {
withInvitationTokens *GroupInvitationTokenQuery
withNotifiers *NotifierQuery
withEntityTemplates *EntityTemplateQuery
+ withExports *ExportQuery
withUserGroups *UserGroupQuery
// intermediate query (i.e. traversal path).
sql *sql.Selector
@@ -230,6 +232,28 @@ func (_q *GroupQuery) QueryEntityTemplates() *EntityTemplateQuery {
return query
}
+// QueryExports chains the current query on the "exports" edge.
+func (_q *GroupQuery) QueryExports() *ExportQuery {
+ query := (&ExportClient{config: _q.config}).Query()
+ query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+ if err := _q.prepareQuery(ctx); err != nil {
+ return nil, err
+ }
+ selector := _q.sqlQuery(ctx)
+ if err := selector.Err(); err != nil {
+ return nil, err
+ }
+ step := sqlgraph.NewStep(
+ sqlgraph.From(group.Table, group.FieldID, selector),
+ sqlgraph.To(export.Table, export.FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, false, group.ExportsTable, group.ExportsColumn),
+ )
+ fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
+ return fromU, nil
+ }
+ return query
+}
+
// QueryUserGroups chains the current query on the "user_groups" edge.
func (_q *GroupQuery) QueryUserGroups() *UserGroupQuery {
query := (&UserGroupClient{config: _q.config}).Query()
@@ -451,6 +475,7 @@ func (_q *GroupQuery) Clone() *GroupQuery {
withInvitationTokens: _q.withInvitationTokens.Clone(),
withNotifiers: _q.withNotifiers.Clone(),
withEntityTemplates: _q.withEntityTemplates.Clone(),
+ withExports: _q.withExports.Clone(),
withUserGroups: _q.withUserGroups.Clone(),
// clone intermediate query.
sql: _q.sql.Clone(),
@@ -535,6 +560,17 @@ func (_q *GroupQuery) WithEntityTemplates(opts ...func(*EntityTemplateQuery)) *G
return _q
}
+// WithExports tells the query-builder to eager-load the nodes that are connected to
+// the "exports" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *GroupQuery) WithExports(opts ...func(*ExportQuery)) *GroupQuery {
+ query := (&ExportClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withExports = query
+ return _q
+}
+
// WithUserGroups tells the query-builder to eager-load the nodes that are connected to
// the "user_groups" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *GroupQuery) WithUserGroups(opts ...func(*UserGroupQuery)) *GroupQuery {
@@ -624,7 +660,7 @@ func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
var (
nodes = []*Group{}
_spec = _q.querySpec()
- loadedTypes = [8]bool{
+ loadedTypes = [9]bool{
_q.withUsers != nil,
_q.withEntityTypes != nil,
_q.withEntities != nil,
@@ -632,6 +668,7 @@ func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
_q.withInvitationTokens != nil,
_q.withNotifiers != nil,
_q.withEntityTemplates != nil,
+ _q.withExports != nil,
_q.withUserGroups != nil,
}
)
@@ -704,6 +741,13 @@ func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
return nil, err
}
}
+ if query := _q.withExports; query != nil {
+ if err := _q.loadExports(ctx, query, nodes,
+ func(n *Group) { n.Edges.Exports = []*Export{} },
+ func(n *Group, e *Export) { n.Edges.Exports = append(n.Edges.Exports, e) }); err != nil {
+ return nil, err
+ }
+ }
if query := _q.withUserGroups; query != nil {
if err := _q.loadUserGroups(ctx, query, nodes,
func(n *Group) { n.Edges.UserGroups = []*UserGroup{} },
@@ -960,6 +1004,36 @@ func (_q *GroupQuery) loadEntityTemplates(ctx context.Context, query *EntityTemp
}
return nil
}
+func (_q *GroupQuery) loadExports(ctx context.Context, query *ExportQuery, nodes []*Group, init func(*Group), assign func(*Group, *Export)) error {
+ fks := make([]driver.Value, 0, len(nodes))
+ nodeids := make(map[uuid.UUID]*Group)
+ for i := range nodes {
+ fks = append(fks, nodes[i].ID)
+ nodeids[nodes[i].ID] = nodes[i]
+ if init != nil {
+ init(nodes[i])
+ }
+ }
+ if len(query.ctx.Fields) > 0 {
+ query.ctx.AppendFieldOnce(export.FieldGroupID)
+ }
+ query.Where(predicate.Export(func(s *sql.Selector) {
+ s.Where(sql.InValues(s.C(group.ExportsColumn), fks...))
+ }))
+ neighbors, err := query.All(ctx)
+ if err != nil {
+ return err
+ }
+ for _, n := range neighbors {
+ fk := n.GroupID
+ node, ok := nodeids[fk]
+ if !ok {
+ return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID)
+ }
+ assign(node, n)
+ }
+ return nil
+}
func (_q *GroupQuery) loadUserGroups(ctx context.Context, query *UserGroupQuery, nodes []*Group, init func(*Group), assign func(*Group, *UserGroup)) error {
fks := make([]driver.Value, 0, len(nodes))
nodeids := make(map[uuid.UUID]*Group)
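`WithExports` wires into the `loadExports` path above. A typical caller eager-loads a group's jobs newest-first in a single extra query — a sketch assuming the generated `export.FieldCreatedAt` constant and the `ent.Desc` helper:

```go
package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
)

// groupWithJobs fetches one group with its export rows eager-loaded,
// newest first, so callers read g.Edges.Exports without N+1 queries.
func groupWithJobs(ctx context.Context, client *ent.Client, gid uuid.UUID) (*ent.Group, error) {
	return client.Group.Query().
		Where(group.ID(gid)).
		WithExports(func(q *ent.ExportQuery) {
			q.Order(ent.Desc(export.FieldCreatedAt))
		}).
		Only(ctx)
}
```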
diff --git a/backend/internal/data/ent/group_update.go b/backend/internal/data/ent/group_update.go
index 51458c0cf..17eaa79c0 100644
--- a/backend/internal/data/ent/group_update.go
+++ b/backend/internal/data/ent/group_update.go
@@ -15,6 +15,7 @@ import (
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entity"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/notifier"
@@ -175,6 +176,21 @@ func (_u *GroupUpdate) AddEntityTemplates(v ...*EntityTemplate) *GroupUpdate {
return _u.AddEntityTemplateIDs(ids...)
}
+// AddExportIDs adds the "exports" edge to the Export entity by IDs.
+func (_u *GroupUpdate) AddExportIDs(ids ...uuid.UUID) *GroupUpdate {
+ _u.mutation.AddExportIDs(ids...)
+ return _u
+}
+
+// AddExports adds the "exports" edges to the Export entity.
+func (_u *GroupUpdate) AddExports(v ...*Export) *GroupUpdate {
+ ids := make([]uuid.UUID, len(v))
+ for i := range v {
+ ids[i] = v[i].ID
+ }
+ return _u.AddExportIDs(ids...)
+}
+
// Mutation returns the GroupMutation object of the builder.
func (_u *GroupUpdate) Mutation() *GroupMutation {
return _u.mutation
@@ -327,6 +343,27 @@ func (_u *GroupUpdate) RemoveEntityTemplates(v ...*EntityTemplate) *GroupUpdate
return _u.RemoveEntityTemplateIDs(ids...)
}
+// ClearExports clears all "exports" edges to the Export entity.
+func (_u *GroupUpdate) ClearExports() *GroupUpdate {
+ _u.mutation.ClearExports()
+ return _u
+}
+
+// RemoveExportIDs removes the "exports" edge to Export entities by IDs.
+func (_u *GroupUpdate) RemoveExportIDs(ids ...uuid.UUID) *GroupUpdate {
+ _u.mutation.RemoveExportIDs(ids...)
+ return _u
+}
+
+// RemoveExports removes "exports" edges to Export entities.
+func (_u *GroupUpdate) RemoveExports(v ...*Export) *GroupUpdate {
+ ids := make([]uuid.UUID, len(v))
+ for i := range v {
+ ids[i] = v[i].ID
+ }
+ return _u.RemoveExportIDs(ids...)
+}
+
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *GroupUpdate) Save(ctx context.Context) (int, error) {
_u.defaults()
@@ -721,6 +758,51 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
+ if _u.mutation.ExportsCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.ExportsTable,
+ Columns: []string{group.ExportsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := _u.mutation.RemovedExportsIDs(); len(nodes) > 0 && !_u.mutation.ExportsCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.ExportsTable,
+ Columns: []string{group.ExportsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := _u.mutation.ExportsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.ExportsTable,
+ Columns: []string{group.ExportsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
err = &NotFoundError{group.Label}
@@ -880,6 +962,21 @@ func (_u *GroupUpdateOne) AddEntityTemplates(v ...*EntityTemplate) *GroupUpdateO
return _u.AddEntityTemplateIDs(ids...)
}
+// AddExportIDs adds the "exports" edge to the Export entity by IDs.
+func (_u *GroupUpdateOne) AddExportIDs(ids ...uuid.UUID) *GroupUpdateOne {
+ _u.mutation.AddExportIDs(ids...)
+ return _u
+}
+
+// AddExports adds the "exports" edges to the Export entity.
+func (_u *GroupUpdateOne) AddExports(v ...*Export) *GroupUpdateOne {
+ ids := make([]uuid.UUID, len(v))
+ for i := range v {
+ ids[i] = v[i].ID
+ }
+ return _u.AddExportIDs(ids...)
+}
+
// Mutation returns the GroupMutation object of the builder.
func (_u *GroupUpdateOne) Mutation() *GroupMutation {
return _u.mutation
@@ -1032,6 +1129,27 @@ func (_u *GroupUpdateOne) RemoveEntityTemplates(v ...*EntityTemplate) *GroupUpda
return _u.RemoveEntityTemplateIDs(ids...)
}
+// ClearExports clears all "exports" edges to the Export entity.
+func (_u *GroupUpdateOne) ClearExports() *GroupUpdateOne {
+ _u.mutation.ClearExports()
+ return _u
+}
+
+// RemoveExportIDs removes the "exports" edge to Export entities by IDs.
+func (_u *GroupUpdateOne) RemoveExportIDs(ids ...uuid.UUID) *GroupUpdateOne {
+ _u.mutation.RemoveExportIDs(ids...)
+ return _u
+}
+
+// RemoveExports removes "exports" edges to Export entities.
+func (_u *GroupUpdateOne) RemoveExports(v ...*Export) *GroupUpdateOne {
+ ids := make([]uuid.UUID, len(v))
+ for i := range v {
+ ids[i] = v[i].ID
+ }
+ return _u.RemoveExportIDs(ids...)
+}
+
// Where appends a list predicates to the GroupUpdate builder.
func (_u *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne {
_u.mutation.Where(ps...)
@@ -1456,6 +1574,51 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error)
}
_spec.Edges.Add = append(_spec.Edges.Add, edge)
}
+ if _u.mutation.ExportsCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.ExportsTable,
+ Columns: []string{group.ExportsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID),
+ },
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := _u.mutation.RemovedExportsIDs(); len(nodes) > 0 && !_u.mutation.ExportsCleared() {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.ExportsTable,
+ Columns: []string{group.ExportsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+ }
+ if nodes := _u.mutation.ExportsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: group.ExportsTable,
+ Columns: []string{group.ExportsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(export.FieldID, field.TypeUUID),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges.Add = append(_spec.Edges.Add, edge)
+ }
_node = &Group{config: _u.config}
_spec.Assign = _node.assignValues
_spec.ScanValues = _node.scanValues
diff --git a/backend/internal/data/ent/has_id.go b/backend/internal/data/ent/has_id.go
index 420641052..7ea3c4a8d 100644
--- a/backend/internal/data/ent/has_id.go
+++ b/backend/internal/data/ent/has_id.go
@@ -36,6 +36,10 @@ func (_m *EntityType) GetID() uuid.UUID {
return _m.ID
}
+func (_m *Export) GetID() uuid.UUID {
+ return _m.ID
+}
+
func (_m *Group) GetID() uuid.UUID {
return _m.ID
}
diff --git a/backend/internal/data/ent/hook/hook.go b/backend/internal/data/ent/hook/hook.go
index 03641dc4a..504a3df1c 100644
--- a/backend/internal/data/ent/hook/hook.go
+++ b/backend/internal/data/ent/hook/hook.go
@@ -105,6 +105,18 @@ func (f EntityTypeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value,
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EntityTypeMutation", m)
}
+// The ExportFunc type is an adapter to allow the use of ordinary
+// function as Export mutator.
+type ExportFunc func(context.Context, *ent.ExportMutation) (ent.Value, error)
+
+// Mutate calls f(ctx, m).
+func (f ExportFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+ if mv, ok := m.(*ent.ExportMutation); ok {
+ return f(ctx, mv)
+ }
+ return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ExportMutation", m)
+}
+
// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)
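The `ExportFunc` adapter above lets a plain function act as an Export mutator. A hypothetical audit hook might log every status transition — the `client.Export.Use` wiring is standard ent, while the zerolog call is purely illustrative:

```go
package example

import (
	"context"

	"github.com/rs/zerolog/log"

	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/hook"
)

// registerExportAudit logs whenever a mutation sets the status field on an
// Export row, then hands control back to the next mutator in the chain.
func registerExportAudit(client *ent.Client) {
	client.Export.Use(func(next ent.Mutator) ent.Mutator {
		return hook.ExportFunc(func(ctx context.Context, m *ent.ExportMutation) (ent.Value, error) {
			if status, ok := m.Status(); ok {
				log.Debug().Str("status", string(status)).Msg("export job status change")
			}
			return next.Mutate(ctx, m)
		})
	})
}
```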
diff --git a/backend/internal/data/ent/migrate/schema.go b/backend/internal/data/ent/migrate/schema.go
index 4bb7e56b9..47a900664 100644
--- a/backend/internal/data/ent/migrate/schema.go
+++ b/backend/internal/data/ent/migrate/schema.go
@@ -328,6 +328,45 @@ var (
},
},
}
+ // ExportsColumns holds the columns for the "exports" table.
+ ExportsColumns = []*schema.Column{
+ {Name: "id", Type: field.TypeUUID},
+ {Name: "created_at", Type: field.TypeTime},
+ {Name: "updated_at", Type: field.TypeTime},
+ {Name: "kind", Type: field.TypeEnum, Enums: []string{"export", "import"}, Default: "export"},
+ {Name: "status", Type: field.TypeEnum, Enums: []string{"pending", "running", "completed", "failed"}, Default: "pending"},
+ {Name: "progress", Type: field.TypeInt, Default: 0},
+ {Name: "artifact_path", Type: field.TypeString, Nullable: true},
+ {Name: "size_bytes", Type: field.TypeInt64, Default: 0},
+ {Name: "error", Type: field.TypeString, Nullable: true, Size: 1000},
+ {Name: "group_id", Type: field.TypeUUID},
+ }
+ // ExportsTable holds the schema information for the "exports" table.
+ ExportsTable = &schema.Table{
+ Name: "exports",
+ Columns: ExportsColumns,
+ PrimaryKey: []*schema.Column{ExportsColumns[0]},
+ ForeignKeys: []*schema.ForeignKey{
+ {
+ Symbol: "exports_groups_exports",
+ Columns: []*schema.Column{ExportsColumns[9]},
+ RefColumns: []*schema.Column{GroupsColumns[0]},
+ OnDelete: schema.Cascade,
+ },
+ },
+ Indexes: []*schema.Index{
+ {
+ Name: "export_group_id",
+ Unique: false,
+ Columns: []*schema.Column{ExportsColumns[9]},
+ },
+ {
+ Name: "export_group_id_status",
+ Unique: false,
+ Columns: []*schema.Column{ExportsColumns[9], ExportsColumns[4]},
+ },
+ },
+ }
// GroupsColumns holds the columns for the "groups" table.
GroupsColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
@@ -626,6 +665,7 @@ var (
EntityFieldsTable,
EntityTemplatesTable,
EntityTypesTable,
+ ExportsTable,
GroupsTable,
GroupInvitationTokensTable,
MaintenanceEntriesTable,
@@ -653,6 +693,7 @@ func init() {
EntityTemplatesTable.ForeignKeys[1].RefTable = GroupsTable
EntityTypesTable.ForeignKeys[0].RefTable = EntityTemplatesTable
EntityTypesTable.ForeignKeys[1].RefTable = GroupsTable
+ ExportsTable.ForeignKeys[0].RefTable = GroupsTable
GroupInvitationTokensTable.ForeignKeys[0].RefTable = GroupsTable
MaintenanceEntriesTable.ForeignKeys[0].RefTable = EntitiesTable
NotifiersTable.ForeignKeys[0].RefTable = GroupsTable
diff --git a/backend/internal/data/ent/mutation.go b/backend/internal/data/ent/mutation.go
index e267e5553..47ee02343 100644
--- a/backend/internal/data/ent/mutation.go
+++ b/backend/internal/data/ent/mutation.go
@@ -20,6 +20,7 @@ import (
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entityfield"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/maintenanceentry"
@@ -49,6 +50,7 @@ const (
TypeEntityField = "EntityField"
TypeEntityTemplate = "EntityTemplate"
TypeEntityType = "EntityType"
+ TypeExport = "Export"
TypeGroup = "Group"
TypeGroupInvitationToken = "GroupInvitationToken"
TypeMaintenanceEntry = "MaintenanceEntry"
@@ -8564,6 +8566,934 @@ func (m *EntityTypeMutation) ResetEdge(name string) error {
return fmt.Errorf("unknown EntityType edge %s", name)
}
+// ExportMutation represents an operation that mutates the Export nodes in the graph.
+type ExportMutation struct {
+ config
+ op Op
+ typ string
+ id *uuid.UUID
+ created_at *time.Time
+ updated_at *time.Time
+ kind *export.Kind
+ status *export.Status
+ progress *int
+ addprogress *int
+ artifact_path *string
+ size_bytes *int64
+ addsize_bytes *int64
+ error *string
+ clearedFields map[string]struct{}
+ group *uuid.UUID
+ clearedgroup bool
+ done bool
+ oldValue func(context.Context) (*Export, error)
+ predicates []predicate.Export
+}
+
+var _ ent.Mutation = (*ExportMutation)(nil)
+
+// exportOption allows management of the mutation configuration using functional options.
+type exportOption func(*ExportMutation)
+
+// newExportMutation creates new mutation for the Export entity.
+func newExportMutation(c config, op Op, opts ...exportOption) *ExportMutation {
+ m := &ExportMutation{
+ config: c,
+ op: op,
+ typ: TypeExport,
+ clearedFields: make(map[string]struct{}),
+ }
+ for _, opt := range opts {
+ opt(m)
+ }
+ return m
+}
+
+// withExportID sets the ID field of the mutation.
+func withExportID(id uuid.UUID) exportOption {
+ return func(m *ExportMutation) {
+ var (
+ err error
+ once sync.Once
+ value *Export
+ )
+ m.oldValue = func(ctx context.Context) (*Export, error) {
+ once.Do(func() {
+ if m.done {
+ err = errors.New("querying old values post mutation is not allowed")
+ } else {
+ value, err = m.Client().Export.Get(ctx, id)
+ }
+ })
+ return value, err
+ }
+ m.id = &id
+ }
+}
+
+// withExport sets the old Export of the mutation.
+func withExport(node *Export) exportOption {
+ return func(m *ExportMutation) {
+ m.oldValue = func(context.Context) (*Export, error) {
+ return node, nil
+ }
+ m.id = &node.ID
+ }
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m ExportMutation) Client() *Client {
+ client := &Client{config: m.config}
+ client.init()
+ return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m ExportMutation) Tx() (*Tx, error) {
+ if _, ok := m.driver.(*txDriver); !ok {
+ return nil, errors.New("ent: mutation is not running in a transaction")
+ }
+ tx := &Tx{config: m.config}
+ tx.init()
+ return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of Export entities.
+func (m *ExportMutation) SetID(id uuid.UUID) {
+ m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *ExportMutation) ID() (id uuid.UUID, exists bool) {
+ if m.id == nil {
+ return
+ }
+ return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or were updated by the mutation.
+func (m *ExportMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
+ switch {
+ case m.op.Is(OpUpdateOne | OpDeleteOne):
+ id, exists := m.ID()
+ if exists {
+ return []uuid.UUID{id}, nil
+ }
+ fallthrough
+ case m.op.Is(OpUpdate | OpDelete):
+ return m.Client().Export.Query().Where(m.predicates...).IDs(ctx)
+ default:
+ return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+ }
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *ExportMutation) SetCreatedAt(t time.Time) {
+ m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *ExportMutation) CreatedAt() (r time.Time, exists bool) {
+ v := m.created_at
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+ }
+ return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *ExportMutation) ResetCreatedAt() {
+ m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *ExportMutation) SetUpdatedAt(t time.Time) {
+ m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *ExportMutation) UpdatedAt() (r time.Time, exists bool) {
+ v := m.updated_at
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+ }
+ return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *ExportMutation) ResetUpdatedAt() {
+ m.updated_at = nil
+}
+
+// SetGroupID sets the "group_id" field.
+func (m *ExportMutation) SetGroupID(u uuid.UUID) {
+ m.group = &u
+}
+
+// GroupID returns the value of the "group_id" field in the mutation.
+func (m *ExportMutation) GroupID() (r uuid.UUID, exists bool) {
+ v := m.group
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldGroupID returns the old "group_id" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldGroupID(ctx context.Context) (v uuid.UUID, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldGroupID is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldGroupID requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldGroupID: %w", err)
+ }
+ return oldValue.GroupID, nil
+}
+
+// ResetGroupID resets all changes to the "group_id" field.
+func (m *ExportMutation) ResetGroupID() {
+ m.group = nil
+}
+
+// SetKind sets the "kind" field.
+func (m *ExportMutation) SetKind(e export.Kind) {
+ m.kind = &e
+}
+
+// Kind returns the value of the "kind" field in the mutation.
+func (m *ExportMutation) Kind() (r export.Kind, exists bool) {
+ v := m.kind
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldKind returns the old "kind" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldKind(ctx context.Context) (v export.Kind, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldKind is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldKind requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldKind: %w", err)
+ }
+ return oldValue.Kind, nil
+}
+
+// ResetKind resets all changes to the "kind" field.
+func (m *ExportMutation) ResetKind() {
+ m.kind = nil
+}
+
+// SetStatus sets the "status" field.
+func (m *ExportMutation) SetStatus(e export.Status) {
+ m.status = &e
+}
+
+// Status returns the value of the "status" field in the mutation.
+func (m *ExportMutation) Status() (r export.Status, exists bool) {
+ v := m.status
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldStatus returns the old "status" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldStatus(ctx context.Context) (v export.Status, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldStatus is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldStatus requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldStatus: %w", err)
+ }
+ return oldValue.Status, nil
+}
+
+// ResetStatus resets all changes to the "status" field.
+func (m *ExportMutation) ResetStatus() {
+ m.status = nil
+}
+
+// SetProgress sets the "progress" field.
+func (m *ExportMutation) SetProgress(i int) {
+ m.progress = &i
+ m.addprogress = nil
+}
+
+// Progress returns the value of the "progress" field in the mutation.
+func (m *ExportMutation) Progress() (r int, exists bool) {
+ v := m.progress
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldProgress returns the old "progress" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldProgress(ctx context.Context) (v int, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldProgress is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldProgress requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldProgress: %w", err)
+ }
+ return oldValue.Progress, nil
+}
+
+// AddProgress adds i to the "progress" field.
+func (m *ExportMutation) AddProgress(i int) {
+ if m.addprogress != nil {
+ *m.addprogress += i
+ } else {
+ m.addprogress = &i
+ }
+}
+
+// AddedProgress returns the value that was added to the "progress" field in this mutation.
+func (m *ExportMutation) AddedProgress() (r int, exists bool) {
+ v := m.addprogress
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// ResetProgress resets all changes to the "progress" field.
+func (m *ExportMutation) ResetProgress() {
+ m.progress = nil
+ m.addprogress = nil
+}
+
+// SetArtifactPath sets the "artifact_path" field.
+func (m *ExportMutation) SetArtifactPath(s string) {
+ m.artifact_path = &s
+}
+
+// ArtifactPath returns the value of the "artifact_path" field in the mutation.
+func (m *ExportMutation) ArtifactPath() (r string, exists bool) {
+ v := m.artifact_path
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldArtifactPath returns the old "artifact_path" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldArtifactPath(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldArtifactPath is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldArtifactPath requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldArtifactPath: %w", err)
+ }
+ return oldValue.ArtifactPath, nil
+}
+
+// ClearArtifactPath clears the value of the "artifact_path" field.
+func (m *ExportMutation) ClearArtifactPath() {
+ m.artifact_path = nil
+ m.clearedFields[export.FieldArtifactPath] = struct{}{}
+}
+
+// ArtifactPathCleared returns if the "artifact_path" field was cleared in this mutation.
+func (m *ExportMutation) ArtifactPathCleared() bool {
+ _, ok := m.clearedFields[export.FieldArtifactPath]
+ return ok
+}
+
+// ResetArtifactPath resets all changes to the "artifact_path" field.
+func (m *ExportMutation) ResetArtifactPath() {
+ m.artifact_path = nil
+ delete(m.clearedFields, export.FieldArtifactPath)
+}
+
+// SetSizeBytes sets the "size_bytes" field.
+func (m *ExportMutation) SetSizeBytes(i int64) {
+ m.size_bytes = &i
+ m.addsize_bytes = nil
+}
+
+// SizeBytes returns the value of the "size_bytes" field in the mutation.
+func (m *ExportMutation) SizeBytes() (r int64, exists bool) {
+ v := m.size_bytes
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldSizeBytes returns the old "size_bytes" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldSizeBytes(ctx context.Context) (v int64, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldSizeBytes is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldSizeBytes requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldSizeBytes: %w", err)
+ }
+ return oldValue.SizeBytes, nil
+}
+
+// AddSizeBytes adds i to the "size_bytes" field.
+func (m *ExportMutation) AddSizeBytes(i int64) {
+ if m.addsize_bytes != nil {
+ *m.addsize_bytes += i
+ } else {
+ m.addsize_bytes = &i
+ }
+}
+
+// AddedSizeBytes returns the value that was added to the "size_bytes" field in this mutation.
+func (m *ExportMutation) AddedSizeBytes() (r int64, exists bool) {
+ v := m.addsize_bytes
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// ResetSizeBytes resets all changes to the "size_bytes" field.
+func (m *ExportMutation) ResetSizeBytes() {
+ m.size_bytes = nil
+ m.addsize_bytes = nil
+}
+
+// SetError sets the "error" field.
+func (m *ExportMutation) SetError(s string) {
+ m.error = &s
+}
+
+// Error returns the value of the "error" field in the mutation.
+func (m *ExportMutation) Error() (r string, exists bool) {
+ v := m.error
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldError returns the old "error" field's value of the Export entity.
+// If the Export object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ExportMutation) OldError(ctx context.Context) (v string, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldError is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldError requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldError: %w", err)
+ }
+ return oldValue.Error, nil
+}
+
+// ClearError clears the value of the "error" field.
+func (m *ExportMutation) ClearError() {
+ m.error = nil
+ m.clearedFields[export.FieldError] = struct{}{}
+}
+
+// ErrorCleared returns if the "error" field was cleared in this mutation.
+func (m *ExportMutation) ErrorCleared() bool {
+ _, ok := m.clearedFields[export.FieldError]
+ return ok
+}
+
+// ResetError resets all changes to the "error" field.
+func (m *ExportMutation) ResetError() {
+ m.error = nil
+ delete(m.clearedFields, export.FieldError)
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (m *ExportMutation) ClearGroup() {
+ m.clearedgroup = true
+ m.clearedFields[export.FieldGroupID] = struct{}{}
+}
+
+// GroupCleared reports if the "group" edge to the Group entity was cleared.
+func (m *ExportMutation) GroupCleared() bool {
+ return m.clearedgroup
+}
+
+// GroupIDs returns the "group" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// GroupID instead. It exists only for internal usage by the builders.
+func (m *ExportMutation) GroupIDs() (ids []uuid.UUID) {
+ if id := m.group; id != nil {
+ ids = append(ids, *id)
+ }
+ return
+}
+
+// ResetGroup resets all changes to the "group" edge.
+func (m *ExportMutation) ResetGroup() {
+ m.group = nil
+ m.clearedgroup = false
+}
+
+// Where appends a list of predicates to the ExportMutation builder.
+func (m *ExportMutation) Where(ps ...predicate.Export) {
+ m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the ExportMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *ExportMutation) WhereP(ps ...func(*sql.Selector)) {
+ p := make([]predicate.Export, len(ps))
+ for i := range ps {
+ p[i] = ps[i]
+ }
+ m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *ExportMutation) Op() Op {
+ return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *ExportMutation) SetOp(op Op) {
+ m.op = op
+}
+
+// Type returns the node type of this mutation (Export).
+func (m *ExportMutation) Type() string {
+ return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *ExportMutation) Fields() []string {
+ fields := make([]string, 0, 9)
+ if m.created_at != nil {
+ fields = append(fields, export.FieldCreatedAt)
+ }
+ if m.updated_at != nil {
+ fields = append(fields, export.FieldUpdatedAt)
+ }
+ if m.group != nil {
+ fields = append(fields, export.FieldGroupID)
+ }
+ if m.kind != nil {
+ fields = append(fields, export.FieldKind)
+ }
+ if m.status != nil {
+ fields = append(fields, export.FieldStatus)
+ }
+ if m.progress != nil {
+ fields = append(fields, export.FieldProgress)
+ }
+ if m.artifact_path != nil {
+ fields = append(fields, export.FieldArtifactPath)
+ }
+ if m.size_bytes != nil {
+ fields = append(fields, export.FieldSizeBytes)
+ }
+ if m.error != nil {
+ fields = append(fields, export.FieldError)
+ }
+ return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *ExportMutation) Field(name string) (ent.Value, bool) {
+ switch name {
+ case export.FieldCreatedAt:
+ return m.CreatedAt()
+ case export.FieldUpdatedAt:
+ return m.UpdatedAt()
+ case export.FieldGroupID:
+ return m.GroupID()
+ case export.FieldKind:
+ return m.Kind()
+ case export.FieldStatus:
+ return m.Status()
+ case export.FieldProgress:
+ return m.Progress()
+ case export.FieldArtifactPath:
+ return m.ArtifactPath()
+ case export.FieldSizeBytes:
+ return m.SizeBytes()
+ case export.FieldError:
+ return m.Error()
+ }
+ return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *ExportMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+ switch name {
+ case export.FieldCreatedAt:
+ return m.OldCreatedAt(ctx)
+ case export.FieldUpdatedAt:
+ return m.OldUpdatedAt(ctx)
+ case export.FieldGroupID:
+ return m.OldGroupID(ctx)
+ case export.FieldKind:
+ return m.OldKind(ctx)
+ case export.FieldStatus:
+ return m.OldStatus(ctx)
+ case export.FieldProgress:
+ return m.OldProgress(ctx)
+ case export.FieldArtifactPath:
+ return m.OldArtifactPath(ctx)
+ case export.FieldSizeBytes:
+ return m.OldSizeBytes(ctx)
+ case export.FieldError:
+ return m.OldError(ctx)
+ }
+ return nil, fmt.Errorf("unknown Export field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *ExportMutation) SetField(name string, value ent.Value) error {
+ switch name {
+ case export.FieldCreatedAt:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetCreatedAt(v)
+ return nil
+ case export.FieldUpdatedAt:
+ v, ok := value.(time.Time)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetUpdatedAt(v)
+ return nil
+ case export.FieldGroupID:
+ v, ok := value.(uuid.UUID)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetGroupID(v)
+ return nil
+ case export.FieldKind:
+ v, ok := value.(export.Kind)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetKind(v)
+ return nil
+ case export.FieldStatus:
+ v, ok := value.(export.Status)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetStatus(v)
+ return nil
+ case export.FieldProgress:
+ v, ok := value.(int)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetProgress(v)
+ return nil
+ case export.FieldArtifactPath:
+ v, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetArtifactPath(v)
+ return nil
+ case export.FieldSizeBytes:
+ v, ok := value.(int64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetSizeBytes(v)
+ return nil
+ case export.FieldError:
+ v, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetError(v)
+ return nil
+ }
+ return fmt.Errorf("unknown Export field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *ExportMutation) AddedFields() []string {
+ var fields []string
+ if m.addprogress != nil {
+ fields = append(fields, export.FieldProgress)
+ }
+ if m.addsize_bytes != nil {
+ fields = append(fields, export.FieldSizeBytes)
+ }
+ return fields
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *ExportMutation) AddedField(name string) (ent.Value, bool) {
+ switch name {
+ case export.FieldProgress:
+ return m.AddedProgress()
+ case export.FieldSizeBytes:
+ return m.AddedSizeBytes()
+ }
+ return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *ExportMutation) AddField(name string, value ent.Value) error {
+ switch name {
+ case export.FieldProgress:
+ v, ok := value.(int)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.AddProgress(v)
+ return nil
+ case export.FieldSizeBytes:
+ v, ok := value.(int64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.AddSizeBytes(v)
+ return nil
+ }
+ return fmt.Errorf("unknown Export numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *ExportMutation) ClearedFields() []string {
+ var fields []string
+ if m.FieldCleared(export.FieldArtifactPath) {
+ fields = append(fields, export.FieldArtifactPath)
+ }
+ if m.FieldCleared(export.FieldError) {
+ fields = append(fields, export.FieldError)
+ }
+ return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *ExportMutation) FieldCleared(name string) bool {
+ _, ok := m.clearedFields[name]
+ return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *ExportMutation) ClearField(name string) error {
+ switch name {
+ case export.FieldArtifactPath:
+ m.ClearArtifactPath()
+ return nil
+ case export.FieldError:
+ m.ClearError()
+ return nil
+ }
+ return fmt.Errorf("unknown Export nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *ExportMutation) ResetField(name string) error {
+ switch name {
+ case export.FieldCreatedAt:
+ m.ResetCreatedAt()
+ return nil
+ case export.FieldUpdatedAt:
+ m.ResetUpdatedAt()
+ return nil
+ case export.FieldGroupID:
+ m.ResetGroupID()
+ return nil
+ case export.FieldKind:
+ m.ResetKind()
+ return nil
+ case export.FieldStatus:
+ m.ResetStatus()
+ return nil
+ case export.FieldProgress:
+ m.ResetProgress()
+ return nil
+ case export.FieldArtifactPath:
+ m.ResetArtifactPath()
+ return nil
+ case export.FieldSizeBytes:
+ m.ResetSizeBytes()
+ return nil
+ case export.FieldError:
+ m.ResetError()
+ return nil
+ }
+ return fmt.Errorf("unknown Export field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *ExportMutation) AddedEdges() []string {
+ edges := make([]string, 0, 1)
+ if m.group != nil {
+ edges = append(edges, export.EdgeGroup)
+ }
+ return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *ExportMutation) AddedIDs(name string) []ent.Value {
+ switch name {
+ case export.EdgeGroup:
+ if id := m.group; id != nil {
+ return []ent.Value{*id}
+ }
+ }
+ return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *ExportMutation) RemovedEdges() []string {
+ edges := make([]string, 0, 1)
+ return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *ExportMutation) RemovedIDs(name string) []ent.Value {
+ return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *ExportMutation) ClearedEdges() []string {
+ edges := make([]string, 0, 1)
+ if m.clearedgroup {
+ edges = append(edges, export.EdgeGroup)
+ }
+ return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *ExportMutation) EdgeCleared(name string) bool {
+ switch name {
+ case export.EdgeGroup:
+ return m.clearedgroup
+ }
+ return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *ExportMutation) ClearEdge(name string) error {
+ switch name {
+ case export.EdgeGroup:
+ m.ClearGroup()
+ return nil
+ }
+ return fmt.Errorf("unknown Export unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *ExportMutation) ResetEdge(name string) error {
+ switch name {
+ case export.EdgeGroup:
+ m.ResetGroup()
+ return nil
+ }
+ return fmt.Errorf("unknown Export edge %s", name)
+}
+
// GroupMutation represents an operation that mutates the Group nodes in the graph.
type GroupMutation struct {
config
@@ -8596,6 +9526,9 @@ type GroupMutation struct {
entity_templates map[uuid.UUID]struct{}
removedentity_templates map[uuid.UUID]struct{}
clearedentity_templates bool
+ exports map[uuid.UUID]struct{}
+ removedexports map[uuid.UUID]struct{}
+ clearedexports bool
done bool
oldValue func(context.Context) (*Group, error)
predicates []predicate.Group
@@ -9227,6 +10160,60 @@ func (m *GroupMutation) ResetEntityTemplates() {
m.removedentity_templates = nil
}
+// AddExportIDs adds the "exports" edge to the Export entity by ids.
+func (m *GroupMutation) AddExportIDs(ids ...uuid.UUID) {
+ if m.exports == nil {
+ m.exports = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ m.exports[ids[i]] = struct{}{}
+ }
+}
+
+// ClearExports clears the "exports" edge to the Export entity.
+func (m *GroupMutation) ClearExports() {
+ m.clearedexports = true
+}
+
+// ExportsCleared reports if the "exports" edge to the Export entity was cleared.
+func (m *GroupMutation) ExportsCleared() bool {
+ return m.clearedexports
+}
+
+// RemoveExportIDs removes the "exports" edge to the Export entity by IDs.
+func (m *GroupMutation) RemoveExportIDs(ids ...uuid.UUID) {
+ if m.removedexports == nil {
+ m.removedexports = make(map[uuid.UUID]struct{})
+ }
+ for i := range ids {
+ delete(m.exports, ids[i])
+ m.removedexports[ids[i]] = struct{}{}
+ }
+}
+
+// RemovedExportsIDs returns the removed IDs of the "exports" edge to the Export entity.
+func (m *GroupMutation) RemovedExportsIDs() (ids []uuid.UUID) {
+ for id := range m.removedexports {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// ExportsIDs returns the "exports" edge IDs in the mutation.
+func (m *GroupMutation) ExportsIDs() (ids []uuid.UUID) {
+ for id := range m.exports {
+ ids = append(ids, id)
+ }
+ return
+}
+
+// ResetExports resets all changes to the "exports" edge.
+func (m *GroupMutation) ResetExports() {
+ m.exports = nil
+ m.clearedexports = false
+ m.removedexports = nil
+}
+
// Where appends a list predicates to the GroupMutation builder.
func (m *GroupMutation) Where(ps ...predicate.Group) {
m.predicates = append(m.predicates, ps...)
@@ -9411,7 +10398,7 @@ func (m *GroupMutation) ResetField(name string) error {
// AddedEdges returns all edge names that were set/added in this mutation.
func (m *GroupMutation) AddedEdges() []string {
- edges := make([]string, 0, 7)
+ edges := make([]string, 0, 8)
if m.users != nil {
edges = append(edges, group.EdgeUsers)
}
@@ -9433,6 +10420,9 @@ func (m *GroupMutation) AddedEdges() []string {
if m.entity_templates != nil {
edges = append(edges, group.EdgeEntityTemplates)
}
+ if m.exports != nil {
+ edges = append(edges, group.EdgeExports)
+ }
return edges
}
@@ -9482,13 +10472,19 @@ func (m *GroupMutation) AddedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case group.EdgeExports:
+ ids := make([]ent.Value, 0, len(m.exports))
+ for id := range m.exports {
+ ids = append(ids, id)
+ }
+ return ids
}
return nil
}
// RemovedEdges returns all edge names that were removed in this mutation.
func (m *GroupMutation) RemovedEdges() []string {
- edges := make([]string, 0, 7)
+ edges := make([]string, 0, 8)
if m.removedusers != nil {
edges = append(edges, group.EdgeUsers)
}
@@ -9510,6 +10506,9 @@ func (m *GroupMutation) RemovedEdges() []string {
if m.removedentity_templates != nil {
edges = append(edges, group.EdgeEntityTemplates)
}
+ if m.removedexports != nil {
+ edges = append(edges, group.EdgeExports)
+ }
return edges
}
@@ -9559,13 +10558,19 @@ func (m *GroupMutation) RemovedIDs(name string) []ent.Value {
ids = append(ids, id)
}
return ids
+ case group.EdgeExports:
+ ids := make([]ent.Value, 0, len(m.removedexports))
+ for id := range m.removedexports {
+ ids = append(ids, id)
+ }
+ return ids
}
return nil
}
// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *GroupMutation) ClearedEdges() []string {
- edges := make([]string, 0, 7)
+ edges := make([]string, 0, 8)
if m.clearedusers {
edges = append(edges, group.EdgeUsers)
}
@@ -9587,6 +10592,9 @@ func (m *GroupMutation) ClearedEdges() []string {
if m.clearedentity_templates {
edges = append(edges, group.EdgeEntityTemplates)
}
+ if m.clearedexports {
+ edges = append(edges, group.EdgeExports)
+ }
return edges
}
@@ -9608,6 +10616,8 @@ func (m *GroupMutation) EdgeCleared(name string) bool {
return m.clearednotifiers
case group.EdgeEntityTemplates:
return m.clearedentity_templates
+ case group.EdgeExports:
+ return m.clearedexports
}
return false
}
@@ -9645,6 +10655,9 @@ func (m *GroupMutation) ResetEdge(name string) error {
case group.EdgeEntityTemplates:
m.ResetEntityTemplates()
return nil
+ case group.EdgeExports:
+ m.ResetExports()
+ return nil
}
return fmt.Errorf("unknown Group edge %s", name)
}
diff --git a/backend/internal/data/ent/predicate/predicate.go b/backend/internal/data/ent/predicate/predicate.go
index fd2ac993f..5801da30d 100644
--- a/backend/internal/data/ent/predicate/predicate.go
+++ b/backend/internal/data/ent/predicate/predicate.go
@@ -30,6 +30,9 @@ type EntityTemplate func(*sql.Selector)
// EntityType is the predicate function for entitytype builders.
type EntityType func(*sql.Selector)
+// Export is the predicate function for export builders.
+type Export func(*sql.Selector)
+
// Group is the predicate function for group builders.
type Group func(*sql.Selector)
diff --git a/backend/internal/data/ent/runtime.go b/backend/internal/data/ent/runtime.go
index 454e857a5..416198439 100644
--- a/backend/internal/data/ent/runtime.go
+++ b/backend/internal/data/ent/runtime.go
@@ -13,6 +13,7 @@ import (
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entityfield"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytemplate"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/entitytype"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/group"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/groupinvitationtoken"
"github.com/sysadminsmedia/homebox/backend/internal/data/ent/maintenanceentry"
@@ -425,6 +426,37 @@ func init() {
entitytypeDescID := entitytypeMixinFields0[0].Descriptor()
// entitytype.DefaultID holds the default value on creation for the id field.
entitytype.DefaultID = entitytypeDescID.Default.(func() uuid.UUID)
+ exportMixin := schema.Export{}.Mixin()
+ exportMixinFields0 := exportMixin[0].Fields()
+ _ = exportMixinFields0
+ exportFields := schema.Export{}.Fields()
+ _ = exportFields
+ // exportDescCreatedAt is the schema descriptor for created_at field.
+ exportDescCreatedAt := exportMixinFields0[1].Descriptor()
+ // export.DefaultCreatedAt holds the default value on creation for the created_at field.
+ export.DefaultCreatedAt = exportDescCreatedAt.Default.(func() time.Time)
+ // exportDescUpdatedAt is the schema descriptor for updated_at field.
+ exportDescUpdatedAt := exportMixinFields0[2].Descriptor()
+ // export.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+ export.DefaultUpdatedAt = exportDescUpdatedAt.Default.(func() time.Time)
+ // export.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+ export.UpdateDefaultUpdatedAt = exportDescUpdatedAt.UpdateDefault.(func() time.Time)
+ // exportDescProgress is the schema descriptor for progress field.
+ exportDescProgress := exportFields[2].Descriptor()
+ // export.DefaultProgress holds the default value on creation for the progress field.
+ export.DefaultProgress = exportDescProgress.Default.(int)
+ // exportDescSizeBytes is the schema descriptor for size_bytes field.
+ exportDescSizeBytes := exportFields[4].Descriptor()
+ // export.DefaultSizeBytes holds the default value on creation for the size_bytes field.
+ export.DefaultSizeBytes = exportDescSizeBytes.Default.(int64)
+ // exportDescError is the schema descriptor for error field.
+ exportDescError := exportFields[5].Descriptor()
+ // export.ErrorValidator is a validator for the "error" field. It is called by the builders before save.
+ export.ErrorValidator = exportDescError.Validators[0].(func(string) error)
+ // exportDescID is the schema descriptor for id field.
+ exportDescID := exportMixinFields0[0].Descriptor()
+ // export.DefaultID holds the default value on creation for the id field.
+ export.DefaultID = exportDescID.Default.(func() uuid.UUID)
groupMixin := schema.Group{}.Mixin()
groupMixinFields0 := groupMixin[0].Fields()
_ = groupMixinFields0
diff --git a/backend/internal/data/ent/schema/export.go b/backend/internal/data/ent/schema/export.go
new file mode 100644
index 000000000..eb46ba1af
--- /dev/null
+++ b/backend/internal/data/ent/schema/export.go
@@ -0,0 +1,62 @@
+package schema
+
+import (
+ "entgo.io/ent"
+ "entgo.io/ent/schema/field"
+ "entgo.io/ent/schema/index"
+
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/schema/mixins"
+)
+
+// Export holds the schema definition for the Export entity. An Export row
+// tracks a collection-archive job: its lifecycle status and, on completion,
+// the blob storage key for the produced zip artifact.
+type Export struct {
+ ent.Schema
+}
+
+func (Export) Mixin() []ent.Mixin {
+ return []ent.Mixin{
+ mixins.BaseMixin{},
+ GroupMixin{
+ ref: "exports",
+ field: "group_id",
+ },
+ }
+}
+
+func (Export) Fields() []ent.Field {
+ return []ent.Field{
+ // kind distinguishes server-produced export artifacts from
+ // user-uploaded import zips. The whole row lifecycle (status,
+ // progress, error) applies identically to both flavors — only the
+ // terminal action differs ("download" vs "restore"). Keeping them
+ // in one table avoids duplicating the entire job-tracking schema.
+ field.Enum("kind").
+ Values("export", "import").
+ Default("export"),
+ field.Enum("status").
+ Values("pending", "running", "completed", "failed").
+ Default("pending"),
+ field.Int("progress").
+ Default(0),
+ // artifact_path is the blob key this row points at: for kind=export
+ // it's the server-produced zip; for kind=import it's the upload
+ // staged at "{gid}/imports/{uuid}.zip" before the worker restores
+ // it.
+ field.String("artifact_path").
+ Optional(),
+ field.Int64("size_bytes").
+ Default(0),
+ field.String("error").
+ MaxLen(1000).
+ Optional(),
+ }
+}
+
+func (Export) Indexes() []ent.Index {
+ return []ent.Index{
+ index.Fields("group_id"),
+ index.Fields("group_id", "status"),
+ }
+}
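Creating a job row against this schema needs only a group: `kind` and `status` fall back to their declared defaults ("export"/"pending"). A sketch using the ent-generated `ExportCreate` builder and enum constants (assumed from standard codegen of the enum values above):

```go
package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/sysadminsmedia/homebox/backend/internal/data/ent"
	"github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
)

// enqueueExport inserts a pending export job for a group. The explicit
// setters are shown for clarity; both match the schema defaults.
func enqueueExport(ctx context.Context, client *ent.Client, gid uuid.UUID) (*ent.Export, error) {
	return client.Export.Create().
		SetGroupID(gid).
		SetKind(export.KindExport).      // or export.KindImport for uploaded zips
		SetStatus(export.StatusPending). // redundant: matches the default
		Save(ctx)
}
```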
diff --git a/backend/internal/data/ent/schema/group.go b/backend/internal/data/ent/schema/group.go
index a3b134498..9089bcc3f 100644
--- a/backend/internal/data/ent/schema/group.go
+++ b/backend/internal/data/ent/schema/group.go
@@ -52,6 +52,7 @@ func (Group) Edges() []ent.Edge {
owned("invitation_tokens", GroupInvitationToken.Type),
owned("notifiers", Notifier.Type),
owned("entity_templates", EntityTemplate.Type),
+ owned("exports", Export.Type),
// $scaffold_edge
}
}
diff --git a/backend/internal/data/ent/tx.go b/backend/internal/data/ent/tx.go
index 48063134f..9128dffac 100644
--- a/backend/internal/data/ent/tx.go
+++ b/backend/internal/data/ent/tx.go
@@ -28,6 +28,8 @@ type Tx struct {
EntityTemplate *EntityTemplateClient
// EntityType is the client for interacting with the EntityType builders.
EntityType *EntityTypeClient
+ // Export is the client for interacting with the Export builders.
+ Export *ExportClient
// Group is the client for interacting with the Group builders.
Group *GroupClient
// GroupInvitationToken is the client for interacting with the GroupInvitationToken builders.
@@ -185,6 +187,7 @@ func (tx *Tx) init() {
tx.EntityField = NewEntityFieldClient(tx.config)
tx.EntityTemplate = NewEntityTemplateClient(tx.config)
tx.EntityType = NewEntityTypeClient(tx.config)
+ tx.Export = NewExportClient(tx.config)
tx.Group = NewGroupClient(tx.config)
tx.GroupInvitationToken = NewGroupInvitationTokenClient(tx.config)
tx.MaintenanceEntry = NewMaintenanceEntryClient(tx.config)
diff --git a/backend/internal/data/migrations/postgres/20260512130001_add_exports.sql b/backend/internal/data/migrations/postgres/20260512130001_add_exports.sql
new file mode 100644
index 000000000..b2af48b58
--- /dev/null
+++ b/backend/internal/data/migrations/postgres/20260512130001_add_exports.sql
@@ -0,0 +1,23 @@
+-- +goose Up
+-- Create "exports" table
+CREATE TABLE IF NOT EXISTS "exports" (
+ "id" uuid NOT NULL,
+ "created_at" timestamptz NOT NULL,
+ "updated_at" timestamptz NOT NULL,
+ "kind" character varying NOT NULL DEFAULT 'export'
+ CHECK ("kind" IN ('export', 'import')),
+ "status" character varying NOT NULL DEFAULT 'pending'
+ CHECK ("status" IN ('pending', 'running', 'completed', 'failed')),
+ "progress" bigint NOT NULL DEFAULT 0,
+ "artifact_path" character varying NULL,
+ "size_bytes" bigint NOT NULL DEFAULT 0,
+ "error" character varying NULL
+ CHECK ("error" IS NULL OR char_length("error") <= 1000),
+ "group_id" uuid NOT NULL,
+ PRIMARY KEY ("id"),
+ CONSTRAINT "exports_groups_exports" FOREIGN KEY ("group_id") REFERENCES "groups" ("id") ON UPDATE NO ACTION ON DELETE CASCADE
+);
+-- Create index "export_group_id" to table: "exports"
+CREATE INDEX IF NOT EXISTS "export_group_id" ON "exports" ("group_id");
+-- Create index "export_group_id_status" to table: "exports"
+CREATE INDEX IF NOT EXISTS "export_group_id_status" ON "exports" ("group_id", "status");
diff --git a/backend/internal/data/migrations/sqlite3/20260512130000_add_exports.sql b/backend/internal/data/migrations/sqlite3/20260512130000_add_exports.sql
new file mode 100644
index 000000000..1aee8a5d4
--- /dev/null
+++ b/backend/internal/data/migrations/sqlite3/20260512130000_add_exports.sql
@@ -0,0 +1,27 @@
+-- +goose Up
+create table if not exists exports
+(
+ id uuid not null
+ primary key,
+ created_at datetime not null,
+ updated_at datetime not null,
+ kind text default 'export' not null
+ check (kind in ('export', 'import')),
+ status text default 'pending' not null
+ check (status in ('pending', 'running', 'completed', 'failed')),
+ progress integer default 0 not null,
+ artifact_path text,
+ size_bytes integer default 0 not null,
+ error text
+ check (error is null or length(error) <= 1000),
+ group_id uuid not null
+ constraint exports_groups_exports
+ references groups
+ on delete cascade
+);
+
+create index if not exists export_group_id
+ on exports (group_id);
+
+create index if not exists export_group_id_status
+ on exports (group_id, status);
diff --git a/backend/internal/data/repo/repo_entities.go b/backend/internal/data/repo/repo_entities.go
index 64e8feb4b..c92b17775 100644
--- a/backend/internal/data/repo/repo_entities.go
+++ b/backend/internal/data/repo/repo_entities.go
@@ -1703,6 +1703,107 @@ func (r *EntityRepository) GetAllZeroImportRef(ctx context.Context, gid uuid.UUI
return ids, nil
}
+// patchSyncTags reconciles an entity's tag set against want: tags in want
+// but not currently attached are added; currently attached tags absent from
+// want are removed. want must be non-nil — callers omit the call entirely
+// when the patch doesn't touch tags.
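+//
+// For example (illustrative): with current tags {A, B} and want = [B, C],
+// the call adds C, removes A, and leaves B untouched.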
+func patchSyncTags(ctx context.Context, tx *ent.Tx, gid, id uuid.UUID, want []uuid.UUID) error {
+ tagsCtx, tagsSpan := entityTracer().Start(ctx, "repo.EntityRepository.Patch.tags",
+ trace.WithAttributes(attribute.Int("tags.input.count", len(want))))
+ defer tagsSpan.End()
+
+ currentTags, err := tx.Entity.Query().Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).QueryTag().All(tagsCtx)
+ if err != nil {
+ recordSpanError(tagsSpan, err)
+ return err
+ }
+ set := newIDSet(currentTags)
+
+ addTags := []uuid.UUID{}
+ for _, l := range want {
+ if set.Contains(l) {
+ set.Remove(l)
+ } else {
+ addTags = append(addTags, l)
+ }
+ }
+
+ if len(addTags) > 0 {
+ if err := tx.Entity.Update().
+ Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).
+ AddTagIDs(addTags...).
+ Exec(tagsCtx); err != nil {
+ recordSpanError(tagsSpan, err)
+ return err
+ }
+ }
+ if set.Len() > 0 {
+ if err := tx.Entity.Update().
+ Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).
+ RemoveTagIDs(set.Slice()...).
+ Exec(tagsCtx); err != nil {
+ recordSpanError(tagsSpan, err)
+ return err
+ }
+ }
+ tagsSpan.SetAttributes(
+ attribute.Int("tags.added.count", len(addTags)),
+ attribute.Int("tags.removed.count", set.Len()),
+ )
+ return nil
+}
+
+// patchSyncChildLocations propagates a parent move down to children when the
+// entity has SyncChildEntityLocations enabled. No-op when the flag is off.
+func patchSyncChildLocations(ctx context.Context, tx *ent.Tx, gid, id, parentID uuid.UUID) error {
+ syncCtx, syncSpan := entityTracer().Start(ctx, "repo.EntityRepository.Patch.syncChildLocations")
+ defer syncSpan.End()
+
+ entityEnt, err := tx.Entity.Query().Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).Only(syncCtx)
+ if err != nil {
+ recordSpanError(syncSpan, err)
+ return err
+ }
+ syncSpan.SetAttributes(attribute.Bool("entity.sync_child_locations", entityEnt.SyncChildEntityLocations))
+ if !entityEnt.SyncChildEntityLocations {
+ return nil
+ }
+
+ children, err := tx.Entity.Query().Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).QueryChildren().All(syncCtx)
+ if err != nil {
+ recordSpanError(syncSpan, err)
+ return err
+ }
+ updatedCount := 0
+ for _, child := range children {
+ childParent, err := child.QueryParent().First(syncCtx)
+ switch {
+ case err == nil:
+ if childParent.ID == parentID {
+ continue
+ }
+ case ent.IsNotFound(err):
+ // Child has no parent yet — treat as "needs the new parent."
+ default:
+ // Any other error (transient DB failure, context cancel, etc.)
+ // must NOT be interpreted as "missing parent → reparent" — that
+ // would silently move rows on a network blip.
+ recordSpanError(syncSpan, err)
+ return err
+ }
+ if err := child.Update().SetParentID(parentID).Exec(syncCtx); err != nil {
+ recordSpanError(syncSpan, err)
+ return err
+ }
+ updatedCount++
+ }
+ syncSpan.SetAttributes(
+ attribute.Int("children.count", len(children)),
+ attribute.Int("children.updated.count", updatedCount),
+ )
+ return nil
+}
+
func (r *EntityRepository) Patch(ctx context.Context, gid, id uuid.UUID, data EntityPatch) error {
ctx, span := entityTracer().Start(ctx, "repo.EntityRepository.Patch",
trace.WithAttributes(
@@ -1787,93 +1888,17 @@ func (r *EntityRepository) Patch(ctx context.Context, gid, id uuid.UUID, data En
execSpan.End()
if data.TagIDs != nil {
- tagsCtx, tagsSpan := entityTracer().Start(ctx, "repo.EntityRepository.Patch.tags",
- trace.WithAttributes(attribute.Int("tags.input.count", len(data.TagIDs))))
- currentTags, err := tx.Entity.Query().Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).QueryTag().All(tagsCtx)
- if err != nil {
- recordSpanError(tagsSpan, err)
- tagsSpan.End()
+ if err := patchSyncTags(ctx, tx, gid, id, data.TagIDs); err != nil {
recordSpanError(span, err)
return err
}
- set := newIDSet(currentTags)
-
- addTags := []uuid.UUID{}
- for _, l := range data.TagIDs {
- if set.Contains(l) {
- set.Remove(l)
- } else {
- addTags = append(addTags, l)
- }
- }
-
- if len(addTags) > 0 {
- if err := tx.Entity.Update().
- Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).
- AddTagIDs(addTags...).
- Exec(tagsCtx); err != nil {
- recordSpanError(tagsSpan, err)
- tagsSpan.End()
- recordSpanError(span, err)
- return err
- }
- }
- if set.Len() > 0 {
- if err := tx.Entity.Update().
- Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).
- RemoveTagIDs(set.Slice()...).
- Exec(tagsCtx); err != nil {
- recordSpanError(tagsSpan, err)
- tagsSpan.End()
- recordSpanError(span, err)
- return err
- }
- }
- tagsSpan.SetAttributes(
- attribute.Int("tags.added.count", len(addTags)),
- attribute.Int("tags.removed.count", set.Len()),
- )
- tagsSpan.End()
}
if data.ParentID != uuid.Nil {
- syncCtx, syncSpan := entityTracer().Start(ctx, "repo.EntityRepository.Patch.syncChildLocations")
- entityEnt, err := tx.Entity.Query().Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).Only(syncCtx)
- if err != nil {
- recordSpanError(syncSpan, err)
- syncSpan.End()
+ if err := patchSyncChildLocations(ctx, tx, gid, id, data.ParentID); err != nil {
recordSpanError(span, err)
return err
}
- syncSpan.SetAttributes(attribute.Bool("entity.sync_child_locations", entityEnt.SyncChildEntityLocations))
- if entityEnt.SyncChildEntityLocations {
- children, err := tx.Entity.Query().Where(entity.ID(id), entity.HasGroupWith(group.ID(gid))).QueryChildren().All(syncCtx)
- if err != nil {
- recordSpanError(syncSpan, err)
- syncSpan.End()
- recordSpanError(span, err)
- return err
- }
- updatedCount := 0
- for _, child := range children {
- childParent, err := child.QueryParent().First(syncCtx)
- if err != nil || childParent.ID != data.ParentID {
- err = child.Update().SetParentID(data.ParentID).Exec(syncCtx)
- if err != nil {
- recordSpanError(syncSpan, err)
- syncSpan.End()
- recordSpanError(span, err)
- return err
- }
- updatedCount++
- }
- }
- syncSpan.SetAttributes(
- attribute.Int("children.count", len(children)),
- attribute.Int("children.updated.count", updatedCount),
- )
- }
- syncSpan.End()
}
_, commitSpan := entityTracer().Start(ctx, "repo.EntityRepository.Patch.commit")
diff --git a/backend/internal/data/repo/repo_exports.go b/backend/internal/data/repo/repo_exports.go
new file mode 100644
index 000000000..3ffa2a24d
--- /dev/null
+++ b/backend/internal/data/repo/repo_exports.go
@@ -0,0 +1,188 @@
+package repo
+
+import (
+ "context"
+ "time"
+ "unicode/utf8"
+
+ "github.com/google/uuid"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent"
+ "github.com/sysadminsmedia/homebox/backend/internal/data/ent/export"
+)
+
+// ExportRepository persists Export job rows. Except for ListOlderThan
+// (the cross-tenant cleanup sweep), every method is group-scoped: callers
+// pass the requesting tenant's gid and the repo refuses to act on rows
+// owned by a different group.
+type ExportRepository struct {
+ db *ent.Client
+}
+
+type ExportOut struct {
+ ID uuid.UUID `json:"id"`
+ GroupID uuid.UUID `json:"groupId"`
+ // Kind is "export" for server-produced backup artifacts, "import" for
+ // user-uploaded restore zips. The lifecycle fields below behave the
+ // same for both.
+ Kind string `json:"kind"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+ Status string `json:"status"`
+ Progress int `json:"progress"`
+ ArtifactPath string `json:"artifactPath,omitempty"`
+ SizeBytes int64 `json:"sizeBytes"`
+ Error string `json:"error,omitempty"`
+}
+
+func mapExport(e *ent.Export) ExportOut {
+ return ExportOut{
+ ID: e.ID,
+ GroupID: e.GroupID,
+ Kind: string(e.Kind),
+ CreatedAt: e.CreatedAt,
+ UpdatedAt: e.UpdatedAt,
+ Status: string(e.Status),
+ Progress: e.Progress,
+ ArtifactPath: e.ArtifactPath,
+ SizeBytes: e.SizeBytes,
+ Error: e.Error,
+ }
+}
+
+func (r *ExportRepository) Create(ctx context.Context, gid uuid.UUID) (ExportOut, error) {
+ e, err := r.db.Export.Create().
+ SetGroupID(gid).
+ Save(ctx)
+ if err != nil {
+ return ExportOut{}, err
+ }
+ return mapExport(e), nil
+}
+
+// CreateImport stages a new pending row representing an upload that the
+// worker will restore. The uploadKey points at the blob already written
+// to "{gid}/imports/{uuid}.zip", and sizeBytes is the streamed upload
+// size so the UI can show "X MB queued" before the worker even starts.
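+//
+// For example (illustrative values): a 52428800-byte upload staged under
+// the "{gid}/imports/{uuid}.zip" convention is immediately listed as a
+// pending 50 MB row.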
+func (r *ExportRepository) CreateImport(ctx context.Context, gid uuid.UUID, uploadKey string, sizeBytes int64) (ExportOut, error) {
+ e, err := r.db.Export.Create().
+ SetGroupID(gid).
+ SetKind(export.KindImport).
+ SetArtifactPath(uploadKey).
+ SetSizeBytes(sizeBytes).
+ Save(ctx)
+ if err != nil {
+ return ExportOut{}, err
+ }
+ return mapExport(e), nil
+}
+
+func (r *ExportRepository) ListByGroup(ctx context.Context, gid uuid.UUID) ([]ExportOut, error) {
+ rows, err := r.db.Export.Query().
+ Where(export.GroupID(gid)).
+ Order(ent.Desc(export.FieldCreatedAt)).
+ All(ctx)
+ if err != nil {
+ return nil, err
+ }
+ out := make([]ExportOut, len(rows))
+ for i, e := range rows {
+ out[i] = mapExport(e)
+ }
+ return out, nil
+}
+
+// Get returns an export iff it exists AND is owned by gid.
+func (r *ExportRepository) Get(ctx context.Context, gid uuid.UUID, id uuid.UUID) (ExportOut, error) {
+ e, err := r.db.Export.Query().
+ Where(export.ID(id), export.GroupID(gid)).
+ Only(ctx)
+ if err != nil {
+ return ExportOut{}, err
+ }
+ return mapExport(e), nil
+}
+
+// SetRunning, SetProgress, SetCompleted, and SetFailed all carry gid so the
+// underlying UPDATE matches only when the row belongs to that group. A
+// mismatched gid yields ent.NotFoundError rather than a silent cross-tenant
+// mutation — matching the package contract documented on ExportRepository.
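+//
+// For example (illustrative): SetRunning(ctx, gidA, idOwnedByGidB) matches
+// zero rows and returns *ent.NotFoundError, leaving the row untouched.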
+func (r *ExportRepository) SetRunning(ctx context.Context, gid, id uuid.UUID) error {
+ return r.db.Export.UpdateOneID(id).
+ Where(export.GroupID(gid)).
+ SetStatus(export.StatusRunning).
+ SetProgress(0).
+ Exec(ctx)
+}
+
+func (r *ExportRepository) SetProgress(ctx context.Context, gid, id uuid.UUID, pct int) error {
+ if pct < 0 {
+ pct = 0
+ } else if pct > 100 {
+ pct = 100
+ }
+ return r.db.Export.UpdateOneID(id).
+ Where(export.GroupID(gid)).
+ SetProgress(pct).
+ Exec(ctx)
+}
+
+func (r *ExportRepository) SetCompleted(ctx context.Context, gid, id uuid.UUID, artifactPath string, sizeBytes int64) error {
+ return r.db.Export.UpdateOneID(id).
+ Where(export.GroupID(gid)).
+ SetStatus(export.StatusCompleted).
+ SetProgress(100).
+ SetArtifactPath(artifactPath).
+ SetSizeBytes(sizeBytes).
+ Exec(ctx)
+}
+
+func (r *ExportRepository) SetFailed(ctx context.Context, gid, id uuid.UUID, errMsg string) error {
+ const maxErrBytes = 1000
+ if len(errMsg) > maxErrBytes {
+ // Cut at the last rune boundary that keeps the total ≤ maxErrBytes.
+ // Plain byte-slicing can split a multibyte rune and the resulting
+ // invalid UTF-8 fails Postgres' UTF8 encoding check on insert,
+ // masking the real failure with a database error.
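+ // For example (illustrative): a 2-byte rune that would straddle byte
+ // 1000 is dropped whole rather than split.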
+ cut := 0
+ for i, ch := range errMsg {
+ end := i + utf8.RuneLen(ch)
+ if end > maxErrBytes {
+ break
+ }
+ cut = end
+ }
+ errMsg = errMsg[:cut]
+ }
+ return r.db.Export.UpdateOneID(id).
+ Where(export.GroupID(gid)).
+ SetStatus(export.StatusFailed).
+ SetError(errMsg).
+ Exec(ctx)
+}
+
+// Delete removes an export row scoped to gid. Callers must remove the blob
+// artifact separately if one exists.
+func (r *ExportRepository) Delete(ctx context.Context, gid uuid.UUID, id uuid.UUID) (int, error) {
+ return r.db.Export.Delete().
+ Where(export.ID(id), export.GroupID(gid)).
+ Exec(ctx)
+}
+
+// ListOlderThan returns rows older than cutoff so the sweep task can drop
+// each one's blob artifact before removing the DB row. The row carries the
+// only persisted pointer to the blob, so the caller MUST delete the row only
+// after the blob is gone (or confirmed absent) — otherwise a transient bucket
+// outage would orphan the blob with no path to find it again. Not scoped by
+// group on purpose: this is the cleanup task that sweeps every tenant.
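+//
+// Sweep sketch (illustrative; bucket, isNotExist, and repo are assumed
+// names, not part of this change):
+//
+//	for _, row := range rows {
+//		if row.ArtifactPath != "" {
+//			if err := bucket.Delete(ctx, row.ArtifactPath); err != nil && !isNotExist(err) {
+//				continue // blob may still exist: keep the row, retry next sweep
+//			}
+//		}
+//		_, _ = repo.Delete(ctx, row.GroupID, row.ID)
+//	}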
+func (r *ExportRepository) ListOlderThan(ctx context.Context, cutoff time.Time) ([]ExportOut, error) {
+ rows, err := r.db.Export.Query().
+ Where(export.CreatedAtLT(cutoff)).
+ All(ctx)
+ if err != nil {
+ return nil, err
+ }
+ out := make([]ExportOut, len(rows))
+ for i, e := range rows {
+ out[i] = mapExport(e)
+ }
+ return out, nil
+}
diff --git a/backend/internal/data/repo/repos_all.go b/backend/internal/data/repo/repos_all.go
index 0c4d367de..31ab59d12 100644
--- a/backend/internal/data/repo/repos_all.go
+++ b/backend/internal/data/repo/repos_all.go
@@ -21,6 +21,7 @@ type AllRepos struct {
Attachments *AttachmentRepo
MaintEntry *MaintenanceEntryRepository
Notifiers *NotifierRepository
+ Exports *ExportRepository
}
func New(db *ent.Client, bus *eventbus.EventBus, storage config.Storage, pubSubConn string, thumbnail config.Thumbnail) *AllRepos {
@@ -38,5 +39,6 @@ func New(db *ent.Client, bus *eventbus.EventBus, storage config.Storage, pubSubC
Attachments: attachments,
MaintEntry: &MaintenanceEntryRepository{db},
Notifiers: NewNotifierRepository(db),
+ Exports: &ExportRepository{db},
}
}
diff --git a/backend/internal/sys/config/conf.go b/backend/internal/sys/config/conf.go
index 7567c5c07..e947ccb4a 100644
--- a/backend/internal/sys/config/conf.go
+++ b/backend/internal/sys/config/conf.go
@@ -87,12 +87,19 @@ type DebugConf struct {
}
type WebConfig struct {
- Port string `yaml:"port" conf:"default:7745"`
- Host string `yaml:"host"`
- MaxUploadSize int64 `yaml:"max_file_upload" conf:"default:10"`
- ReadTimeout time.Duration `yaml:"read_timeout" conf:"default:10s"`
- WriteTimeout time.Duration `yaml:"write_timeout" conf:"default:10s"`
- IdleTimeout time.Duration `yaml:"idle_timeout" conf:"default:30s"`
+ Port string `yaml:"port" conf:"default:7745"`
+ Host string `yaml:"host"`
+ // MaxUploadSize is the body cap (in MB) applied to ordinary upload
+ // endpoints (attachments, item imports, etc.). Defaults to 10 MB.
+ MaxUploadSize int64 `yaml:"max_file_upload" conf:"default:10"`
+ // MaxImportSize is the body cap (in MB) for collection-restore uploads
+ // (POST /v1/group/import). Set independently because a full collection
+ // backup including attachments can be much larger than a single asset
+ // upload. Defaults to 1 GB.
+ MaxImportSize int64 `yaml:"max_import_upload" conf:"default:1024"`
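+ // For example, setting "max_import_upload: 2048" in the YAML config
+ // (or the matching environment variable) raises the cap to 2 GB.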
+ ReadTimeout time.Duration `yaml:"read_timeout" conf:"default:10s"`
+ WriteTimeout time.Duration `yaml:"write_timeout" conf:"default:10s"`
+ IdleTimeout time.Duration `yaml:"idle_timeout" conf:"default:30s"`
}
type LabelMakerConf struct {
diff --git a/backend/internal/sys/config/conf_redact_test.go b/backend/internal/sys/config/conf_redact_test.go
index 3a4300817..4f9833128 100644
--- a/backend/internal/sys/config/conf_redact_test.go
+++ b/backend/internal/sys/config/conf_redact_test.go
@@ -2,7 +2,6 @@ package config
import (
"encoding/json"
- "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -140,6 +139,6 @@ func Test_Config_FullMarshalRedactsAllSecrets(t *testing.T) {
"bs-secret",
"otel-secret",
} {
- assert.Falsef(t, strings.Contains(string(out), secret), "expected %q to be redacted in output", secret)
+ assert.NotContainsf(t, string(out), secret, "expected %q to be redacted in output", secret)
}
}
diff --git a/backend/internal/web/mid/security.go b/backend/internal/web/mid/security.go
index 0f196f5c4..3a6c8aeab 100644
--- a/backend/internal/web/mid/security.go
+++ b/backend/internal/web/mid/security.go
@@ -2,6 +2,8 @@ package mid
import (
"net/http"
+ "sort"
+ "strings"
)
// SecurityHeaders is a middleware that will set security headers on the response
@@ -34,3 +36,33 @@ func MaxBodySize(maxBytes int64) func(http.Handler) http.Handler {
})
}
}
+
+// MaxBodySizeByPath is like MaxBodySize but picks the cap by URL path
+// prefix. Useful when one route legitimately accepts a much larger body
+// than the rest of the API (e.g., collection imports vs. attachment
+// uploads). The longest matching prefix wins, and the match is segment-
+// aware so "/api/v1/group/import" does not accidentally apply to a
+// sibling like "/api/v1/group/imports". If none match, defaultMB
+// applies. Sizes are in MB.
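+//
+// A minimal wiring sketch (illustrative, not part of this change):
+//
+//	router.Use(MaxBodySizeByPath(10, map[string]int64{
+//		"/api/v1/group/import": 1024,
+//	}))
+//
+// This caps most bodies at 10 MB while allowing 1 GB on the import route.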
+func MaxBodySizeByPath(defaultMB int64, overrides map[string]int64) func(http.Handler) http.Handler {
+ prefixes := make([]string, 0, len(overrides))
+ for p := range overrides {
+ prefixes = append(prefixes, p)
+ }
+ sort.Slice(prefixes, func(i, j int) bool {
+ return len(prefixes[i]) > len(prefixes[j])
+ })
+ return func(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ limit := defaultMB
+ for _, prefix := range prefixes {
+ if r.URL.Path == prefix || strings.HasPrefix(r.URL.Path, prefix+"/") {
+ limit = overrides[prefix]
+ break
+ }
+ }
+ r.Body = http.MaxBytesReader(w, r.Body, limit*1024*1024)
+ h.ServeHTTP(w, r)
+ })
+ }
+}
diff --git a/docs/public/api/openapi-3.0.json b/docs/public/api/openapi-3.0.json
index d5a19e3df..a16bb34b1 100644
--- a/docs/public/api/openapi-3.0.json
+++ b/docs/public/api/openapi-3.0.json
@@ -1258,6 +1258,203 @@
}
}
},
+ "/v1/group/exports": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Returns export job rows for the caller's group, newest first.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "List Collection Exports",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/v1.Results-repo_ExportOut"
+ }
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Creates a pending export row and enqueues the build job. Poll the listing endpoint or watch the WebSocket for completion.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Start a Collection Export",
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Get an Export",
+ "parameters": [
+ {
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Deletes the export row and its blob artifact.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Delete an Export",
+ "parameters": [
+ {
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}/download": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Download an Export Artifact",
+ "parameters": [
+ {
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/zip": {
+ "schema": {
+ "type": "string",
+ "format": "binary"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/import": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Uploads a collection-export zip and enqueues the import job. The destination group must be empty. Returns the tracked import row so clients can poll for progress.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Import a Collection Zip",
+ "requestBody": {
+ "content": {
+ "multipart/form-data": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "file": {
+ "description": "Export zip",
+ "type": "string",
+ "format": "binary"
+ }
+ },
+ "required": [
+ "file"
+ ]
+ }
+ }
+ },
+ "required": true
+ },
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
"/v1/groups": {
"get": {
"security": [
@@ -3927,6 +4124,80 @@
}
}
},
+ "ent.Export": {
+ "type": "object",
+ "properties": {
+ "artifact_path": {
+ "description": "ArtifactPath holds the value of the \"artifact_path\" field.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "CreatedAt holds the value of the \"created_at\" field.",
+ "type": "string"
+ },
+ "edges": {
+ "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ExportQuery when eager-loading is set.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ent.ExportEdges"
+ }
+ ]
+ },
+ "error": {
+ "description": "Error holds the value of the \"error\" field.",
+ "type": "string"
+ },
+ "group_id": {
+ "description": "GroupID holds the value of the \"group_id\" field.",
+ "type": "string"
+ },
+ "id": {
+ "description": "ID of the ent.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind holds the value of the \"kind\" field.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/export.Kind"
+ }
+ ]
+ },
+ "progress": {
+ "description": "Progress holds the value of the \"progress\" field.",
+ "type": "integer"
+ },
+ "size_bytes": {
+ "description": "SizeBytes holds the value of the \"size_bytes\" field.",
+ "type": "integer"
+ },
+ "status": {
+ "description": "Status holds the value of the \"status\" field.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/export.Status"
+ }
+ ]
+ },
+ "updated_at": {
+ "description": "UpdatedAt holds the value of the \"updated_at\" field.",
+ "type": "string"
+ }
+ }
+ },
+ "ent.ExportEdges": {
+ "type": "object",
+ "properties": {
+ "group": {
+ "description": "Group holds the value of the group edge.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ent.Group"
+ }
+ ]
+ }
+ }
+ },
"ent.Group": {
"type": "object",
"properties": {
@@ -3984,6 +4255,13 @@
"$ref": "#/components/schemas/ent.EntityType"
}
},
+ "exports": {
+ "description": "Exports holds the value of the exports edge.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ent.Export"
+ }
+ },
"invitation_tokens": {
"description": "InvitationTokens holds the value of the invitation_tokens edge.",
"type": "array",
@@ -4571,6 +4849,36 @@
"TypeTime"
]
},
+ "export.Kind": {
+ "type": "string",
+ "enum": [
+ "export",
+ "export",
+ "import"
+ ],
+ "x-enum-varnames": [
+ "DefaultKind",
+ "KindExport",
+ "KindImport"
+ ]
+ },
+ "export.Status": {
+ "type": "string",
+ "enum": [
+ "pending",
+ "pending",
+ "running",
+ "completed",
+ "failed"
+ ],
+ "x-enum-varnames": [
+ "DefaultStatus",
+ "StatusPending",
+ "StatusRunning",
+ "StatusCompleted",
+ "StatusFailed"
+ ]
+ },
"repo.APIKeyCreate": {
"type": "object",
"required": [
@@ -5499,6 +5807,42 @@
}
}
},
+ "repo.ExportOut": {
+ "type": "object",
+ "properties": {
+ "artifactPath": {
+ "type": "string"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "error": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is \"export\" for server-produced backup artifacts, \"import\" for\nuser-uploaded restore zips. The lifecycle fields below behave the\nsame for both.",
+ "type": "string"
+ },
+ "progress": {
+ "type": "integer"
+ },
+ "sizeBytes": {
+ "type": "integer"
+ },
+ "status": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
"repo.Group": {
"type": "object",
"properties": {
@@ -6403,6 +6747,17 @@
}
}
},
+ "v1.Results-repo_ExportOut": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/repo.ExportOut"
+ }
+ }
+ }
+ },
"v1.TelemetryStatus": {
"type": "object",
"properties": {
diff --git a/docs/public/api/openapi-3.0.yaml b/docs/public/api/openapi-3.0.yaml
index 33db3b0a0..867abc4fa 100644
--- a/docs/public/api/openapi-3.0.yaml
+++ b/docs/public/api/openapi-3.0.yaml
@@ -753,6 +753,126 @@ paths:
responses:
"204":
description: No Content
+ /v1/group/exports:
+ get:
+ security:
+ - Bearer: []
+ description: Returns export job rows for the caller's group, newest first.
+ tags:
+ - Group
+ summary: List Collection Exports
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/v1.Results-repo_ExportOut"
+ post:
+ security:
+ - Bearer: []
+ description: Creates a pending export row and enqueues the build job. Poll the
+ listing endpoint or watch the WebSocket for completion.
+ tags:
+ - Group
+ summary: Start a Collection Export
+ responses:
+ "202":
+ description: Accepted
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/repo.ExportOut"
+ "/v1/group/exports/{id}":
+ get:
+ security:
+ - Bearer: []
+ tags:
+ - Group
+ summary: Get an Export
+ parameters:
+ - description: Export ID
+ name: id
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/repo.ExportOut"
+ delete:
+ security:
+ - Bearer: []
+ description: Deletes the export row and its blob artifact.
+ tags:
+ - Group
+ summary: Delete an Export
+ parameters:
+ - description: Export ID
+ name: id
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ "204":
+ description: No Content
+ "/v1/group/exports/{id}/download":
+ get:
+ security:
+ - Bearer: []
+ tags:
+ - Group
+ summary: Download an Export Artifact
+ parameters:
+ - description: Export ID
+ name: id
+ in: path
+ required: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: OK
+ content:
+ application/zip:
+ schema:
+ type: string
+ format: binary
+ /v1/group/import:
+ post:
+ security:
+ - Bearer: []
+ description: Uploads a collection-export zip and enqueues the import job. The
+ destination group must be empty. Returns the tracked import row so
+ clients can poll for progress.
+ tags:
+ - Group
+ summary: Import a Collection Zip
+ requestBody:
+ content:
+ multipart/form-data:
+ schema:
+ type: object
+ properties:
+ file:
+ description: Export zip
+ type: string
+ format: binary
+ required:
+ - file
+ required: true
+ responses:
+ "202":
+ description: Accepted
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/repo.ExportOut"
/v1/groups:
get:
security:
@@ -2410,6 +2530,55 @@ components:
description: Group holds the value of the group edge.
allOf:
- $ref: "#/components/schemas/ent.Group"
+ ent.Export:
+ type: object
+ properties:
+ artifact_path:
+ description: ArtifactPath holds the value of the "artifact_path" field.
+ type: string
+ created_at:
+ description: CreatedAt holds the value of the "created_at" field.
+ type: string
+ edges:
+ description: >-
+ Edges holds the relations/edges for other nodes in the graph.
+
+ The values are being populated by the ExportQuery when eager-loading is set.
+ allOf:
+ - $ref: "#/components/schemas/ent.ExportEdges"
+ error:
+ description: Error holds the value of the "error" field.
+ type: string
+ group_id:
+ description: GroupID holds the value of the "group_id" field.
+ type: string
+ id:
+ description: ID of the ent.
+ type: string
+ kind:
+ description: Kind holds the value of the "kind" field.
+ allOf:
+ - $ref: "#/components/schemas/export.Kind"
+ progress:
+ description: Progress holds the value of the "progress" field.
+ type: integer
+ size_bytes:
+ description: SizeBytes holds the value of the "size_bytes" field.
+ type: integer
+ status:
+ description: Status holds the value of the "status" field.
+ allOf:
+ - $ref: "#/components/schemas/export.Status"
+ updated_at:
+ description: UpdatedAt holds the value of the "updated_at" field.
+ type: string
+ ent.ExportEdges:
+ type: object
+ properties:
+ group:
+ description: Group holds the value of the group edge.
+ allOf:
+ - $ref: "#/components/schemas/ent.Group"
ent.Group:
type: object
properties:
@@ -2453,6 +2622,11 @@ components:
type: array
items:
$ref: "#/components/schemas/ent.EntityType"
+ exports:
+ description: Exports holds the value of the exports edge.
+ type: array
+ items:
+ $ref: "#/components/schemas/ent.Export"
invitation_tokens:
description: InvitationTokens holds the value of the invitation_tokens edge.
type: array
@@ -2862,6 +3036,30 @@ components:
- TypeNumber
- TypeBoolean
- TypeTime
+ export.Kind:
+ type: string
+ enum:
+ - export
+ - export
+ - import
+ x-enum-varnames:
+ - DefaultKind
+ - KindExport
+ - KindImport
+ export.Status:
+ type: string
+ enum:
+ - pending
+ - pending
+ - running
+ - completed
+ - failed
+ x-enum-varnames:
+ - DefaultStatus
+ - StatusPending
+ - StatusRunning
+ - StatusCompleted
+ - StatusFailed
repo.APIKeyCreate:
type: object
required:
@@ -3505,6 +3703,33 @@ components:
type: string
warrantyExpires:
type: string
+ repo.ExportOut:
+ type: object
+ properties:
+ artifactPath:
+ type: string
+ createdAt:
+ type: string
+ error:
+ type: string
+ groupId:
+ type: string
+ id:
+ type: string
+ kind:
+ description: |-
+ Kind is "export" for server-produced backup artifacts, "import" for
+ user-uploaded restore zips. The lifecycle fields below behave the
+ same for both.
+ type: string
+ progress:
+ type: integer
+ sizeBytes:
+ type: integer
+ status:
+ type: string
+ updatedAt:
+ type: string
repo.Group:
type: object
properties:
@@ -4108,6 +4333,13 @@ components:
token:
type: string
minLength: 20
+ v1.Results-repo_ExportOut:
+ type: object
+ properties:
+ items:
+ type: array
+ items:
+ $ref: "#/components/schemas/repo.ExportOut"
v1.TelemetryStatus:
type: object
properties:
diff --git a/docs/public/api/swagger-2.0.json b/docs/public/api/swagger-2.0.json
index 0852613ec..257fa81d1 100644
--- a/docs/public/api/swagger-2.0.json
+++ b/docs/public/api/swagger-2.0.json
@@ -1152,6 +1152,183 @@
}
}
},
+ "/v1/group/exports": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Returns export job rows for the caller's group, newest first.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "List Collection Exports",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1.Results-repo_ExportOut"
+ }
+ }
+ }
+ },
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Creates a pending export row and enqueues the build job. Poll the listing endpoint or watch the WebSocket for completion.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Start a Collection Export",
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Get an Export",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ },
+ "delete": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Deletes the export row and its blob artifact.",
+ "tags": [
+ "Group"
+ ],
+ "summary": "Delete an Export",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "204": {
+ "description": "No Content"
+ }
+ }
+ }
+ },
+ "/v1/group/exports/{id}/download": {
+ "get": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "produces": [
+ "application/zip"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Download an Export Artifact",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Export ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "file"
+ }
+ }
+ }
+ }
+ },
+ "/v1/group/import": {
+ "post": {
+ "security": [
+ {
+ "Bearer": []
+ }
+ ],
+ "description": "Uploads a collection-export zip and enqueues the import job. The destination group must be empty. Returns the tracked import row so clients can poll for progress.",
+ "consumes": [
+ "multipart/form-data"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Group"
+ ],
+ "summary": "Import a Collection Zip",
+ "parameters": [
+ {
+ "type": "file",
+ "description": "Export zip",
+ "name": "file",
+ "in": "formData",
+ "required": true
+ }
+ ],
+ "responses": {
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ }
+ },
"/v1/groups": {
"get": {
"security": [
@@ -3705,6 +3882,80 @@
}
}
},
+ "ent.Export": {
+ "type": "object",
+ "properties": {
+ "artifact_path": {
+ "description": "ArtifactPath holds the value of the \"artifact_path\" field.",
+ "type": "string"
+ },
+ "created_at": {
+ "description": "CreatedAt holds the value of the \"created_at\" field.",
+ "type": "string"
+ },
+ "edges": {
+ "description": "Edges holds the relations/edges for other nodes in the graph.\nThe values are being populated by the ExportQuery when eager-loading is set.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/ent.ExportEdges"
+ }
+ ]
+ },
+ "error": {
+ "description": "Error holds the value of the \"error\" field.",
+ "type": "string"
+ },
+ "group_id": {
+ "description": "GroupID holds the value of the \"group_id\" field.",
+ "type": "string"
+ },
+ "id": {
+ "description": "ID of the ent.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind holds the value of the \"kind\" field.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/export.Kind"
+ }
+ ]
+ },
+ "progress": {
+ "description": "Progress holds the value of the \"progress\" field.",
+ "type": "integer"
+ },
+ "size_bytes": {
+ "description": "SizeBytes holds the value of the \"size_bytes\" field.",
+ "type": "integer"
+ },
+ "status": {
+ "description": "Status holds the value of the \"status\" field.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/export.Status"
+ }
+ ]
+ },
+ "updated_at": {
+ "description": "UpdatedAt holds the value of the \"updated_at\" field.",
+ "type": "string"
+ }
+ }
+ },
+ "ent.ExportEdges": {
+ "type": "object",
+ "properties": {
+ "group": {
+ "description": "Group holds the value of the group edge.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/ent.Group"
+ }
+ ]
+ }
+ }
+ },
"ent.Group": {
"type": "object",
"properties": {
@@ -3762,6 +4013,13 @@
"$ref": "#/definitions/ent.EntityType"
}
},
+ "exports": {
+ "description": "Exports holds the value of the exports edge.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ent.Export"
+ }
+ },
"invitation_tokens": {
"description": "InvitationTokens holds the value of the invitation_tokens edge.",
"type": "array",
@@ -4349,6 +4607,36 @@
"TypeTime"
]
},
+ "export.Kind": {
+ "type": "string",
+ "enum": [
+ "export",
+ "export",
+ "import"
+ ],
+ "x-enum-varnames": [
+ "DefaultKind",
+ "KindExport",
+ "KindImport"
+ ]
+ },
+ "export.Status": {
+ "type": "string",
+ "enum": [
+ "pending",
+ "pending",
+ "running",
+ "completed",
+ "failed"
+ ],
+ "x-enum-varnames": [
+ "DefaultStatus",
+ "StatusPending",
+ "StatusRunning",
+ "StatusCompleted",
+ "StatusFailed"
+ ]
+ },
"repo.APIKeyCreate": {
"type": "object",
"required": [
@@ -5277,6 +5565,42 @@
}
}
},
+ "repo.ExportOut": {
+ "type": "object",
+ "properties": {
+ "artifactPath": {
+ "type": "string"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "error": {
+ "type": "string"
+ },
+ "groupId": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is \"export\" for server-produced backup artifacts, \"import\" for\nuser-uploaded restore zips. The lifecycle fields below behave the\nsame for both.",
+ "type": "string"
+ },
+ "progress": {
+ "type": "integer"
+ },
+ "sizeBytes": {
+ "type": "integer"
+ },
+ "status": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
"repo.Group": {
"type": "object",
"properties": {
@@ -6181,6 +6505,17 @@
}
}
},
+ "v1.Results-repo_ExportOut": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/repo.ExportOut"
+ }
+ }
+ }
+ },
"v1.TelemetryStatus": {
"type": "object",
"properties": {
diff --git a/docs/public/api/swagger-2.0.yaml b/docs/public/api/swagger-2.0.yaml
index b9e8b30c5..6332c8ec5 100644
--- a/docs/public/api/swagger-2.0.yaml
+++ b/docs/public/api/swagger-2.0.yaml
@@ -490,6 +490,54 @@ definitions:
- $ref: '#/definitions/ent.Group'
description: Group holds the value of the group edge.
type: object
+ ent.Export:
+ properties:
+ artifact_path:
+ description: ArtifactPath holds the value of the "artifact_path" field.
+ type: string
+ created_at:
+ description: CreatedAt holds the value of the "created_at" field.
+ type: string
+ edges:
+ allOf:
+ - $ref: '#/definitions/ent.ExportEdges'
+ description: |-
+ Edges holds the relations/edges for other nodes in the graph.
+ The values are being populated by the ExportQuery when eager-loading is set.
+ error:
+ description: Error holds the value of the "error" field.
+ type: string
+ group_id:
+ description: GroupID holds the value of the "group_id" field.
+ type: string
+ id:
+ description: ID of the ent.
+ type: string
+ kind:
+ allOf:
+ - $ref: '#/definitions/export.Kind'
+ description: Kind holds the value of the "kind" field.
+ progress:
+ description: Progress holds the value of the "progress" field.
+ type: integer
+ size_bytes:
+ description: SizeBytes holds the value of the "size_bytes" field.
+ type: integer
+ status:
+ allOf:
+ - $ref: '#/definitions/export.Status'
+ description: Status holds the value of the "status" field.
+ updated_at:
+ description: UpdatedAt holds the value of the "updated_at" field.
+ type: string
+ type: object
+ ent.ExportEdges:
+ properties:
+ group:
+ allOf:
+ - $ref: '#/definitions/ent.Group'
+ description: Group holds the value of the group edge.
+ type: object
ent.Group:
properties:
created_at:
@@ -531,6 +579,11 @@ definitions:
items:
$ref: '#/definitions/ent.EntityType'
type: array
+ exports:
+ description: Exports holds the value of the exports edge.
+ items:
+ $ref: '#/definitions/ent.Export'
+ type: array
invitation_tokens:
description: InvitationTokens holds the value of the invitation_tokens edge.
items:
@@ -933,6 +986,30 @@ definitions:
- TypeNumber
- TypeBoolean
- TypeTime
+ export.Kind:
+ enum:
+ - export
+ - export
+ - import
+ type: string
+ x-enum-varnames:
+ - DefaultKind
+ - KindExport
+ - KindImport
+ export.Status:
+ enum:
+ - pending
+ - pending
+ - running
+ - completed
+ - failed
+ type: string
+ x-enum-varnames:
+ - DefaultStatus
+ - StatusPending
+ - StatusRunning
+ - StatusCompleted
+ - StatusFailed
repo.APIKeyCreate:
properties:
expiresAt:
@@ -1576,6 +1653,33 @@ definitions:
required:
- name
type: object
+ repo.ExportOut:
+ properties:
+ artifactPath:
+ type: string
+ createdAt:
+ type: string
+ error:
+ type: string
+ groupId:
+ type: string
+ id:
+ type: string
+ kind:
+ description: |-
+ Kind is "export" for server-produced backup artifacts, "import" for
+ user-uploaded restore zips. The lifecycle fields below behave the
+ same for both.
+ type: string
+ progress:
+ type: integer
+ sizeBytes:
+ type: integer
+ status:
+ type: string
+ updatedAt:
+ type: string
+ type: object
repo.Group:
properties:
createdAt:
@@ -2179,6 +2283,13 @@ definitions:
- password
- token
type: object
+ v1.Results-repo_ExportOut:
+ properties:
+ items:
+ items:
+ $ref: '#/definitions/repo.ExportOut'
+ type: array
+ type: object
v1.TelemetryStatus:
properties:
enabled:
@@ -2935,6 +3046,117 @@ paths:
summary: Update Entity Type
tags:
- Entity Types
+ /v1/group/exports:
+ get:
+ description: Returns export job rows for the caller's group, newest first.
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/v1.Results-repo_ExportOut'
+ security:
+ - Bearer: []
+ summary: List Collection Exports
+ tags:
+ - Group
+ post:
+ description: Creates a pending export row and enqueues the build job. Poll the
+ listing endpoint or watch the WebSocket for completion.
+ produces:
+ - application/json
+ responses:
+ "202":
+ description: Accepted
+ schema:
+ $ref: '#/definitions/repo.ExportOut'
+ security:
+ - Bearer: []
+ summary: Start a Collection Export
+ tags:
+ - Group
+ /v1/group/exports/{id}:
+ delete:
+ description: Deletes the export row and its blob artifact.
+ parameters:
+ - description: Export ID
+ in: path
+ name: id
+ required: true
+ type: string
+ responses:
+ "204":
+ description: No Content
+ security:
+ - Bearer: []
+ summary: Delete an Export
+ tags:
+ - Group
+ get:
+ parameters:
+ - description: Export ID
+ in: path
+ name: id
+ required: true
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/repo.ExportOut'
+ security:
+ - Bearer: []
+ summary: Get an Export
+ tags:
+ - Group
+ /v1/group/exports/{id}/download:
+ get:
+ parameters:
+ - description: Export ID
+ in: path
+ name: id
+ required: true
+ type: string
+ produces:
+ - application/zip
+ responses:
+ "200":
+ description: OK
+ schema:
+ type: file
+ security:
+ - Bearer: []
+ summary: Download an Export Artifact
+ tags:
+ - Group
+ /v1/group/import:
+ post:
+ consumes:
+ - multipart/form-data
+ description: Uploads a collection-export zip and enqueues the import job. The
+ destination group must be empty. Returns the tracked import row so clients
+ can poll for progress.
+ parameters:
+ - description: Export zip
+ in: formData
+ name: file
+ required: true
+ type: file
+ produces:
+ - application/json
+ responses:
+ "202":
+ description: Accepted
+ schema:
+ $ref: '#/definitions/repo.ExportOut'
+ security:
+ - Bearer: []
+ summary: Import a Collection Zip
+ tags:
+ - Group
/v1/groups:
delete:
produces:
diff --git a/docs/src/content/docs/en/quick-start/configure/index.mdx b/docs/src/content/docs/en/quick-start/configure/index.mdx
index 35183356b..071745c0c 100644
--- a/docs/src/content/docs/en/quick-start/configure/index.mdx
+++ b/docs/src/content/docs/en/quick-start/configure/index.mdx
@@ -53,8 +53,8 @@ import {Tabs, TabItem} from "@astrojs/starlight/components";
| HBOX_AUTH_RATE_LIMIT_MAX_BACKOFF | 5m | maximum backoff duration for rate limiting |
| HBOX_DEBUG_ENABLED | false | enable debug mode (exposes pprof + expvar handlers and prints the loaded configuration to stdout with secrets redacted). The debug listener binds to loopback only (127.0.0.1); tunnel via SSH if remote access is needed. |
| HBOX_DEBUG_PORT | 4000 | port to run debug server on (always bound to 127.0.0.1) |
-| HBOX_DEMO | false | enable demo mode, which seeds a `demo@example.com` user and sample inventory on first boot. When `HBOX_MODE=production` the app refuses to start with demo enabled unless `HBOX_DEMO_PASSWORD` is also set (the public `demo/demo` default would otherwise be guessable on a reachable host). |
-| HBOX_DEMO_PASSWORD | | password for the seeded demo user. Required (minimum 12 characters) when `HBOX_DEMO=true` and `HBOX_MODE=production`. In development mode an unset value falls back to the public default `demo`. |
+| HBOX_DEMO | false | enable demo mode, which seeds a `demo@example.com` user and sample inventory on first boot. When `HBOX_MODE=production` the app refuses to start with demo enabled unless `HBOX_DEMO_PASSWORD` is also set (the public `demo/demodemo` default would otherwise be guessable on a reachable host). |
+| HBOX_DEMO_PASSWORD | | password for the seeded demo user. Required (minimum 12 characters) when `HBOX_DEMO=true` and `HBOX_MODE=production`. In development mode an unset value falls back to the public default `demodemo`. |
| HBOX_OIDC_ENABLED | false | enable OpenID Connect (OIDC) authentication |
| HBOX_OIDC_ISSUER_URL | | OIDC provider issuer URL (required when OIDC is enabled) |
| HBOX_OIDC_CLIENT_ID | | OIDC client ID (required when OIDC is enabled) |
diff --git a/frontend/components/Item/Selector.vue b/frontend/components/Item/Selector.vue
index 0ce026bf7..82825d5c2 100644
--- a/frontend/components/Item/Selector.vue
+++ b/frontend/components/Item/Selector.vue
@@ -39,14 +39,22 @@
{{ localizedNoResultsText }}
-          <CommandGroup>
-            <CommandItem
-              v-for="item in filtered"
-              :key="itemKey(item)"
-            >
-              {{ displayValue(item) }}
-            </CommandItem>
+          <CommandGroup ref="listRef">
+            <div :style="{ height: `${topPadding}px` }" />
+            <CommandItem
+              v-for="entry in visibleItems"
+              :key="itemKey(entry.item)"
+              :style="{ height: `${itemHeightPx}px` }"
+            >
+              {{ displayValue(entry.item) }}
+            </CommandItem>
+            <div :style="{ height: `${bottomPadding}px` }" />
@@ -59,7 +67,7 @@
import { computed, ref, watch } from "vue";
import { Check, ChevronsUpDown, X } from "lucide-vue-next";
import fuzzysort from "fuzzysort";
- import { useVModel } from "@vueuse/core";
+ import { unrefElement, useElementSize, useScroll, useVModel } from "@vueuse/core";
import { useI18n } from "vue-i18n";
import { Button } from "~/components/ui/button";
import { Command, CommandEmpty, CommandGroup, CommandInput, CommandItem, CommandList } from "~/components/ui/command";
@@ -86,6 +94,7 @@
excludeItems?: ItemsObject[];
isLoading?: boolean;
triggerSearch?: () => Promise<void>;
+ itemHeight?: number;
}
const emit = defineEmits(["update:modelValue", "update:search"]);
@@ -102,6 +111,7 @@
excludeItems: undefined,
isLoading: false,
triggerSearch: undefined,
+ itemHeight: 32,
});
const id = useId();
@@ -206,4 +216,52 @@
const commandListKey = computed(() => {
return JSON.stringify(filtered.value.map(item => itemKey(item)));
});
+
+ // Virtualize the list so the popover stays responsive with thousands of items.
+ const itemHeightPx = computed(() => props.itemHeight);
+ const OVERSCAN = 5;
+
+ const listRef = ref();
+ const scrollContainer = computed(() => {
+ return (unrefElement(listRef.value) as HTMLElement | null) ?? null;
+ });
+ const { y: scrollTop } = useScroll(scrollContainer);
+ const { height: viewportHeight } = useElementSize(scrollContainer);
+
+ const visibleRange = computed(() => {
+ const total = filtered.value.length;
+ if (total === 0) return { start: 0, end: 0 };
+ const h = viewportHeight.value || 300;
+ const ih = itemHeightPx.value;
+ const start = Math.max(0, Math.floor(scrollTop.value / ih) - OVERSCAN);
+ const visibleCount = Math.ceil(h / ih) + OVERSCAN * 2;
+ const end = Math.min(total, start + visibleCount);
+ return { start, end };
+ });
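+
+  // Worked example (illustrative): with itemHeight 32, a 300px viewport,
+  // and scrollTop 640, start = max(0, 640/32 - 5) = 15 and visibleCount =
+  // ceil(300 / 32) + 10 = 20, so only rows 15..34 render; everything else
+  // is represented by the padding spacers.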
+
+ const visibleItems = computed(() => {
+ const { start, end } = visibleRange.value;
+ return filtered.value.slice(start, end).map((item, i) => ({
+ item,
+ index: start + i,
+ }));
+ });
+
+ const topPadding = computed(() => visibleRange.value.start * itemHeightPx.value);
+ const bottomPadding = computed(
+ () => Math.max(0, filtered.value.length - visibleRange.value.end) * itemHeightPx.value
+ );
+
+ watch([() => search.value, () => props.items], () => {
+ const el = scrollContainer.value;
+ if (el) el.scrollTop = 0;
+ });
+
+ watch(open, isOpen => {
+ if (!isOpen) return;
+ nextTick(() => {
+ const el = scrollContainer.value;
+ if (el) el.scrollTop = 0;
+ });
+ });
diff --git a/frontend/composables/use-server-events.ts b/frontend/composables/use-server-events.ts
index 779b79b2a..93e79b13f 100644
--- a/frontend/composables/use-server-events.ts
+++ b/frontend/composables/use-server-events.ts
@@ -5,6 +5,8 @@ export enum ServerEvent {
EntityMutation = "entity.mutation",
TagMutation = "tag.mutation",
UserMutation = "user.mutation",
+ ExportMutation = "export.mutation",
+ ImportMutation = "import.mutation",
}
export type EventMessage = {
@@ -54,6 +56,8 @@ function connect(onmessage: (m: EventMessage) => void) {
thorttled.set(ServerEvent.EntityMutation, useThrottleFn(onmessage, 1000));
thorttled.set(ServerEvent.TagMutation, useThrottleFn(onmessage, 1000));
thorttled.set(ServerEvent.UserMutation, useThrottleFn(onmessage, 1000));
+ thorttled.set(ServerEvent.ExportMutation, useThrottleFn(onmessage, 500));
+ thorttled.set(ServerEvent.ImportMutation, useThrottleFn(onmessage, 500));
ws.onmessage = msg => {
const pm = JSON.parse(msg.data);
diff --git a/frontend/lib/api/classes/backups.ts b/frontend/lib/api/classes/backups.ts
new file mode 100644
index 000000000..50888a207
--- /dev/null
+++ b/frontend/lib/api/classes/backups.ts
@@ -0,0 +1,61 @@
+import { BaseAPI, route } from "../base";
+import type { ExportOut, ResultsRepoExportOut } from "../types/data-contracts";
+
+/**
+ * Re-export so consumers only need to import from this module. The shape is
+ * generated from the Go `repo.ExportOut` struct via swagger.
+ */
+export type CollectionExport = ExportOut;
+
+/**
+ * Client for the collection backup/restore endpoints. Always group-scoped:
+ * the server reads the tenant from the auth token and refuses to act on
+ * anything that doesn't belong to it.
+ */
+export class BackupsAPI extends BaseAPI {
+ /** Kick off a new export. Returns the pending job row. */
+ startExport() {
+ return this.http.post<null, CollectionExport>({
+ url: route("/group/exports"),
+ });
+ }
+
+ /** List every export job for the current group, newest first. */
+ list() {
+ return this.http.get<ResultsRepoExportOut>({
+ url: route("/group/exports"),
+ });
+ }
+
+ /** Fetch a single export job. */
+ get(id: string) {
+ return this.http.get<CollectionExport>({
+ url: route(`/group/exports/${id}`),
+ });
+ }
+
+ /** Delete a job row and its blob artifact. */
+ delete(id: string) {
+ return this.http.delete<void>({
+ url: route(`/group/exports/${id}`),
+ });
+ }
+
+ /** Returns the URL to download the artifact directly. */
+ downloadURL(id: string) {
+ return route(`/group/exports/${id}/download`);
+ }
+
+ /**
+ * Upload a previously-produced export zip and enqueue an import job. The
+ * destination group must be empty; the server returns 409 otherwise.
+ */
+ importZip(file: File | Blob) {
+ const formData = new FormData();
+ formData.append("file", file);
+ return this.http.post<FormData, CollectionExport>({
+ url: route("/group/import"),
+ data: formData,
+ });
+ }
+}
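+
+// Usage sketch (illustrative, not part of this change): start a backup,
+// poll the listing until the job completes, then open the artifact. Real
+// callers can also react to the "export.mutation" WebSocket event instead
+// of polling.
+//
+//   const { data: job } = await api.backups.startExport();
+//   const { data: jobs } = await api.backups.list();
+//   const done = jobs.items.find(j => j.id === job.id && j.status === "completed");
+//   if (done) window.open(api.backups.downloadURL(done.id), "_blank");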
diff --git a/frontend/lib/api/types/data-contracts.ts b/frontend/lib/api/types/data-contracts.ts
index 35b54fdac..5417d8e30 100644
--- a/frontend/lib/api/types/data-contracts.ts
+++ b/frontend/lib/api/types/data-contracts.ts
@@ -35,6 +35,20 @@ export enum EntityPathType {
EntityPathTypeItem = "item",
}
+export enum ExportStatus {
+ DefaultStatus = "pending",
+ StatusPending = "pending",
+ StatusRunning = "running",
+ StatusCompleted = "completed",
+ StatusFailed = "failed",
+}
+
+export enum ExportKind {
+ DefaultKind = "export",
+ KindExport = "export",
+ KindImport = "import",
+}
+
export enum EntityfieldType {
TypeText = "text",
TypeNumber = "number",
@@ -361,6 +375,39 @@ export interface EntEntityTypeEdges {
group: EntGroup;
}
+export interface EntExport {
+ /** ArtifactPath holds the value of the "artifact_path" field. */
+ artifact_path: string;
+ /** CreatedAt holds the value of the "created_at" field. */
+ created_at: string;
+ /**
+ * Edges holds the relations/edges for other nodes in the graph.
+ * The values are being populated by the ExportQuery when eager-loading is set.
+ */
+ edges: EntExportEdges;
+ /** Error holds the value of the "error" field. */
+ error: string;
+ /** GroupID holds the value of the "group_id" field. */
+ group_id: string;
+ /** ID of the ent. */
+ id: string;
+ /** Kind holds the value of the "kind" field. */
+ kind: ExportKind;
+ /** Progress holds the value of the "progress" field. */
+ progress: number;
+ /** SizeBytes holds the value of the "size_bytes" field. */
+ size_bytes: number;
+ /** Status holds the value of the "status" field. */
+ status: ExportStatus;
+ /** UpdatedAt holds the value of the "updated_at" field. */
+ updated_at: string;
+}
+
+export interface EntExportEdges {
+ /** Group holds the value of the group edge. */
+ group: EntGroup;
+}
+
export interface EntGroup {
/** CreatedAt holds the value of the "created_at" field. */
created_at: string;
@@ -386,6 +433,8 @@ export interface EntGroupEdges {
entity_templates: EntEntityTemplate[];
/** EntityTypes holds the value of the entity_types edge. */
entity_types: EntEntityType[];
+ /** Exports holds the value of the exports edge. */
+ exports: EntExport[];
/** InvitationTokens holds the value of the invitation_tokens edge. */
invitation_tokens: EntGroupInvitationToken[];
/** Notifiers holds the value of the notifiers edge. */
@@ -980,6 +1029,24 @@ export interface EntityUpdate {
warrantyExpires: Date | string;
}
+export interface ExportOut {
+ artifactPath: string;
+ createdAt: Date | string;
+ error: string;
+ groupId: string;
+ id: string;
+ /**
+ * Kind is "export" for server-produced backup artifacts, "import" for
+ * user-uploaded restore zips. The lifecycle fields below behave the
+ * same for both.
+ */
+ kind: string;
+ progress: number;
+ sizeBytes: number;
+ status: string;
+ updatedAt: Date | string;
+}
+
export interface Group {
createdAt: Date | string;
currency: string;
@@ -1333,6 +1400,10 @@ export interface ResetPasswordRequest {
token: string;
}
+export interface ResultsRepoExportOut {
+ items: ExportOut[];
+}
+
export interface TelemetryStatus {
enabled: boolean;
}
diff --git a/frontend/lib/api/user.ts b/frontend/lib/api/user.ts
index c8f4cbd37..e898bb1e4 100644
--- a/frontend/lib/api/user.ts
+++ b/frontend/lib/api/user.ts
@@ -12,6 +12,7 @@ import { NotifiersAPI } from "./classes/notifiers";
import { MaintenanceAPI } from "./classes/maintenance";
import { ProductAPI } from "./classes/product";
import { TemplatesApi } from "./classes/templates";
+import { BackupsAPI } from "./classes/backups";
import type { Requests } from "~~/lib/requests";
export class UserClient extends BaseAPI {
@@ -28,6 +29,7 @@ export class UserClient extends BaseAPI {
reports: ReportsAPI;
notifiers: NotifiersAPI;
products: ProductAPI;
+ backups: BackupsAPI;
/** Backward-compat shim that delegates to the entities (items) API. */
locations: {
@@ -55,6 +57,7 @@ export class UserClient extends BaseAPI {
this.reports = new ReportsAPI(requests);
this.notifiers = new NotifiersAPI(requests);
this.products = new ProductAPI(requests);
+ this.backups = new BackupsAPI(requests);
// Backward-compat shim: api.locations.* delegates to api.items.*
this.locations = {
diff --git a/frontend/locales/en.json b/frontend/locales/en.json
index d2fb0ff41..85435aff3 100644
--- a/frontend/locales/en.json
+++ b/frontend/locales/en.json
@@ -922,6 +922,26 @@
"zero_datetimes_sub": "Resets the time value for all date time fields in your inventory to the beginning of the date. This is to fix a bug that was introduced early on in the development of the site that caused the time value to be stored with the time which caused issues with date fields displaying accurate values. ''See Github Issue #236 for more details.''"
},
"actions_sub": "Apply Actions to your inventory in bulk. These are irreversible actions. ''Be careful.''",
+ "backups": "Backup & Restore",
+ "backups_sub": "Export your entire collection — entities, tags, attachments, and metadata — to a portable zip archive. Restore an archive into a freshly registered, empty collection on this or another server.",
+ "backups_set": {
+ "create": "Create Backup",
+ "create_button": "Start Backup",
+ "create_sub": "Builds a zip with every entity, tag, custom field, attachment, and maintenance record in this collection. The job runs in the background; the artifact will appear in the table below when it's ready.",
+ "delete_confirm": "Delete this backup artifact? This cannot be undone.",
+ "download": "Download",
+ "failed": "Backup failed. Check server logs for details.",
+ "list_empty": "No backups yet.",
+ "restore": "Restore from Backup",
+ "restore_button": "Upload & Restore",
+ "restore_sub": "Upload a backup zip to populate this collection. The collection must contain no items yet — default locations and tags are wiped automatically as part of the restore.",
+ "table": {
+ "actions": "Actions",
+ "created": "Created",
+ "size": "Size",
+ "status": "Status"
+ }
+ },
"demo_mode_error": {
"wipe_inventory": "Inventory, tags, locations and maintenance records cannot be wiped whilst Homebox is in demo mode. Please ensure that you are not in demo mode and try again."
},
@@ -948,6 +968,12 @@
"reports_sub": "Generate different reports for your inventory.",
"toast": {
"asset_success": "{ results } assets have been updated.",
+ "backup_started": "Backup started — it will appear in the table when ready.",
+ "backup_start_failed": "Failed to start backup.",
+ "backup_delete_failed": "Failed to delete backup.",
+ "restore_started": "Restore started — refresh shortly to see imported items.",
+ "restore_failed": "Restore failed.",
+ "restore_requires_empty": "Restore requires a collection with no items. Switch to a freshly registered collection (default locations and tags are fine) and try again.",
"failed_create_missing_thumbnails": "Failed to create missing thumbnails.",
"failed_ensure_ids": "Failed to ensure asset IDs.",
"failed_ensure_import_refs": "Failed to ensure import refs.",
diff --git a/frontend/pages/collection/index/tools.vue b/frontend/pages/collection/index/tools.vue
index f10c1298e..b7117f61d 100644
--- a/frontend/pages/collection/index/tools.vue
+++ b/frontend/pages/collection/index/tools.vue
@@ -50,6 +50,77 @@
+
+  <div class="divider" />
+
+  <div>
+    <h2 class="text-lg font-medium">
+      <MdiPackageVariant class="-mt-1 mr-1 inline" />
+      {{ $t("tools.backups") }}
+    </h2>
+    <p class="text-sm">{{ $t("tools.backups_sub") }}</p>
+
+    <div class="mt-4">
+      <h3 class="font-medium">{{ $t("tools.backups_set.create") }}</h3>
+      <p class="text-sm">{{ $t("tools.backups_set.create_sub") }}</p>
+      <button type="button" class="btn btn-primary mt-2" @click="startBackup">
+        {{ $t("tools.backups_set.create_button") }}
+      </button>
+    </div>
+
+    <table v-if="backups.length" class="mt-4 w-full text-sm">
+      <thead>
+        <tr>
+          <th>{{ $t("tools.backups_set.table.created") }}</th>
+          <th>{{ $t("tools.backups_set.table.status") }}</th>
+          <th>{{ $t("tools.backups_set.table.size") }}</th>
+          <th>{{ $t("tools.backups_set.table.actions") }}</th>
+        </tr>
+      </thead>
+      <tbody>
+        <tr v-for="b in backups" :key="b.id">
+          <td>{{ formatCreated(b.createdAt) }}</td>
+          <td>
+            {{ b.status }}
+            <span v-if="b.status === 'running'">({{ b.progress }}%)</span>
+            <span v-if="b.status === 'failed'">{{ $t("tools.backups_set.failed") }}</span>
+          </td>
+          <td>{{ b.status === "completed" ? formatBytes(b.sizeBytes) : "—" }}</td>
+          <td>
+            <a v-if="b.status === 'completed'" :href="downloadUrl(b.id)" download>
+              {{ $t("tools.backups_set.download") }}
+            </a>
+            <button type="button" class="btn btn-sm" @click="deleteBackup(b.id)">
+              {{ $t("global.delete") }}
+            </button>
+          </td>
+        </tr>
+      </tbody>
+    </table>
+    <p v-else class="mt-4 text-sm">
+      {{ $t("tools.backups_set.list_empty") }}
+    </p>
+
+    <div class="mt-6">
+      <h3 class="font-medium">{{ $t("tools.backups_set.restore") }}</h3>
+      <p class="text-sm">{{ $t("tools.backups_set.restore_sub") }}</p>
+      <input ref="restoreInput" type="file" accept=".zip,application/zip" class="hidden" @change="onRestoreFile" />
+      <button type="button" class="btn mt-2" @click="restoreInput?.click()">
+        {{ $t("tools.backups_set.restore_button") }}
+      </button>
+    </div>
+  </div>
+
@@ -110,6 +181,9 @@
import MdiArrowRight from "~icons/mdi/arrow-right";
import MdiDatabase from "~icons/mdi/database";
import MdiAlert from "~icons/mdi/alert";
+ import MdiPackageVariant from "~icons/mdi/package-variant";
+ import { ServerEvent, onServerEvent } from "@/composables/use-server-events";
+ import type { CollectionExport } from "@/lib/api/classes/backups";
import { useDialog } from "~/components/ui/dialog-provider";
import { DialogID } from "~/components/ui/dialog-provider/utils";
import AppImportDialog from "@/components/App/ImportDialog.vue";
@@ -233,6 +307,90 @@
toast.success(t("tools.toast.asset_success", { results: result.data.completed }));
};
+ // ---------------------------------------------------------------------------
+ // Backup & Restore
+ // ---------------------------------------------------------------------------
+
+ const backups = ref<CollectionExport[]>([]);
+ const restoreInput = ref<HTMLInputElement | null>(null);
+
+ async function refreshBackups() {
+ const { data, error } = await api.backups.list();
+ if (error || !data) {
+ return;
+ }
+ backups.value = data.items ?? [];
+ }
+
+ // Initial fetch + live refresh on export/import lifecycle events.
+ refreshBackups();
+ onServerEvent(ServerEvent.ExportMutation, refreshBackups);
+ onServerEvent(ServerEvent.ImportMutation, refreshBackups);
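+ // ServerEvent.ExportMutation / ServerEvent.ImportMutation are assumed to map
+ // to the "export.mutation" / "import.mutation" websocket tags the controller
+ // now publishes for export/import lifecycle changes.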
+
+ function downloadUrl(id: string): string {
+ return api.backups.downloadURL(id);
+ }
+
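+ // Worked examples for the helper below: formatBytes(0) => "0 B",
+ // formatBytes(1536) => "1.5 KB", formatBytes(10 * 1024 ** 2) => "10 MB"
+ // (one decimal only when the value is below 10 and the unit isn't bytes).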
+ function formatBytes(n: number): string {
+ if (!n) return "0 B";
+ const units = ["B", "KB", "MB", "GB"];
+ let i = 0;
+ let v = n;
+ while (v >= 1024 && i < units.length - 1) {
+ v /= 1024;
+ i++;
+ }
+ return `${v.toFixed(v >= 10 || i === 0 ? 0 : 1)} ${units[i]}`;
+ }
+
+ function formatCreated(iso: string): string {
+ return new Date(iso).toLocaleString();
+ }
+
+ async function startBackup() {
+ const { error } = await api.backups.startExport();
+ if (error) {
+ toast.error(t("tools.toast.backup_start_failed"));
+ return;
+ }
+ toast.success(t("tools.toast.backup_started"));
+ await refreshBackups();
+ }
+
+ async function deleteBackup(id: string) {
+ const { isCanceled } = await confirm.open(t("tools.backups_set.delete_confirm"));
+ if (isCanceled) {
+ return;
+ }
+ const { error } = await api.backups.delete(id);
+ if (error) {
+ toast.error(t("tools.toast.backup_delete_failed"));
+ return;
+ }
+ await refreshBackups();
+ }
+
+ async function onRestoreFile(e: Event) {
+ const input = e.target as HTMLInputElement;
+ const file = input.files?.[0];
+ // Reset so the user can re-pick the same file later if needed.
+ input.value = "";
+ if (!file) {
+ return;
+ }
+ const { error, status } = await api.backups.importZip(file);
+ if (error) {
+ // 409 = empty-group precondition failed.
+ if (status === 409) {
+ toast.error(t("tools.toast.restore_requires_empty"));
+ } else {
+ toast.error(t("tools.toast.restore_failed"));
+ }
+ return;
+ }
+ toast.success(t("tools.toast.restore_started"));
+ }
+
const wipeInventory = async () => {
if (status.value?.demo) {
await confirm.open(t("tools.demo_mode_error.wipe_inventory"));
diff --git a/frontend/pages/index.vue b/frontend/pages/index.vue
index 9c1c734c4..bb102adde 100644
--- a/frontend/pages/index.vue
+++ b/frontend/pages/index.vue
@@ -62,7 +62,7 @@
if (data.demo) {
username.value = "demo@example.com";
- password.value = "demo";
+ password.value = "demodemo";
}
return data;
});
@@ -70,7 +70,7 @@
whenever(status, status => {
if (status?.demo) {
email.value = "demo@example.com";
- loginPassword.value = "demo";
+ loginPassword.value = "demodemo";
}
// Auto-redirect to OIDC if autoRedirect is enabled, but not if there's an OIDC initialization error
@@ -382,7 +382,7 @@
{{ $t("global.email") }} demo@example.com
- {{ $t("global.password") }} demo
+ {{ $t("global.password") }} demodemo
{
await page.goto("/home");
await expect(page).toHaveURL("/");
await page.fill("input[type='text']", "demo@example.com");
- await page.fill("input[type='password']", "demo");
+ await page.fill("input[type='password']", "demodemo");
await page.click("button[type='submit']");
await expect(page).toHaveURL("/home");
});
diff --git a/frontend/test/e2e/wipe-inventory.browser.spec.ts b/frontend/test/e2e/wipe-inventory.browser.spec.ts
index e4a280b18..5f87f1ac6 100644
--- a/frontend/test/e2e/wipe-inventory.browser.spec.ts
+++ b/frontend/test/e2e/wipe-inventory.browser.spec.ts
@@ -27,7 +27,7 @@ async function mockStatus(page: Page, demo: boolean) {
});
}
-async function login(page: Page, email = "demo@example.com", password = "demo") {
+async function login(page: Page, email = "demo@example.com", password = "demodemo") {
await page.goto("/home");
await expect(page).toHaveURL("/");
await page.fill("input[type='text']", email);