From 30037ff60b0031b5549a9400ef8414824ae2a313 Mon Sep 17 00:00:00 2001 From: qqkzlm <40537739+qqkzlm@users.noreply.github.com> Date: Sun, 10 May 2026 12:01:24 +0800 Subject: [PATCH] feat: import missing backup records from object storage --- Dockerfile | 3 +- backend/Dockerfile | 54 ++- .../internal/handler/admin/backup_handler.go | 18 + .../internal/repository/backup_pg_dumper.go | 16 + .../internal/repository/backup_s3_store.go | 34 ++ backend/internal/server/routes/admin.go | 2 + backend/internal/service/backup_service.go | 319 +++++++++++++++++- .../internal/service/backup_service_test.go | 220 ++++++++++++ deploy/Dockerfile | 21 +- frontend/package.json | 6 + frontend/pnpm-workspace.yaml | 6 + frontend/src/api/admin/backup.ts | 38 +++ frontend/src/i18n/locales/en.ts | 42 ++- frontend/src/i18n/locales/zh.ts | 42 ++- frontend/src/views/admin/BackupView.vue | 159 ++++++++- 15 files changed, 953 insertions(+), 27 deletions(-) create mode 100644 frontend/pnpm-workspace.yaml diff --git a/Dockerfile b/Dockerfile index 7befb46498a..af722291bf3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,10 +21,11 @@ FROM ${NODE_IMAGE} AS frontend-builder WORKDIR /app/frontend # Install pnpm -RUN corepack enable && corepack prepare pnpm@latest --activate +RUN corepack enable && corepack prepare pnpm@9.15.9 --activate # Install dependencies first (better caching) COPY frontend/package.json frontend/pnpm-lock.yaml ./ +COPY frontend/pnpm-workspace.yaml ./ RUN pnpm install --frozen-lockfile # Copy frontend source and build diff --git a/backend/Dockerfile b/backend/Dockerfile index f153d686679..5d3b2ed0d24 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -1,24 +1,54 @@ -FROM golang:1.26.3-alpine +ARG GOLANG_IMAGE=golang:1.26.3-alpine +ARG ALPINE_IMAGE=alpine:3.21 +ARG POSTGRES_IMAGE=postgres:18-alpine +ARG GOPROXY=https://goproxy.cn,direct +ARG GOSUMDB=sum.golang.google.cn + +FROM ${GOLANG_IMAGE} AS builder + +ARG GOPROXY +ARG GOSUMDB + +ENV GOPROXY=${GOPROXY} +ENV 
GOSUMDB=${GOSUMDB} WORKDIR /app -# 安装必要的工具 -RUN apk add --no-cache git +RUN apk add --no-cache git ca-certificates tzdata -# 复制go.mod和go.sum COPY go.mod go.sum ./ - -# 下载依赖 RUN go mod download -# 复制源代码 COPY . . -# 构建应用 -RUN go build -o main ./cmd/server/ +RUN CGO_ENABLED=0 GOOS=linux go build \ + -ldflags="-s -w" \ + -trimpath \ + -o /app/sub2api \ + ./cmd/server + +FROM ${POSTGRES_IMAGE} AS pg-client + +FROM ${ALPINE_IMAGE} + +RUN apk add --no-cache \ + ca-certificates \ + tzdata \ + libpq \ + zstd-libs \ + lz4-libs \ + krb5-libs \ + libldap \ + libedit \ + && rm -rf /var/cache/apk/* + +COPY --from=pg-client /usr/local/bin/pg_dump /usr/local/bin/pg_dump +COPY --from=pg-client /usr/local/bin/psql /usr/local/bin/psql +COPY --from=pg-client /usr/local/lib/libpq.so.5* /usr/local/lib/ +COPY --from=builder /app/sub2api /app/sub2api + +WORKDIR /app -# 暴露端口 EXPOSE 8080 -# 运行应用 -CMD ["./main"] +CMD ["/app/sub2api"] diff --git a/backend/internal/handler/admin/backup_handler.go b/backend/internal/handler/admin/backup_handler.go index 2f528322f37..377a706083e 100644 --- a/backend/internal/handler/admin/backup_handler.go +++ b/backend/internal/handler/admin/backup_handler.go @@ -159,6 +159,24 @@ func (h *BackupHandler) GetDownloadURL(c *gin.Context) { response.Success(c, gin.H{"url": url}) } +func (h *BackupHandler) PreviewImportBackups(c *gin.Context) { + preview, err := h.backupService.PreviewImportBackups(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, preview) +} + +func (h *BackupHandler) ImportMissingBackups(c *gin.Context) { + result, err := h.backupService.ImportMissingBackups(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, result) +} + // ─── 恢复操作(需要重新输入管理员密码) ─── type RestoreBackupRequest struct { diff --git a/backend/internal/repository/backup_pg_dumper.go b/backend/internal/repository/backup_pg_dumper.go index e9a92ef29d7..3c47153c806 100644 --- 
a/backend/internal/repository/backup_pg_dumper.go +++ b/backend/internal/repository/backup_pg_dumper.go @@ -20,8 +20,19 @@ func NewPgDumper(cfg *config.Config) service.DBDumper { return &PgDumper{cfg: &cfg.Database} } +func requirePostgresClient(name string) error { + if _, err := exec.LookPath(name); err != nil { + return fmt.Errorf("%s not found in PATH; backup/restore requires PostgreSQL client tools in the runtime image: %w", name, err) + } + return nil +} + // Dump executes pg_dump and returns a streaming reader of the output func (d *PgDumper) Dump(ctx context.Context) (io.ReadCloser, error) { + if err := requirePostgresClient("pg_dump"); err != nil { + return nil, err + } + args := []string{ "-h", d.cfg.Host, "-p", fmt.Sprintf("%d", d.cfg.Port), @@ -56,11 +67,16 @@ func (d *PgDumper) Dump(ctx context.Context) (io.ReadCloser, error) { // Restore executes psql to restore from a streaming reader func (d *PgDumper) Restore(ctx context.Context, data io.Reader) error { + if err := requirePostgresClient("psql"); err != nil { + return err + } + args := []string{ "-h", d.cfg.Host, "-p", fmt.Sprintf("%d", d.cfg.Port), "-U", d.cfg.User, "-d", d.cfg.DBName, + "-v", "ON_ERROR_STOP=1", "--single-transaction", } diff --git a/backend/internal/repository/backup_s3_store.go b/backend/internal/repository/backup_s3_store.go index 5d419f574b6..0f3f0b5d171 100644 --- a/backend/internal/repository/backup_s3_store.go +++ b/backend/internal/repository/backup_s3_store.go @@ -86,6 +86,40 @@ func (s *S3BackupStore) Download(ctx context.Context, key string) (io.ReadCloser return result.Body, nil } +func (s *S3BackupStore) List(ctx context.Context, prefix string) ([]service.BackupObjectInfo, error) { + input := &s3.ListObjectsV2Input{ + Bucket: &s.bucket, + } + if prefix != "" { + input.Prefix = aws.String(prefix) + } + + paginator := s3.NewListObjectsV2Paginator(s.client, input) + objects := make([]service.BackupObjectInfo, 0) + + for paginator.HasMorePages() { + page, err := 
paginator.NextPage(ctx) + if err != nil { + return nil, fmt.Errorf("S3 ListObjectsV2: %w", err) + } + for _, object := range page.Contents { + if object.Key == nil { + continue + } + info := service.BackupObjectInfo{ + Key: aws.ToString(object.Key), + SizeBytes: aws.ToInt64(object.Size), + } + if object.LastModified != nil { + info.LastModified = object.LastModified.Format(time.RFC3339) + } + objects = append(objects, info) + } + } + + return objects, nil +} + func (s *S3BackupStore) Delete(ctx context.Context, key string) error { _, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: &s.bucket, diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 6e1059bc829..a36b470d81a 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -485,6 +485,8 @@ func registerBackupRoutes(admin *gin.RouterGroup, h *handler.Handlers) { // 备份操作 backup.POST("", h.Admin.Backup.CreateBackup) backup.GET("", h.Admin.Backup.ListBackups) + backup.GET("/import-preview", h.Admin.Backup.PreviewImportBackups) + backup.POST("/import", h.Admin.Backup.ImportMissingBackups) backup.GET("/:id", h.Admin.Backup.GetBackup) backup.DELETE("/:id", h.Admin.Backup.DeleteBackup) backup.GET("/:id/download-url", h.Admin.Backup.GetDownloadURL) diff --git a/backend/internal/service/backup_service.go b/backend/internal/service/backup_service.go index 2fcf2da89f7..b6172dd078e 100644 --- a/backend/internal/service/backup_service.go +++ b/backend/internal/service/backup_service.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io" + "path" "sort" "strings" "sync" @@ -26,7 +27,8 @@ const ( settingKeyBackupSchedule = "backup_schedule" settingKeyBackupRecords = "backup_records" - maxBackupRecords = 100 + defaultBackupExpireDays = 14 + maxBackupRecords = 0 ) var ( @@ -50,6 +52,7 @@ type DBDumper interface { type BackupObjectStore interface { Upload(ctx context.Context, key string, body io.Reader, contentType string) 
(sizeBytes int64, err error) Download(ctx context.Context, key string) (io.ReadCloser, error) + List(ctx context.Context, prefix string) ([]BackupObjectInfo, error) Delete(ctx context.Context, key string) error PresignURL(ctx context.Context, key string, expiry time.Duration) (string, error) HeadBucket(ctx context.Context) error @@ -103,6 +106,38 @@ type BackupRecord struct { RestoredAt string `json:"restored_at,omitempty"` } +type BackupObjectInfo struct { + Key string `json:"key"` + SizeBytes int64 `json:"size_bytes"` + LastModified string `json:"last_modified,omitempty"` +} + +type BackupImportPreviewItem struct { + FileName string `json:"file_name"` + S3Key string `json:"s3_key"` + SizeBytes int64 `json:"size_bytes"` + LastModified string `json:"last_modified,omitempty"` + HasRecord bool `json:"has_record"` + RecordID string `json:"record_id,omitempty"` + CanImport bool `json:"can_import"` + Reason string `json:"reason,omitempty"` +} + +type BackupImportPreview struct { + Prefix string `json:"prefix"` + TotalObjects int `json:"total_objects"` + ExistingCount int `json:"existing_count"` + MissingCount int `json:"missing_count"` + ImportableCount int `json:"importable_count"` + Items []BackupImportPreviewItem `json:"items"` +} + +type BackupImportResult struct { + ImportedCount int `json:"imported_count"` + SkippedCount int `json:"skipped_count"` + Items []BackupRecord `json:"items"` +} + // BackupService 数据库备份恢复服务 type BackupService struct { settingRepo SettingRepository @@ -393,7 +428,7 @@ func (s *BackupService) runScheduledBackup() { // 读取定时备份配置中的过期天数 schedule, _ := s.GetSchedule(ctx) - expireDays := 14 // 默认14天过期 + expireDays := defaultBackupExpireDays if schedule != nil && schedule.RetainDays > 0 { expireDays = schedule.RetainDays } @@ -956,6 +991,173 @@ func (s *BackupService) GetBackupDownloadURL(ctx context.Context, backupID strin return url, nil } +func (s *BackupService) PreviewImportBackups(ctx context.Context) (*BackupImportPreview, error) { + s3Cfg, 
err := s.loadS3Config(ctx) + if err != nil { + return nil, err + } + if s3Cfg == nil || !s3Cfg.IsConfigured() { + return nil, ErrBackupS3NotConfigured + } + + objectStore, err := s.getOrCreateStore(ctx, s3Cfg) + if err != nil { + return nil, fmt.Errorf("init object store: %w", err) + } + + prefix := s.buildS3Prefix(s3Cfg) + objects, err := objectStore.List(ctx, prefix) + if err != nil { + return nil, fmt.Errorf("list backup objects: %w", err) + } + + records, err := s.loadRecords(ctx) + if err != nil { + return nil, err + } + + recordByKey := make(map[string]BackupRecord, len(records)) + for _, record := range records { + if record.S3Key != "" { + recordByKey[record.S3Key] = record + } + } + + preview := &BackupImportPreview{ + Prefix: prefix, + TotalObjects: len(objects), + Items: make([]BackupImportPreviewItem, 0, len(objects)), + } + + for _, object := range objects { + item := s.buildImportPreviewItem(object, recordByKey) + if item.HasRecord { + preview.ExistingCount++ + } else { + preview.MissingCount++ + if item.CanImport { + preview.ImportableCount++ + } + } + preview.Items = append(preview.Items, item) + } + + sort.Slice(preview.Items, func(i, j int) bool { + if preview.Items[i].LastModified == preview.Items[j].LastModified { + return preview.Items[i].S3Key > preview.Items[j].S3Key + } + return preview.Items[i].LastModified > preview.Items[j].LastModified + }) + + return preview, nil +} + +func (s *BackupService) ImportMissingBackups(ctx context.Context) (*BackupImportResult, error) { + s3Cfg, err := s.loadS3Config(ctx) + if err != nil { + return nil, err + } + if s3Cfg == nil || !s3Cfg.IsConfigured() { + return nil, ErrBackupS3NotConfigured + } + + objectStore, err := s.getOrCreateStore(ctx, s3Cfg) + if err != nil { + return nil, fmt.Errorf("init object store: %w", err) + } + + prefix := s.buildS3Prefix(s3Cfg) + objects, err := objectStore.List(ctx, prefix) + if err != nil { + return nil, fmt.Errorf("list backup objects: %w", err) + } + + result := 
&BackupImportResult{} + + s.recordsMu.Lock() + defer s.recordsMu.Unlock() + + records, err := s.loadRecordsLocked(ctx) + if err != nil { + return nil, err + } + importExpireDays := s.loadImportExpireDays(ctx) + + recordByKey := make(map[string]BackupRecord, len(records)) + recordIndexByKey := make(map[string]int, len(records)) + for i, record := range records { + if record.S3Key != "" { + recordByKey[record.S3Key] = record + recordIndexByKey[record.S3Key] = i + } + } + + recordsChanged := false + + for _, object := range objects { + if existingIndex, exists := recordIndexByKey[object.Key]; exists { + if importExpireDays > 0 && + records[existingIndex].TriggeredBy == "imported" && + strings.TrimSpace(records[existingIndex].ExpiresAt) == "" { + startedAt := backupObjectTimestamp(object.LastModified) + records[existingIndex].ExpiresAt = startedAt.AddDate(0, 0, importExpireDays).Format(time.RFC3339) + recordByKey[object.Key] = records[existingIndex] + recordsChanged = true + } + result.SkippedCount++ + continue + } + + fileName, reason := classifyImportableBackupObject(object) + if reason != "" { + result.SkippedCount++ + continue + } + + startedAt := backupObjectTimestamp(object.LastModified) + expiresAt := "" + if importExpireDays > 0 { + expiresAt = startedAt.AddDate(0, 0, importExpireDays).Format(time.RFC3339) + } + + record := BackupRecord{ + ID: uuid.New().String()[:8], + Status: "completed", + BackupType: "postgres", + FileName: fileName, + S3Key: object.Key, + SizeBytes: object.SizeBytes, + TriggeredBy: "imported", + StartedAt: startedAt.Format(time.RFC3339), + FinishedAt: startedAt.Format(time.RFC3339), + ExpiresAt: expiresAt, + } + + records = append(records, record) + recordByKey[object.Key] = record + result.Items = append(result.Items, record) + result.ImportedCount++ + } + + if len(result.Items) == 0 && !recordsChanged { + return result, nil + } + + records = trimBackupRecords(records) + if err := s.saveRecordsLocked(ctx, records); err != nil { + return 
nil, err + } + + sort.Slice(result.Items, func(i, j int) bool { + if result.Items[i].StartedAt == result.Items[j].StartedAt { + return result.Items[i].S3Key > result.Items[j].S3Key + } + return result.Items[i].StartedAt > result.Items[j].StartedAt + }) + + return result, nil +} + // ─── 内部方法 ─── func (s *BackupService) loadS3Config(ctx context.Context) (*BackupS3Config, error) { @@ -1002,11 +1204,32 @@ func (s *BackupService) getOrCreateStore(ctx context.Context, cfg *BackupS3Confi } func (s *BackupService) buildS3Key(cfg *BackupS3Config, fileName string) string { - prefix := strings.TrimRight(cfg.Prefix, "/") + return fmt.Sprintf("%s%s/%s", s.buildS3Prefix(cfg), time.Now().Format("2006/01/02"), fileName) +} + +func (s *BackupService) buildS3Prefix(cfg *BackupS3Config) string { + prefix := "backups" + if cfg != nil { + prefix = strings.Trim(cfg.Prefix, "/") + } if prefix == "" { prefix = "backups" } - return fmt.Sprintf("%s/%s/%s", prefix, time.Now().Format("2006/01/02"), fileName) + return prefix + "/" +} + +func (s *BackupService) loadImportExpireDays(ctx context.Context) int { + raw, err := s.settingRepo.GetValue(ctx, settingKeyBackupSchedule) + if err != nil || strings.TrimSpace(raw) == "" { + return defaultBackupExpireDays + } + + var cfg BackupScheduleConfig + if err := json.Unmarshal([]byte(raw), &cfg); err != nil { + return defaultBackupExpireDays + } + + return cfg.RetainDays } // loadRecords 加载备份记录,区分"无数据"和"数据损坏" @@ -1058,10 +1281,7 @@ func (s *BackupService) saveRecord(ctx context.Context, record *BackupRecord) er records = append(records, *record) } - // 限制记录数量 - if len(records) > maxBackupRecords { - records = records[len(records)-maxBackupRecords:] - } + records = trimBackupRecords(records) return s.saveRecordsLocked(ctx, records) } @@ -1088,6 +1308,11 @@ func (s *BackupService) cleanupOldBackups(ctx context.Context, schedule *BackupS var toKeep []BackupRecord for i, r := range records { + if r.TriggeredBy == "imported" { + toKeep = append(toKeep, r) 
+ continue + } + shouldDelete := false // 按保留份数清理 @@ -1135,3 +1360,81 @@ func (s *BackupService) deleteS3Object(ctx context.Context, key string) error { } return objectStore.Delete(ctx, key) } + +func (s *BackupService) buildImportPreviewItem(object BackupObjectInfo, recordByKey map[string]BackupRecord) BackupImportPreviewItem { + item := BackupImportPreviewItem{ + S3Key: object.Key, + SizeBytes: object.SizeBytes, + LastModified: object.LastModified, + } + + if record, exists := recordByKey[object.Key]; exists { + item.HasRecord = true + item.RecordID = record.ID + item.FileName = record.FileName + if item.FileName == "" { + item.FileName = path.Base(object.Key) + } + item.Reason = "already_recorded" + return item + } + + fileName, reason := classifyImportableBackupObject(object) + if reason == "" { + item.FileName = fileName + item.CanImport = true + return item + } + + item.Reason = reason + return item +} + +func classifyImportableBackupObject(object BackupObjectInfo) (string, string) { + key := strings.TrimSpace(object.Key) + if key == "" || strings.HasSuffix(key, "/") { + return "", "not_file_object" + } + + fileName := path.Base(key) + if fileName == "" || fileName == "." 
|| fileName == "/" { + return "", "invalid_file_name" + } + + if !strings.HasSuffix(strings.ToLower(fileName), ".sql.gz") { + return fileName, "unsupported_file_type" + } + + return fileName, "" +} + +func backupObjectTimestamp(lastModified string) time.Time { + if strings.TrimSpace(lastModified) == "" { + return time.Now() + } + + ts, err := time.Parse(time.RFC3339, lastModified) + if err != nil { + return time.Now() + } + return ts +} + +func trimBackupRecords(records []BackupRecord) []BackupRecord { + if maxBackupRecords <= 0 { + return records + } + + if len(records) <= maxBackupRecords { + return records + } + + sort.SliceStable(records, func(i, j int) bool { + if records[i].StartedAt == records[j].StartedAt { + return records[i].ID < records[j].ID + } + return records[i].StartedAt < records[j].StartedAt + }) + + return records[len(records)-maxBackupRecords:] +} diff --git a/backend/internal/service/backup_service_test.go b/backend/internal/service/backup_service_test.go index b308e6d09d8..088baa634fc 100644 --- a/backend/internal/service/backup_service_test.go +++ b/backend/internal/service/backup_service_test.go @@ -188,6 +188,24 @@ func (m *mockObjectStore) Download(_ context.Context, key string) (io.ReadCloser return io.NopCloser(bytes.NewReader(data)), nil } +func (m *mockObjectStore) List(_ context.Context, prefix string) ([]BackupObjectInfo, error) { + m.mu.Lock() + defer m.mu.Unlock() + + items := make([]BackupObjectInfo, 0, len(m.objects)) + for key, data := range m.objects { + if prefix != "" && !strings.HasPrefix(key, prefix) { + continue + } + items = append(items, BackupObjectInfo{ + Key: key, + SizeBytes: int64(len(data)), + LastModified: time.Date(2026, 5, 10, 10, 0, 0, 0, time.UTC).Format(time.RFC3339), + }) + } + return items, nil +} + func (m *mockObjectStore) Delete(_ context.Context, key string) error { m.mu.Lock() delete(m.objects, key) @@ -701,3 +719,205 @@ func TestStartRestore_Async(t *testing.T) { require.NoError(t, err) 
require.Equal(t, "completed", final.RestoreStatus) } + +func TestStartRestore_AsyncFailure(t *testing.T) { + repo := newMockSettingRepo() + seedS3Config(t, repo) + + dumper := &mockDumper{ + dumpData: []byte("-- PostgreSQL dump\nSELECT 1;\n"), + restErr: fmt.Errorf("psql: relation already exists"), + } + store := newMockObjectStore() + svc := newTestBackupService(repo, dumper, store) + + record, err := svc.CreateBackup(context.Background(), "manual", 14) + require.NoError(t, err) + + restored, err := svc.StartRestore(context.Background(), record.ID) + require.NoError(t, err) + require.Equal(t, "running", restored.RestoreStatus) + + svc.wg.Wait() + + final, err := svc.GetBackupRecord(context.Background(), record.ID) + require.NoError(t, err) + require.Equal(t, "failed", final.RestoreStatus) + require.Contains(t, final.RestoreError, "psql: relation already exists") +} + +func TestBackupService_PreviewImportBackups(t *testing.T) { + repo := newMockSettingRepo() + seedS3Config(t, repo) + + store := newMockObjectStore() + store.objects["backups/2026/05/10/existing.sql.gz"] = []byte("existing") + store.objects["backups/2026/05/10/missing.sql.gz"] = []byte("missing") + store.objects["backups/2026/05/10/readme.txt"] = []byte("skip") + + svc := newTestBackupService(repo, &mockDumper{}, store) + require.NoError(t, svc.saveRecord(context.Background(), &BackupRecord{ + ID: "rec-existing", + Status: "completed", + S3Key: "backups/2026/05/10/existing.sql.gz", + FileName: "existing.sql.gz", + StartedAt: time.Now().Format(time.RFC3339), + })) + + preview, err := svc.PreviewImportBackups(context.Background()) + require.NoError(t, err) + require.Equal(t, "backups/", preview.Prefix) + require.Equal(t, 3, preview.TotalObjects) + require.Equal(t, 1, preview.ExistingCount) + require.Equal(t, 2, preview.MissingCount) + require.Equal(t, 1, preview.ImportableCount) + + byKey := make(map[string]BackupImportPreviewItem, len(preview.Items)) + for _, item := range preview.Items { + 
byKey[item.S3Key] = item + } + + require.True(t, byKey["backups/2026/05/10/existing.sql.gz"].HasRecord) + require.Equal(t, "rec-existing", byKey["backups/2026/05/10/existing.sql.gz"].RecordID) + require.True(t, byKey["backups/2026/05/10/missing.sql.gz"].CanImport) + require.Equal(t, "unsupported_file_type", byKey["backups/2026/05/10/readme.txt"].Reason) +} + +func TestBackupService_ImportMissingBackups(t *testing.T) { + repo := newMockSettingRepo() + seedS3Config(t, repo) + + store := newMockObjectStore() + store.objects["backups/2026/05/10/existing.sql.gz"] = []byte("existing") + store.objects["backups/2026/05/10/missing.sql.gz"] = []byte("missing") + store.objects["backups/2026/05/10/readme.txt"] = []byte("skip") + + svc := newTestBackupService(repo, &mockDumper{}, store) + _, err := svc.UpdateSchedule(context.Background(), BackupScheduleConfig{ + RetainDays: 7, + }) + require.NoError(t, err) + require.NoError(t, svc.saveRecord(context.Background(), &BackupRecord{ + ID: "rec-existing", + Status: "completed", + BackupType: "postgres", + S3Key: "backups/2026/05/10/existing.sql.gz", + FileName: "existing.sql.gz", + TriggeredBy: "manual", + StartedAt: time.Now().Format(time.RFC3339), + })) + + result, err := svc.ImportMissingBackups(context.Background()) + require.NoError(t, err) + require.Equal(t, 1, result.ImportedCount) + require.Equal(t, 2, result.SkippedCount) + require.Len(t, result.Items, 1) + require.Equal(t, "missing.sql.gz", result.Items[0].FileName) + require.Equal(t, "completed", result.Items[0].Status) + require.Equal(t, "postgres", result.Items[0].BackupType) + require.Equal(t, "imported", result.Items[0].TriggeredBy) + + records, err := svc.ListBackups(context.Background()) + require.NoError(t, err) + require.Len(t, records, 2) + + var imported *BackupRecord + for i := range records { + if records[i].S3Key == "backups/2026/05/10/missing.sql.gz" { + imported = &records[i] + break + } + } + require.NotNil(t, imported) + require.Equal(t, "missing.sql.gz", 
imported.FileName) + require.Equal(t, "completed", imported.Status) + require.Equal(t, "imported", imported.TriggeredBy) + require.NotEmpty(t, imported.StartedAt) + require.NotEmpty(t, imported.FinishedAt) + require.Equal(t, "2026-05-17T10:00:00Z", imported.ExpiresAt) + + secondRun, err := svc.ImportMissingBackups(context.Background()) + require.NoError(t, err) + require.Equal(t, 0, secondRun.ImportedCount) + require.Equal(t, 3, secondRun.SkippedCount) +} + +func TestBackupService_ImportMissingBackups_BackfillsImportedExpiry(t *testing.T) { + repo := newMockSettingRepo() + seedS3Config(t, repo) + + store := newMockObjectStore() + store.objects["backups/2026/05/10/imported.sql.gz"] = []byte("imported") + + svc := newTestBackupService(repo, &mockDumper{}, store) + _, err := svc.UpdateSchedule(context.Background(), BackupScheduleConfig{ + RetainDays: 7, + }) + require.NoError(t, err) + + require.NoError(t, svc.saveRecord(context.Background(), &BackupRecord{ + ID: "imported-1", + Status: "completed", + BackupType: "postgres", + FileName: "imported.sql.gz", + S3Key: "backups/2026/05/10/imported.sql.gz", + TriggeredBy: "imported", + StartedAt: "2026-05-10T10:00:00Z", + FinishedAt: "2026-05-10T10:00:00Z", + })) + + result, err := svc.ImportMissingBackups(context.Background()) + require.NoError(t, err) + require.Equal(t, 0, result.ImportedCount) + require.Equal(t, 1, result.SkippedCount) + + record, err := svc.GetBackupRecord(context.Background(), "imported-1") + require.NoError(t, err) + require.Equal(t, "2026-05-17T10:00:00Z", record.ExpiresAt) +} + +func TestCleanupOldBackups_KeepsImportedRecords(t *testing.T) { + repo := newMockSettingRepo() + seedS3Config(t, repo) + + store := newMockObjectStore() + store.objects["backups/2026/05/10/imported.sql.gz"] = []byte("imported") + store.objects["backups/2026/05/10/manual.sql.gz"] = []byte("manual") + svc := newTestBackupService(repo, &mockDumper{}, store) + + require.NoError(t, svc.saveRecord(context.Background(), 
&BackupRecord{ + ID: "imported-1", + Status: "completed", + BackupType: "postgres", + FileName: "imported.sql.gz", + S3Key: "backups/2026/05/10/imported.sql.gz", + TriggeredBy: "imported", + StartedAt: time.Now().Add(-30 * 24 * time.Hour).Format(time.RFC3339), + })) + require.NoError(t, svc.saveRecord(context.Background(), &BackupRecord{ + ID: "manual-1", + Status: "completed", + BackupType: "postgres", + FileName: "manual.sql.gz", + S3Key: "backups/2026/05/10/manual.sql.gz", + TriggeredBy: "manual", + StartedAt: time.Now().Add(-30 * 24 * time.Hour).Format(time.RFC3339), + })) + + err := svc.cleanupOldBackups(context.Background(), &BackupScheduleConfig{ + RetainDays: 7, + }) + require.NoError(t, err) + + records, err := svc.ListBackups(context.Background()) + require.NoError(t, err) + require.Len(t, records, 1) + require.Equal(t, "imported-1", records[0].ID) + + store.mu.Lock() + defer store.mu.Unlock() + _, importedExists := store.objects["backups/2026/05/10/imported.sql.gz"] + _, manualExists := store.objects["backups/2026/05/10/manual.sql.gz"] + require.True(t, importedExists) + require.False(t, manualExists) +} diff --git a/deploy/Dockerfile b/deploy/Dockerfile index a947158f190..62540747e7a 100644 --- a/deploy/Dockerfile +++ b/deploy/Dockerfile @@ -9,6 +9,7 @@ ARG NODE_IMAGE=node:24-alpine ARG GOLANG_IMAGE=golang:1.26.3-alpine ARG ALPINE_IMAGE=alpine:3.20 +ARG POSTGRES_IMAGE=postgres:18-alpine ARG GOPROXY=https://goproxy.cn,direct ARG GOSUMDB=sum.golang.google.cn @@ -20,10 +21,11 @@ FROM ${NODE_IMAGE} AS frontend-builder WORKDIR /app/frontend # Install pnpm -RUN corepack enable && corepack prepare pnpm@latest --activate +RUN corepack enable && corepack prepare pnpm@9.15.9 --activate # Install dependencies first (better caching) COPY frontend/package.json frontend/pnpm-lock.yaml ./ +COPY frontend/pnpm-workspace.yaml ./ RUN pnpm install --frozen-lockfile # Copy frontend source and build @@ -68,7 +70,12 @@ RUN CGO_ENABLED=0 GOOS=linux go build \ ./cmd/server # 
----------------------------------------------------------------------------- -# Stage 3: Final Runtime Image +# Stage 3: PostgreSQL Client +# ----------------------------------------------------------------------------- +FROM ${POSTGRES_IMAGE} AS pg-client + +# ----------------------------------------------------------------------------- +# Stage 4: Final Runtime Image # ----------------------------------------------------------------------------- FROM ${ALPINE_IMAGE} @@ -83,8 +90,18 @@ RUN apk add --no-cache \ tzdata \ curl \ su-exec \ + libpq \ + zstd-libs \ + lz4-libs \ + krb5-libs \ + libldap \ + libedit \ && rm -rf /var/cache/apk/* +COPY --from=pg-client /usr/local/bin/pg_dump /usr/local/bin/pg_dump +COPY --from=pg-client /usr/local/bin/psql /usr/local/bin/psql +COPY --from=pg-client /usr/local/lib/libpq.so.5* /usr/local/lib/ + # Create non-root user RUN addgroup -g 1000 sub2api && \ adduser -u 1000 -G sub2api -s /bin/sh -D sub2api diff --git a/frontend/package.json b/frontend/package.json index d33026f9296..c8f7ce9e781 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -56,5 +56,11 @@ "vite-plugin-checker": "^0.9.1", "vitest": "^2.1.9", "vue-tsc": "^2.2.0" + }, + "pnpm": { + "onlyBuiltDependencies": [ + "esbuild", + "vue-demi" + ] } } diff --git a/frontend/pnpm-workspace.yaml b/frontend/pnpm-workspace.yaml new file mode 100644 index 00000000000..6ac93c9f1f8 --- /dev/null +++ b/frontend/pnpm-workspace.yaml @@ -0,0 +1,6 @@ +packages: + - . 
+
+onlyBuiltDependencies:
+  - esbuild
+  - vue-demi
diff --git a/frontend/src/api/admin/backup.ts b/frontend/src/api/admin/backup.ts
index bccb1f8023e..b7d03ebb868 100644
--- a/frontend/src/api/admin/backup.ts
+++ b/frontend/src/api/admin/backup.ts
@@ -35,6 +35,32 @@ export interface BackupRecord {
   restored_at?: string
 }
 
+export interface BackupImportPreviewItem {
+  file_name: string
+  s3_key: string
+  size_bytes: number
+  last_modified?: string
+  has_record: boolean
+  record_id?: string
+  can_import: boolean
+  reason?: string
+}
+
+export interface BackupImportPreview {
+  prefix: string
+  total_objects: number
+  existing_count: number
+  missing_count: number
+  importable_count: number
+  items: BackupImportPreviewItem[]
+}
+
+export interface BackupImportResult {
+  imported_count: number
+  skipped_count: number
+  items: BackupRecord[]
+}
+
 export interface CreateBackupRequest {
   expire_days?: number
 }
@@ -96,6 +122,16 @@ export async function getDownloadURL(id: string): Promise<{ url: string }> {
   return data
 }
 
+export async function previewImportBackups(): Promise<BackupImportPreview> {
+  const { data } = await apiClient.get<BackupImportPreview>('/admin/backups/import-preview')
+  return data
+}
+
+export async function importMissingBackups(): Promise<BackupImportResult> {
+  const { data } = await apiClient.post<BackupImportResult>('/admin/backups/import', {})
+  return data
+}
+
 // Restore
 export async function restoreBackup(id: string, password: string): Promise {
   const { data } = await apiClient.post(`/admin/backups/${id}/restore`, { password })
@@ -113,6 +149,8 @@ export const backupAPI = {
   getBackup,
   deleteBackup,
   getDownloadURL,
+  previewImportBackups,
+  importMissingBackups,
   restoreBackup,
 }
 
diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts
index d18a895c007..a4f2134902d 100644
--- a/frontend/src/i18n/locales/en.ts
+++ b/frontend/src/i18n/locales/en.ts
@@ -1430,10 +1430,50 @@ export default {
     },
     trigger: {
       manual: 'Manual',
-      scheduled: 'Scheduled'
+      scheduled: 'Scheduled',
+      imported: 'Imported'
     },
     neverExpire: 'Never',
     empty: 'No backup records',
+    importRecords: {
+      title: 'Import Backup Records From Object Storage',
+      description: 'Scan the configured official backup prefix, compare it with existing backup_records, and import missing records after preview.',
+      scan: 'Scan Object Storage',
+      scanning: 'Scanning...',
+      importMissing: 'Import Missing Records',
+      importing: 'Importing...',
+      prefixLabel: 'Scan Prefix:',
+      nothingToImport: 'No missing backup records can be imported',
+      importConfirm: 'Import {count} missing backup records now?',
+      importSuccess: 'Imported {count} backup records successfully',
+      empty: 'No objects found under the current prefix',
+      summary: {
+        totalObjects: 'Total Objects',
+        existing: 'Recorded',
+        missing: 'Missing',
+        importable: 'Importable'
+      },
+      columns: {
+        status: 'Status',
+        fileName: 'File Name',
+        s3Key: 'Object Key',
+        size: 'Size',
+        lastModified: 'Last Modified',
+        recordId: 'Recorded ID',
+        reason: 'Reason'
+      },
+      status: {
+        recorded: 'Recorded',
+        missing: 'Importable',
+        skipped: 'Skipped'
+      },
+      reasons: {
+        already_recorded: 'This object already has an official record',
+        not_file_object: 'This object is not a file',
+        invalid_file_name: 'Invalid file name',
+        unsupported_file_type: 'Not an official PostgreSQL backup file (.sql.gz required)'
+      }
+    },
     actions: {
       download: 'Download',
       restore: 'Restore',
diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts
index 4f473f946b7..888e8bf38ed 100644
--- a/frontend/src/i18n/locales/zh.ts
+++ b/frontend/src/i18n/locales/zh.ts
@@ -1451,10 +1451,50 @@ export default {
     },
     trigger: {
       manual: '手动',
-      scheduled: '定时'
+      scheduled: '定时',
+      imported: '补录'
     },
     neverExpire: '永不过期',
     empty: '暂无备份记录',
+    importRecords: {
+      title: '对象存储备份记录补录',
+      description: '扫描当前官方备份前缀,对比已有 backup_records,预览并补录缺失记录。',
+      scan: '扫描对象存储',
+      scanning: '扫描中...',
+      importMissing: '导入缺失记录',
+      importing: '导入中...',
+      prefixLabel: '扫描前缀:',
+      nothingToImport: '没有可导入的缺失备份记录',
+      importConfirm: '确认导入 {count} 条缺失备份记录吗?',
+      importSuccess: '成功补录 {count} 条备份记录',
+      empty: '当前前缀下没有可预览的对象',
+      summary: {
+        totalObjects: '对象总数',
+        existing: '已有记录',
+        missing: '缺失记录',
+        importable: '可导入'
+      },
+      columns: {
+        status: '状态',
+        fileName: '文件名',
+        s3Key: '对象 Key',
+        size: '大小',
+        lastModified: '最后修改时间',
+        recordId: '已有记录 ID',
+        reason: '说明'
+      },
+      status: {
+        recorded: '已有记录',
+        missing: '可补录',
+        skipped: '跳过'
+      },
+      reasons: {
+        already_recorded: '该对象已有官方记录',
+        not_file_object: '该对象不是文件',
+        invalid_file_name: '文件名无效',
+        unsupported_file_type: '不是官方 PostgreSQL 备份文件(需为 .sql.gz)'
+      }
+    },
     actions: {
       download: '下载',
       restore: '恢复',
diff --git a/frontend/src/views/admin/BackupView.vue b/frontend/src/views/admin/BackupView.vue
index 903dfe321ec..45d742c3a6a 100644
--- a/frontend/src/views/admin/BackupView.vue
+++ b/frontend/src/views/admin/BackupView.vue
@@ -117,6 +117,96 @@
+
+
+
+

+ {{ t('admin.backup.importRecords.title') }} +

+

+ {{ t('admin.backup.importRecords.description') }} +

+
+
+ + +
+
+ +
+

+ {{ t('admin.backup.importRecords.prefixLabel') }} {{ importPreview.prefix }} +

+ +
+
+
{{ t('admin.backup.importRecords.summary.totalObjects') }}
+
{{ importPreview.total_objects }}
+
+
+
{{ t('admin.backup.importRecords.summary.existing') }}
+
{{ importPreview.existing_count }}
+
+
+
{{ t('admin.backup.importRecords.summary.missing') }}
+
{{ importPreview.missing_count }}
+
+
+
{{ t('admin.backup.importRecords.summary.importable') }}
+
{{ importPreview.importable_count }}
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
{{ t('admin.backup.importRecords.columns.status') }}{{ t('admin.backup.importRecords.columns.fileName') }}{{ t('admin.backup.importRecords.columns.s3Key') }}{{ t('admin.backup.importRecords.columns.size') }}{{ t('admin.backup.importRecords.columns.lastModified') }}{{ t('admin.backup.importRecords.columns.recordId') }}{{ t('admin.backup.importRecords.columns.reason') }}
+ + {{ importPreviewStatusLabel(item) }} + + {{ item.file_name || '-' }}{{ item.s3_key }}{{ formatSize(item.size_bytes) }}{{ formatDate(item.last_modified) }}{{ item.record_id || '-' }} + {{ item.reason ? t(`admin.backup.importRecords.reasons.${item.reason}`) : '-' }} +
+ {{ t('admin.backup.importRecords.empty') }} +
+
+
+
+
@@ -150,7 +240,7 @@ {{ record.expires_at ? formatDate(record.expires_at) : t('admin.backup.neverExpire') }}
-            {{ record.triggered_by === 'scheduled' ? t('admin.backup.trigger.scheduled') : t('admin.backup.trigger.manual') }}
+            {{ formatTriggeredBy(record.triggered_by) }}
 
             {{ formatDate(record.started_at) }}
@@ -283,7 +373,13 @@ import { computed, onBeforeUnmount, onMounted, ref } from 'vue'
 import { useI18n } from 'vue-i18n'
 import { adminAPI } from '@/api'
 import { useAppStore } from '@/stores'
-import type { BackupS3Config, BackupScheduleConfig, BackupRecord } from '@/api/admin/backup'
+import type {
+  BackupImportPreview,
+  BackupImportPreviewItem,
+  BackupRecord,
+  BackupS3Config,
+  BackupScheduleConfig,
+} from '@/api/admin/backup'
 
 const { t } = useI18n()
 const appStore = useAppStore()
@@ -317,6 +413,9 @@ const loadingBackups = ref(false)
 const creatingBackup = ref(false)
 const restoringId = ref('')
 const manualExpireDays = ref(14)
+const importPreview = ref<BackupImportPreview | null>(null)
+const loadingImportPreview = ref(false)
+const importingMissingBackups = ref(false)
 
 // Polling
 const pollingTimer = ref<ReturnType<typeof setTimeout> | null>(null)
@@ -520,6 +619,17 @@ async function loadBackups() {
   }
 }
 
+async function scanImportPreview() {
+  loadingImportPreview.value = true
+  try {
+    importPreview.value = await adminAPI.backup.previewImportBackups()
+  } catch (error) {
+    appStore.showError((error as { message?: string })?.message || t('errors.networkError'))
+  } finally {
+    loadingImportPreview.value = false
+  }
+}
+
 async function createBackup() {
   creatingBackup.value = true
   try {
@@ -537,6 +647,29 @@
   }
 }
 
+async function importMissingBackupRecords() {
+  if (!importPreview.value || importPreview.value.importable_count <= 0) {
+    appStore.showWarning(t('admin.backup.importRecords.nothingToImport'))
+    return
+  }
+  if (!window.confirm(t('admin.backup.importRecords.importConfirm', { count: importPreview.value.importable_count }))) return
+
+  importingMissingBackups.value = true
+  try {
+    const result = await adminAPI.backup.importMissingBackups()
+    if (result.imported_count > 0) {
+      appStore.showSuccess(t('admin.backup.importRecords.importSuccess', { count: result.imported_count }))
+    } else {
+      appStore.showWarning(t('admin.backup.importRecords.nothingToImport'))
+    }
+    await Promise.all([loadBackups(), scanImportPreview()])
+  } catch (error) {
+    appStore.showError((error as { message?: string })?.message || t('errors.networkError'))
+  } finally {
+    importingMissingBackups.value = false
+  }
+}
+
 async function downloadBackup(id: string) {
   try {
     const result = await adminAPI.backup.getDownloadURL(id)
@@ -576,6 +709,22 @@ async function removeBackup(id: string) {
   }
 }
 
+function importPreviewStatusLabel(item: BackupImportPreviewItem): string {
+  if (item.has_record) return t('admin.backup.importRecords.status.recorded')
+  if (item.can_import) return t('admin.backup.importRecords.status.missing')
+  return t('admin.backup.importRecords.status.skipped')
+}
+
+function importPreviewStatusClass(item: BackupImportPreviewItem): string {
+  if (item.has_record) {
+    return 'bg-green-100 text-green-700 dark:bg-green-900/30 dark:text-green-300'
+  }
+  if (item.can_import) {
+    return 'bg-amber-100 text-amber-700 dark:bg-amber-900/30 dark:text-amber-300'
+  }
+  return 'bg-gray-100 text-gray-700 dark:bg-dark-800 dark:text-gray-300'
+}
+
 function statusClass(status: string): string {
   switch (status) {
     case 'completed':
@@ -603,6 +752,12 @@ function formatDate(value?: string): string {
   return date.toLocaleString()
 }
 
+function formatTriggeredBy(triggeredBy: string): string {
+  if (triggeredBy === 'scheduled') return t('admin.backup.trigger.scheduled')
+  if (triggeredBy === 'imported') return t('admin.backup.trigger.imported')
+  return t('admin.backup.trigger.manual')
+}
+
 onMounted(async () => {
   document.addEventListener('visibilitychange', handleVisibilityChange)
   await Promise.all([loadS3Config(), loadSchedule(), loadBackups()])