From d840597cb890920fb265e935dfebf7e8f7c5b279 Mon Sep 17 00:00:00 2001 From: sirir Date: Thu, 20 Mar 2025 22:14:45 +0100 Subject: [PATCH] working --- .env.example | 12 + .gitignore | 10 + Dockerfile | 47 + cmd/backup_performer/main.go | 37 + cmd/list-backups/main.go | 98 ++ cmd/list-buckets/main.go | 43 + cmd/notify/main.go | 34 + cmd/restore/main.go | 51 + cmd/server/main.go | 26 + compose.yaml | 34 + config.yml | 58 + go.mod | 46 + go.sum | 103 ++ internal/backup/backup.go | 131 ++ internal/backup/backup_factory.go | 187 +++ internal/backup/backup_kopia.go | 767 ++++++++++ internal/backup/backup_rsync.go | 51 + internal/backup/config.go | 112 ++ internal/backup/kopia_providers.go | 277 ++++ internal/backup/restore.go | 179 +++ internal/backup/service.go | 71 + internal/backup/strategy.go | 57 + internal/client/b2_client.go | 91 ++ internal/mail/mail.go | 104 ++ internal/mail/mail_test.go | 39 + internal/server/server.go | 26 + .../web/handlers/backup_actions_handler.go | 377 +++++ internal/web/handlers/homepage_handler.go | 317 +++++ internal/web/routes/routes.go | 34 + templates/home.templ | 505 +++++++ templates/home_templ.go | 1231 +++++++++++++++++ templates/layouts/base.templ | 632 +++++++++ templates/layouts/base_templ.go | 62 + templates/notifications.templ | 39 + templates/notifications_templ.go | 213 +++ 35 files changed, 6101 insertions(+) create mode 100644 .env.example create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 cmd/backup_performer/main.go create mode 100644 cmd/list-backups/main.go create mode 100644 cmd/list-buckets/main.go create mode 100644 cmd/notify/main.go create mode 100644 cmd/restore/main.go create mode 100644 cmd/server/main.go create mode 100644 compose.yaml create mode 100644 config.yml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/backup/backup.go create mode 100644 internal/backup/backup_factory.go create mode 100644 internal/backup/backup_kopia.go create mode 100644 internal/backup/backup_rsync.go create mode 100644 internal/backup/config.go create mode 100644 internal/backup/kopia_providers.go create mode 100644 internal/backup/restore.go create mode 100644 internal/backup/service.go create mode 100644 internal/backup/strategy.go create mode 100644 internal/client/b2_client.go create mode 100644 internal/mail/mail.go create mode 100644 internal/mail/mail_test.go create mode 100644 internal/server/server.go create mode 100644 internal/web/handlers/backup_actions_handler.go create mode 100644 internal/web/handlers/homepage_handler.go create mode 100644 internal/web/routes/routes.go create mode 100644 templates/home.templ create mode 100644 templates/home_templ.go create mode 100644 templates/layouts/base.templ create mode 100644 templates/layouts/base_templ.go create mode 100644 templates/notifications.templ create mode 100644 templates/notifications_templ.go diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..32a1990 --- /dev/null +++ b/.env.example @@ -0,0 +1,12 @@ +SMTP_HOST=mail.toto.fr +SMTP_PORT=587 +SMTP_USERNAME=backea@maric.ro +SMTP_PASSWORD=CHANGEME +SMTP_FROM=backea@maric.ro + +B2_APPLICATION_KEY=B2 +B2_KEY_ID=b2 + + + + diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..85a26ab --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +.env +kopia.env + +.vscode/ + +bin/ + +TODO + +config/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..2809401 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,47 @@ 
+FROM golang:1.24-alpine AS builder +WORKDIR /app + +RUN apk add --no-cache git +RUN go install github.com/a-h/templ/cmd/templ@latest + +COPY go.mod go.sum* ./ + +RUN go mod download + +COPY . . + +RUN /go/bin/templ generate + +RUN CGO_ENABLED=0 GOOS=linux go build -o /app/backea-server ./cmd/server/main.go +RUN CGO_ENABLED=0 GOOS=linux go build -o /app/backup-performer ./cmd/backup_performer/main.go +RUN CGO_ENABLED=0 GOOS=linux go build -o /app/list-backups ./cmd/list-backups/main.go + +FROM alpine:latest + +RUN echo "@edge http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories + +RUN apk add --no-cache \ + rsync \ + zip \ + unzip \ + borgbackup \ + python3 \ + py3-pip \ + curl \ + fuse \ + openssh-client \ + ca-certificates \ + kopia@edge + +WORKDIR /app + +COPY --from=builder /app/backea-server . +COPY --from=builder /app/backup-performer . +COPY --from=builder /app/list-backups . + +COPY --from=builder /app/static ./static +COPY --from=builder /app/templates ./templates + +RUN chmod +x /app/backea-server /app/backup-performer /app/list-backups + +CMD ["/app/backea-server"] \ No newline at end of file diff --git a/cmd/backup_performer/main.go b/cmd/backup_performer/main.go new file mode 100644 index 0000000..ac4c4d4 --- /dev/null +++ b/cmd/backup_performer/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "backea/internal/backup" + "context" + "flag" + "log" + "strings" + + "github.com/joho/godotenv" +) + +func main() { + configPath := flag.String("config", "config.yml", "Path to config file") + serviceFlag := flag.String("service", "", "Service to backup (format: group or group.index)") + flag.Parse() + + if err := godotenv.Load(); err != nil { + log.Printf("Warning: Error loading .env file: %v", err) + } + + ctx := context.Background() + + // Parse the service flag to extract group and index if provided + var serviceName, serviceIndex string + if *serviceFlag != "" { + parts := strings.SplitN(*serviceFlag, "-", 2) + serviceName = parts[0] + if len(parts) > 1 { + serviceIndex = parts[1] + } + } + + if err := backup.PerformBackups(ctx, *configPath, serviceName, serviceIndex); err != nil { + log.Fatalf("Backup failed: %v", err) + } +} diff --git a/cmd/list-backups/main.go b/cmd/list-backups/main.go new file mode 100644 index 0000000..acb2f06 --- /dev/null +++ b/cmd/list-backups/main.go @@ -0,0 +1,98 @@ +package main + +import ( + "backea/internal/backup" + "context" + "fmt" + "log" + "os" + "text/tabwriter" + "time" + + "github.com/joho/godotenv" +) + +func main() { + ctx := context.Background() + configPath := "config.yml" + + if err := godotenv.Load(); err != nil { + log.Printf("Warning: Error loading .env file: %v", err) + } + + // Create backup factory + factory, err := backup.NewBackupFactory(configPath) + if err != nil { + log.Fatalf("Failed to create backup factory: %v", err) + } + + // Process each service group + for groupName, serviceGroup := range factory.Config.Services { + fmt.Printf("Service Group: %s (Directory: %s)\n", groupName, serviceGroup.Source.Path) + + // Process each backup config in the group + for configIndex := range serviceGroup.BackupConfigs { + // Format the full service name + serviceName := fmt.Sprintf("%s.%s", groupName, configIndex) + + // Create the appropriate backup strategy using the factory + strategy, err := factory.CreateBackupStrategyForService(groupName, configIndex) + if err != nil { + log.Printf("Failed to create backup strategy for service %s: %v", serviceName, err) + continue + } + + // List backups for this service 
+ backups, err := strategy.ListBackups(ctx, serviceName) + if err != nil { + log.Printf("Failed to list backups for service %s: %v", serviceName, err) + continue + } + + // Display the backups + fmt.Printf("Found %d backups for service %s:\n", len(backups), serviceName) + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + fmt.Fprintln(w, "----------------------------------------------------") + fmt.Fprintln(w, "ID | Date | Size | Retention") + fmt.Fprintln(w, "----------------------------------------------------") + for _, backup := range backups { + fmt.Fprintf(w, "%s | %s | %s | %s\n", + backup.ID, + backup.CreationTime.Format(time.RFC3339), + formatSize(backup.Size), + backup.RetentionTag, + ) + } + fmt.Fprintln(w, "----------------------------------------------------") + w.Flush() + + // Display storage usage + usage, err := strategy.GetStorageUsage(ctx, serviceName) + if err != nil { + log.Printf("Failed to get storage usage for service %s: %v", serviceName, err) + continue + } + + fmt.Printf("Storage Usage for %s Repository\n", serviceName) + fmt.Println("---------------------------------------") + fmt.Printf("Provider: %s\n", usage.Provider) + fmt.Printf("Repository ID: %s\n", usage.ProviderID) + fmt.Printf("Physical Size: %s\n", formatSize(usage.TotalBytes)) + + fmt.Println() // Add a blank line between services for readability + } + } +} + +func formatSize(size int64) string { + const unit = 1024 + if size < unit { + return fmt.Sprintf("%d B", size) + } + div, exp := int64(unit), 0 + for n := size / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.2f %cB", float64(size)/float64(div), "KMGTPE"[exp]) +} diff --git a/cmd/list-buckets/main.go b/cmd/list-buckets/main.go new file mode 100644 index 0000000..bdfb57c --- /dev/null +++ b/cmd/list-buckets/main.go @@ -0,0 +1,43 @@ +package main + +import ( + b2_client "backea/internal/client" + "context" + "fmt" + "os" + "time" +) + +func main() { + // Create context with timeout + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create B2 client + client, err := b2_client.NewClientFromEnv() + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating B2 client: %v\n", err) + os.Exit(1) + } + + // List buckets + buckets, err := client.ListBuckets(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "Error listing buckets: %v\n", err) + os.Exit(1) + } + + // Print buckets + fmt.Println("B2 Buckets:") + fmt.Println("===========") + if len(buckets) == 0 { + fmt.Println("No buckets found") + } else { + for i, bucket := range buckets { + fmt.Printf("%d. %s (ID: %s)\n", + i+1, + bucket.Name, + bucket.ID) + } + } +} diff --git a/cmd/notify/main.go b/cmd/notify/main.go new file mode 100644 index 0000000..039a764 --- /dev/null +++ b/cmd/notify/main.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + "log" + + "backea/internal/mail" + + "github.com/joho/godotenv" +) + +func main() { + if err := godotenv.Load(); err != nil { + log.Println("Warning: No .env file found") + } + + client := mail.NewClient() + + // Check if required environment variables are set + if client.Config.Host == "" || client.Config.Username == "" || client.Config.Password == "" { + log.Fatal("Missing required SMTP configuration") + } + + // Send notification + recipients := []string{"romaric.sirii@gmail.com"} + subject := "Backup Notification" + body := "Your backup has completed successfully!" 
+ + if err := client.SendMail(recipients, subject, body); err != nil { + log.Fatalf("Failed to send email: %v", err) + } + + fmt.Println("Notification sent successfully") +} diff --git a/cmd/restore/main.go b/cmd/restore/main.go new file mode 100644 index 0000000..ab60f08 --- /dev/null +++ b/cmd/restore/main.go @@ -0,0 +1,51 @@ +package main + +import ( + "backea/internal/backup" + b2_client "backea/internal/client" + "context" + "log" + "time" +) + +func main() { + log.SetFlags(log.LstdFlags | log.Lshortfile) + log.Println("Starting Kopia Restore Tool") + + // Create context with timeout + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Create B2 client + b2Client, err := b2_client.NewClientFromEnv() + if err != nil { + log.Fatalf("Failed to create B2 client: %v", err) + } + + // Create restore manager + restoreManager := backup.NewRestoreManager(b2Client) + + // List snapshots for the 'montre' service + serviceName := "montre" + log.Printf("Listing snapshots for service '%s' (bucket: 'backea-%s')", serviceName, serviceName) + + err = restoreManager.ListSnapshots(ctx, serviceName) + if err != nil { + log.Fatalf("Failed to list snapshots: %v", err) + } + + // Example usage of RestoreFile function + // Use the complete snapshot ID from the output + // Looking at your output, let's use the most recent snapshot for /home/gevo/Documents/backea2 + snapshotID := "kdf031570cab920b40d296bda16d27ae7" // The latest snapshot ID + sourcePath := "" // Leave empty to restore the entire snapshot + targetPath := "./restored_files" // Local directory to restore files to + + log.Printf("Restoring files from snapshot %s to %s", snapshotID, targetPath) + err = restoreManager.RestoreFile(ctx, serviceName, snapshotID, sourcePath, targetPath) + if err != nil { + log.Fatalf("Failed to restore files: %v", err) + } + + log.Println("Restore tool completed successfully") +} diff --git a/cmd/server/main.go b/cmd/server/main.go new file mode 100644 index 0000000..0735a35 --- /dev/null +++ b/cmd/server/main.go @@ -0,0 +1,26 @@ +package main + +import ( + "backea/internal/backup" + "backea/internal/server" + "log" + "os" +) + +func main() { + // Get port from environment or use default + port := os.Getenv("PORT") + if port == "" { + port = "8080" + } + + // Create backup factory + backupFactory, _ := backup.NewBackupFactory("config.yml") + + // Initialize and start the server + srv := server.New(backupFactory) + log.Printf("Starting server on port %s", port) + if err := srv.Start(":" + port); err != nil { + log.Fatalf("Failed to start server: %v", err) + } +} diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 0000000..4774f87 --- /dev/null +++ b/compose.yaml @@ -0,0 +1,34 @@ +services: + backea: + build: + context: . 
+ dockerfile: Dockerfile + ports: + - "8080:8080" + volumes: + # Configuration files + - ./.env:/app/.env + - ./config.yml:/app/config.yml + - ./kopia.env:/app/kopia.env + + # Source directories to backup - map to identical paths inside container + - /home/gevo/Documents/backea2:/home/gevo/Documents/backea2 + - /home/gevo/Images:/home/gevo/Images + + # Dedicated repository storage + - /home/gevo/.kopia:/root/.kopia + + # SSH keys if needed + - /home/gevo/.ssh:/root/.ssh:ro + - ./config/kopia:/tmp/kopia_configs + environment: + - HOME=/root + cap_add: + - SYS_ADMIN + devices: + - /dev/fuse:/dev/fuse + security_opt: + - apparmor:unconfined + restart: unless-stopped + env_file: + - ./.env \ No newline at end of file diff --git a/config.yml b/config.yml new file mode 100644 index 0000000..5d7e4fe --- /dev/null +++ b/config.yml @@ -0,0 +1,58 @@ +defaults: + retention: + keep_latest: 10 + keep_hourly: 24 + keep_daily: 30 + keep_weekly: 8 + keep_monthly: 12 + keep_yearly: 3 + +services: + backealocal: + source: + host: "local" + path: "/home/gevo/Documents/backea2" + hooks: + before_hook: "ls" + after_hook: "" + backup_configs: + 1: + backup_strategy: + type: "kopia" + provider: "local" + destination: + path: "/home/gevo/Documents/backea2" + 2: + backup_strategy: + type: "kopia" + provider: "local" + destination: + path: "/home/gevo/Documents/backea2" + + imageslocal: + source: + host: "local" + path: "/home/gevo/Images/" + hooks: + before_hook: "ls" + after_hook: "" + backup_configs: + 1: + backup_strategy: + type: "kopia" + provider: "local" + destination: + host: "local" + + b2backupstotozzeeeaaaeeee: + source: + host: "local" + path: "/home/gevo/Images/" + hooks: + before_hook: "ls" + after_hook: "" + backup_configs: + 1: + backup_strategy: + type: "kopia" + provider: "b2_backblaze" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..b28e417 --- /dev/null +++ b/go.mod @@ -0,0 +1,46 @@ +module backea + +go 1.24.0 + +require ( + github.com/a-h/templ v0.3.833 + github.com/joho/godotenv v1.5.1 + github.com/labstack/echo/v4 v4.13.3 + github.com/spf13/viper v1.19.0 + gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216 +) + +require ( + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/golang/glog v1.2.4 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/readahead v0.0.0-20161222183148-eaceba169032 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/labstack/gommon v0.4.2 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.32.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys 
v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..c63121a --- /dev/null +++ b/go.sum @@ -0,0 +1,103 @@ +github.com/a-h/templ v0.3.833 h1:L/KOk/0VvVTBegtE0fp2RJQiBm7/52Zxv5fqlEHiQUU= +github.com/a-h/templ v0.3.833/go.mod h1:cAu4AiZhtJfBjMY0HASlyzvkrtjnHWPeEsyGK2YYmfk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/readahead v0.0.0-20161222183148-eaceba169032 h1:6Be3nkuJFyRfCgr6qTIzmRp8y9QwDIbqy/nYr9WDPos= +github.com/google/readahead v0.0.0-20161222183148-eaceba169032/go.mod h1:qYysrqQXuV4tzsizt4oOQ6mrBZQ0xnQXP3ylXX8Jk5Y= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.13.3 h1:pwhpCPrTl5qry5HRdM5FwdXnhXSLSY+WE+YQSeCaafY= +github.com/labstack/echo/v4 v4.13.3/go.mod h1:o90YNEeQWjDozo584l7AwhJMHN0bOC4tAfg+Xox9q5g= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20= +github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216 h1:2TSTkQ8PMvGOD5eeqqRVv6Z9+BYI+bowK97RCr3W+9M= +gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216/go.mod h1:zJ2QpyDCYo1KvLXlmdnFlQAyF/Qfth0fB8239Qg7BIE= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/backup/backup.go b/internal/backup/backup.go new file mode 100644 index 0000000..e9c2d2f --- /dev/null +++ b/internal/backup/backup.go @@ -0,0 +1,131 @@ +package backup + +import ( + "backea/internal/mail" + "context" + "fmt" + "log" + "sync" +) + +// PerformBackups executes backups for multiple services based on configuration +func PerformBackups(ctx context.Context, configPath string, serviceName string, serviceIndex string) error { + // Create backup factory + factory, err := NewBackupFactory(configPath) + if err != nil { + return err + } + + // Initialize mailer + mailer := mail.NewMailer() + + // Process services + if serviceName != "" { + // Process single service group or specific service + if serviceIndex != "" { + // Process specific service within a group + return processSpecificService(ctx, factory, serviceName, serviceIndex, mailer) + } else { + // Process all services in the specified group + return processServiceGroup(ctx, 
factory, serviceName, mailer) + } + } else { + // Process all service groups in parallel + var wg sync.WaitGroup + errs := make(chan error, len(factory.Config.Services)) + for groupName := range factory.Config.Services { + wg.Add(1) + go func(group string) { + defer wg.Done() + if err := processServiceGroup(ctx, factory, group, mailer); err != nil { + log.Printf("Failed to backup service group %s: %v", group, err) + errs <- fmt.Errorf("backup failed for group %s: %w", group, err) + } + }(groupName) + } + // Wait for all backups to complete + wg.Wait() + close(errs) + + // Check if any errors occurred + var lastErr error + for err := range errs { + lastErr = err + } + return lastErr + } +} + +// processServiceGroup handles the backup for all services in a group +func processServiceGroup(ctx context.Context, factory *BackupFactory, groupName string, mailer *mail.Mailer) error { + // Get service group configuration + serviceGroup, exists := factory.Config.Services[groupName] + if !exists { + log.Printf("Service group not found: %s", groupName) + return nil + } + + // Execute the before hook once for the entire group + if serviceGroup.Hooks.BeforeHook != "" { + log.Printf("Executing before hook for group %s: %s", groupName, serviceGroup.Hooks.BeforeHook) + if err := RunCommand(serviceGroup.Source.Path, serviceGroup.Hooks.BeforeHook); err != nil { + log.Printf("Failed to execute before hook for group %s: %v", groupName, err) + return err + } + } + + // Process all services in the group in parallel + var wg sync.WaitGroup + errs := make(chan error, len(serviceGroup.BackupConfigs)) + for configIndex := range serviceGroup.BackupConfigs { + wg.Add(1) + go func(group, index string) { + defer wg.Done() + if err := processSpecificService(ctx, factory, group, index, mailer); err != nil { + log.Printf("Failed to backup service %s.%s: %v", group, index, err) + errs <- fmt.Errorf("backup failed for %s.%s: %w", group, index, err) + } + }(groupName, configIndex) + } + // Wait for all backups to complete + wg.Wait() + close(errs) + + // Execute the after hook once for the entire group + if serviceGroup.Hooks.AfterHook != "" { + log.Printf("Executing after hook for group %s: %s", groupName, serviceGroup.Hooks.AfterHook) + if err := RunCommand(serviceGroup.Source.Path, serviceGroup.Hooks.AfterHook); err != nil { + log.Printf("Failed to execute after hook for group %s: %v", groupName, err) + // We don't return here because we want to process the errors from the backups + } + } + + // Check if any errors occurred + var lastErr error + for err := range errs { + lastErr = err + } + return lastErr +} + +// processSpecificService handles the backup for a specific service in a group +func processSpecificService(ctx context.Context, factory *BackupFactory, groupName string, configIndex string, mailer *mail.Mailer) error { + // Get service configuration + serviceGroup := factory.Config.Services[groupName] + + // Create the appropriate backup strategy using the factory + strategy, err := factory.CreateBackupStrategyForService(groupName, configIndex) + if err != nil { + log.Printf("Failed to create backup strategy for service %s.%s: %v", groupName, configIndex, err) + return err + } + + // Create and run service + service := NewService( + fmt.Sprintf("%s.%s", groupName, configIndex), + serviceGroup.Source.Path, + strategy, + mailer, + ) + return service.Backup(ctx) +} diff --git a/internal/backup/backup_factory.go b/internal/backup/backup_factory.go new file mode 100644 index 0000000..6b787a1 --- /dev/null +++ 
b/internal/backup/backup_factory.go @@ -0,0 +1,187 @@ +package backup + +import ( + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "time" +) + +// BackupFactory creates backup strategies and managers +type BackupFactory struct { + Config *Configuration + ConfigPath string +} + +// NewBackupFactory creates a new backup factory +func NewBackupFactory(configPath string) (*BackupFactory, error) { + // Load configuration + config, err := LoadConfig(configPath) + if err != nil { + return nil, fmt.Errorf("could not load config: %w", err) + } + return &BackupFactory{ + Config: config, + ConfigPath: configPath, + }, nil +} + +// CreateKopiaStrategy creates a new Kopia backup strategy with specific retention settings and provider +func (f *BackupFactory) CreateKopiaStrategy(retention Retention, provider KopiaProvider) *KopiaStrategy { + // Create temp directory for Kopia configs if it doesn't exist + tmpConfigDir := filepath.Join(os.TempDir(), "kopia_configs") + if err := os.MkdirAll(tmpConfigDir, 0755); err != nil { + log.Printf("Warning: failed to create temp config directory: %v", err) + // Fall back to default config path + tmpConfigDir = os.TempDir() + } + + // Generate a unique config file path for this instance + configPath := filepath.Join(tmpConfigDir, fmt.Sprintf("kopia_%s_%d.config", + provider.GetProviderType(), time.Now().UnixNano())) + + // Need to update this line to pass configPath + return NewKopiaStrategy(retention, provider, configPath) +} + +// CreateKopiaProvider creates the appropriate Kopia provider based on config +func (f *BackupFactory) CreateKopiaProvider(strategyConfig StrategyConfig) (KopiaProvider, error) { + switch strategyConfig.Provider { + case "b2_backblaze": + return NewKopiaB2Provider(), nil + case "local": + // Extract options for local path if specified + basePath := "" + if strategyConfig.Options != "" { + basePath = strategyConfig.Options + } else { + // Default to ~/.backea/repos if not specified + basePath = filepath.Join(os.Getenv("HOME"), ".backea", "repos") + } + return NewKopiaLocalProvider(basePath), nil + case "sftp": + // Parse options for SFTP - expected format: "user@host:/path/to/backups" + host := strategyConfig.Destination.Host + path := strategyConfig.Destination.Path + sshKey := strategyConfig.Destination.SSHKey + + if host == "" { + return nil, fmt.Errorf("SFTP provider requires host in destination config") + } + if path == "" { + return nil, fmt.Errorf("SFTP provider requires path in destination config") + } + + // Default SSH key if not specified + if sshKey == "" { + sshKey = filepath.Join(os.Getenv("HOME"), ".ssh", "id_rsa") + } + + // Extract username from host if in format user@host + username := "root" // default + if hostParts := strings.Split(host, "@"); len(hostParts) > 1 { + username = hostParts[0] + host = hostParts[1] + } + + return NewKopiaSFTPProvider(host, path, username, sshKey), nil + default: + return nil, fmt.Errorf("unsupported Kopia provider: %s", strategyConfig.Provider) + } +} + +// CreateBackupStrategyForService creates a backup strategy for the specified service within a group +func (f *BackupFactory) CreateBackupStrategyForService(groupName string, serviceIndex string) (Strategy, error) { + // Find service group + serviceGroup, exists := f.Config.Services[groupName] + if !exists { + return nil, fmt.Errorf("service group not found: %s", groupName) + } + + // Find specific backup config within the group + backupConfig, exists := serviceGroup.BackupConfigs[serviceIndex] + if !exists { + return nil, 
fmt.Errorf("backup config not found: %s.%s", groupName, serviceIndex) + } + + // Create appropriate strategy based on type + strategyConfig := backupConfig.BackupStrategy + + // Extract retention settings once + retention := Retention{ + KeepLatest: strategyConfig.Retention.KeepLatest, + KeepHourly: strategyConfig.Retention.KeepHourly, + KeepDaily: strategyConfig.Retention.KeepDaily, + KeepWeekly: strategyConfig.Retention.KeepWeekly, + KeepMonthly: strategyConfig.Retention.KeepMonthly, + KeepYearly: strategyConfig.Retention.KeepYearly, + } + + switch strategyConfig.Type { + case "kopia": + // Create appropriate provider based on configuration + provider, err := f.CreateKopiaProvider(strategyConfig) + if err != nil { + return nil, fmt.Errorf("failed to create kopia provider: %w", err) + } + return f.CreateKopiaStrategy(retention, provider), nil + case "rsync": + // Uncomment when rsync implementation is ready + // return NewRsyncStrategy( + // strategyConfig.Options, + // Destination{ + // Host: strategyConfig.Destination.Host, + // Path: strategyConfig.Destination.Path, + // SSHKey: strategyConfig.Destination.SSHKey, + // }, + // ), nil + fallthrough + default: + // Default to local kopia if type is unknown or rsync (temporarily) + provider, _ := f.CreateKopiaProvider(StrategyConfig{Provider: "local"}) + return f.CreateKopiaStrategy(retention, provider), nil + } +} + +// ExecuteGroupHooks runs the hooks for a service group +func (f *BackupFactory) ExecuteGroupHooks(groupName string, isBeforeHook bool) error { + serviceGroup, exists := f.Config.Services[groupName] + if !exists { + return fmt.Errorf("service group not found: %s", groupName) + } + + var hook string + if isBeforeHook { + hook = serviceGroup.Hooks.BeforeHook + } else { + hook = serviceGroup.Hooks.AfterHook + } + + if hook == "" { + return nil + } + + // Execute the hook + cmd := exec.Command("sh", "-c", hook) + output, err := cmd.CombinedOutput() + + if err != nil { + return fmt.Errorf("hook execution failed: %w, output: %s", err, string(output)) + } + + log.Printf("Hook executed successfully: %s, output: %s", hook, string(output)) + return nil +} + +// ReloadConfig refreshes the configuration from the config file +func (f *BackupFactory) ReloadConfig() error { + config, err := LoadConfig(f.ConfigPath) + if err != nil { + return fmt.Errorf("could not reload config: %w", err) + } + f.Config = config + return nil +} diff --git a/internal/backup/backup_kopia.go b/internal/backup/backup_kopia.go new file mode 100644 index 0000000..c3b4e66 --- /dev/null +++ b/internal/backup/backup_kopia.go @@ -0,0 +1,767 @@ +package backup + +import ( + "archive/zip" + "bufio" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" +) + +// KopiaStrategy implements the backup strategy using Kopia +type KopiaStrategy struct { + Retention Retention + Provider KopiaProvider + ConfigPath string +} + +// NewKopiaStrategy creates a new kopia strategy with specified provider +func NewKopiaStrategy(retention Retention, provider KopiaProvider, configPath string) *KopiaStrategy { + return &KopiaStrategy{ + Retention: retention, + Provider: provider, + ConfigPath: configPath, + } +} + +// Execute performs the kopia backup +func (s *KopiaStrategy) Execute(ctx context.Context, serviceName, directory string) error { + log.Printf("Performing kopia backup for service %s: %s", serviceName, directory) + + // Get or create password for this service + password, err := 
getOrCreatePassword(serviceName, 48) + if err != nil { + return fmt.Errorf("failed to get or create password: %w", err) + } + + // Ensure Kopia config directory exists + kopiaConfigDir := filepath.Join(os.Getenv("HOME"), ".kopia") + if err := os.MkdirAll(kopiaConfigDir, 0755); err != nil { + return fmt.Errorf("failed to create kopia config directory: %w", err) + } + + // Repository not connected, connect or create via provider + log.Printf("Connecting to repository for %s", serviceName) + if err := s.Provider.Connect(ctx, serviceName, password, s.ConfigPath); err != nil { + return fmt.Errorf("failed to connect to repository: %w", err) + } + + // Create snapshot + log.Printf("Creating snapshot for directory: %s", directory) + snapshotCmd := exec.Command("kopia", "--config-file", s.ConfigPath, "snapshot", "create", directory) + snapshotOutput, err := snapshotCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create snapshot: %w\nOutput: %s", err, snapshotOutput) + } + + // Set retention policy + log.Printf("Setting retention policy for %s", serviceName) + args := []string{ + "--config-file", s.ConfigPath, + "policy", "set", + "--keep-latest", fmt.Sprintf("%d", s.Retention.KeepLatest), + "--keep-hourly", fmt.Sprintf("%d", s.Retention.KeepHourly), + "--keep-daily", fmt.Sprintf("%d", s.Retention.KeepDaily), + "--keep-weekly", fmt.Sprintf("%d", s.Retention.KeepWeekly), + "--keep-monthly", fmt.Sprintf("%d", s.Retention.KeepMonthly), + "--keep-annual", fmt.Sprintf("%d", s.Retention.KeepYearly), + directory, + } + policyCmd := exec.Command("kopia", args...) + policyOutput, err := policyCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to set policy: %w\nOutput: %s", err, policyOutput) + } + + log.Printf("Snapshot and policy set successfully for %s", serviceName) + return nil +} + +// ListBackups returns information about existing backups +func (s *KopiaStrategy) ListBackups(ctx context.Context, serviceName string) ([]BackupInfo, error) { + // Parse service group and index from service name (e.g., "backealocal.1") + groupName, _, err := parseServiceName(serviceName) + if err != nil { + return nil, fmt.Errorf("invalid service name format: %w", err) + } + + // Get service directory from config + factory, err := NewBackupFactory("config.yml") + if err != nil { + return nil, fmt.Errorf("failed to create factory: %w", err) + } + + // Find service group + serviceGroup, exists := factory.Config.Services[groupName] + if !exists { + return nil, fmt.Errorf("service group not found: %s", groupName) + } + + // Get directory from the service group level + directoryPath := strings.TrimRight(serviceGroup.Source.Path, "/") + + // Ensure we're connected to the repository + err = s.EnsureRepositoryConnected(ctx, serviceName) + if err != nil { + return nil, fmt.Errorf("failed to connect to repository: %w", err) + } + + // Run kopia snapshot list command with JSON output + cmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", s.ConfigPath, + "snapshot", + "list", + "--json", + ) + + output, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("failed to list snapshots: %w", err) + } + + // Parse the JSON output + var snapshots []map[string]interface{} + if err := json.Unmarshal(output, &snapshots); err != nil { + return nil, fmt.Errorf("failed to parse snapshot list: %w", err) + } + + // Convert to BackupInfo + var result []BackupInfo + for _, snap := range snapshots { + // Get basic info + id, _ := snap["id"].(string) + endTime, _ := snap["endTime"].(string) + + 
// Only include snapshots for the requested service by directory path + source, ok := snap["source"].(map[string]interface{}) + if !ok { + continue + } + + sourcePath, _ := source["path"].(string) + // Match by exact directory path, not service name + if sourcePath != directoryPath { + continue + } + + // Parse time + var creationTime time.Time + if endTime != "" { + t, err := time.Parse(time.RFC3339, endTime) + if err == nil { + creationTime = t + } + } + + // Get size information from stats + var size int64 + if stats, ok := snap["stats"].(map[string]interface{}); ok { + if totalSize, ok := stats["totalSize"].(float64); ok { + size = int64(totalSize) + } + } + + // Determine retention tag + retentionTag := "none" + if reasons, ok := snap["retentionReason"].([]interface{}); ok && len(reasons) > 0 { + // Get the first reason which indicates the highest priority retention + if reason, ok := reasons[0].(string); ok { + parts := strings.SplitN(reason, "-", 2) + if len(parts) > 0 { + retentionTag = parts[0] + } + } + } + + result = append(result, BackupInfo{ + ID: id, + CreationTime: creationTime, + Size: size, + Source: sourcePath, + Type: "kopia", + RetentionTag: retentionTag, + }) + } + + return result, nil +} + +// GetStorageUsage returns information about the total storage used by the repository +func (s *KopiaStrategy) GetStorageUsage(ctx context.Context, serviceName string) (*StorageUsageInfo, error) { + // Ensure we're connected to the repository + err := s.EnsureRepositoryConnected(ctx, serviceName) + if err != nil { + return nil, fmt.Errorf("failed to connect to repository: %w", err) + } + + // Get provider type (b2, local, sftp) + providerType := "unknown" + switch s.Provider.(type) { + case *KopiaB2Provider: + providerType = "b2" + case *KopiaLocalProvider: + providerType = "local" + case *KopiaSFTPProvider: + providerType = "sftp" + } + + // Initialize storage info + info := &StorageUsageInfo{ + Provider: providerType, + ProviderID: s.Provider.GetBucketName(serviceName), + } + + // Calculate logical size by summing up the sizes of all snapshots + backups, err := s.ListBackups(ctx, serviceName) + if err == nil { + var totalLogicalBytes int64 + for _, backup := range backups { + totalLogicalBytes += backup.Size + } + info.TotalBytes = totalLogicalBytes + } + + // Try to get physical storage stats using blob stats command + blobStatsCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", s.ConfigPath, + "blob", + "stats", + ) + + blobStatsOutput, err := blobStatsCmd.CombinedOutput() + if err == nil { + // Parse the text output + outputStr := string(blobStatsOutput) + log.Printf("Blob stats output: %s", outputStr) + + // Look for the line with "Total:" + lines := strings.Split(outputStr, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "Total:") { + parts := strings.SplitN(line, ":", 2) + if len(parts) == 2 { + sizeStr := strings.TrimSpace(parts[1]) + size, err := parseHumanSize(sizeStr) + if err == nil { + info.TotalBytes = size + log.Printf("Got physical size from blob stats: %d bytes", size) + break + } else { + log.Printf("Failed to parse size '%s': %v", sizeStr, err) + } + } + } + } + } else { + log.Printf("Blob stats command failed: %v - %s", err, string(blobStatsOutput)) + } + + return info, nil +} + +// Helper method to ensure the repository is connected before operations +func (s *KopiaStrategy) EnsureRepositoryConnected(ctx context.Context, serviceName string) error { + // Check if kopia repository is already 
connected with config file + cmd := exec.Command("kopia", "--config-file", s.ConfigPath, "repository", "status") + err := cmd.Run() + if err != nil { + // Repository not connected, try to connect + password, err := getOrCreatePassword(serviceName, 48) + if err != nil { + return fmt.Errorf("failed to get password: %w", err) + } + + // Connect using provider + if err := s.Provider.Connect(ctx, serviceName, password, s.ConfigPath); err != nil { + return fmt.Errorf("failed to connect to repository: %w", err) + } + } + + return nil +} + +// parseServiceName splits a service name in the format "group.index" into its components +func parseServiceName(serviceName string) (string, string, error) { + parts := strings.SplitN(serviceName, ".", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("service name must be in format 'group.index', got '%s'", serviceName) + } + return parts[0], parts[1], nil +} + +// getOrCreatePassword retrieves a password from kopia.env or creates a new one if it doesn't exist +// Extracted to a package-level function since it's utility functionality +func getOrCreatePassword(serviceName string, length int) (string, error) { + // Define the expected key in the env file + // Replace dots with underscores for environment variable name + safeServiceName := strings.ReplaceAll(serviceName, ".", "_") + passwordKey := fmt.Sprintf("KOPIA_%s_PASSWORD", strings.ToUpper(safeServiceName)) + + // Try to read from kopia.env first + kopiaEnvPath := "kopia.env" + if _, err := os.Stat(kopiaEnvPath); err == nil { + // File exists, check if the password is already there + file, err := os.Open(kopiaEnvPath) + if err != nil { + return "", fmt.Errorf("failed to open kopia.env: %w", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, passwordKey+"=") { + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 { + // Found the password, remove quotes if present + password := parts[1] + password = strings.Trim(password, "\"") + return password, nil + } + } + } + + if err := scanner.Err(); err != nil { + return "", fmt.Errorf("error reading kopia.env: %w", err) + } + } + + // Password not found or file doesn't exist, generate a new password + password, err := generateSecurePassword(length) + if err != nil { + return "", fmt.Errorf("failed to generate password: %w", err) + } + + // Create or append to kopia.env + file, err := os.OpenFile(kopiaEnvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return "", fmt.Errorf("failed to open kopia.env for writing: %w", err) + } + defer file.Close() + + // Write the new password entry with quotes + passwordEntry := fmt.Sprintf("%s=\"%s\"\n", passwordKey, password) + if _, err := file.WriteString(passwordEntry); err != nil { + return "", fmt.Errorf("failed to write to kopia.env: %w", err) + } + + log.Printf("Created new password for service %s and stored in kopia.env", serviceName) + return password, nil +} + +// generateSecurePassword creates a cryptographically secure random password +// using only safe characters that work well with command line tools +func generateSecurePassword(length int) (string, error) { + // Use a more robust but safe character set + // Avoiding characters that might cause shell interpretation issues + // No quotes, backslashes, spaces, or common special chars that need escaping + const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + result := 
make([]byte, length) + + // Create a secure source of randomness + randomBytes := make([]byte, length) + if _, err := rand.Read(randomBytes); err != nil { + return "", err + } + + // Ensure minimum complexity requirements + // At least 2 uppercase, 2 lowercase, 2 digits + if length >= 6 { + // Force first characters to meet minimum requirements + result[0] = 'A' + byte(randomBytes[0]%26) // Uppercase + result[1] = 'A' + byte(randomBytes[1]%26) // Uppercase + result[2] = 'a' + byte(randomBytes[2]%26) // Lowercase + result[3] = 'a' + byte(randomBytes[3]%26) // Lowercase + result[4] = '0' + byte(randomBytes[4]%10) // Digit + result[5] = '0' + byte(randomBytes[5]%10) // Digit + + // Fill the rest with random chars + for i := 6; i < length; i++ { + result[i] = chars[int(randomBytes[i])%len(chars)] + } + + // Shuffle the result to avoid predictable pattern + for i := length - 1; i > 0; i-- { + j := int(randomBytes[i]) % (i + 1) + result[i], result[j] = result[j], result[i] + } + } else { + // For very short passwords just use random chars + for i := 0; i < length; i++ { + result[i] = chars[int(randomBytes[i])%len(chars)] + } + } + + return string(result), nil +} + +// parseHumanSize parses a human-readable size string (e.g., "32.8 MB") into bytes +func parseHumanSize(sizeStr string) (int64, error) { + parts := strings.Fields(sizeStr) + if len(parts) != 2 { + return 0, fmt.Errorf("invalid size format: %s", sizeStr) + } + + value, err := strconv.ParseFloat(parts[0], 64) + if err != nil { + return 0, fmt.Errorf("invalid size value: %w", err) + } + + unit := strings.ToUpper(parts[1]) + switch unit { + case "B": + return int64(value), nil + case "KB", "KIB": + return int64(value * 1024), nil + case "MB", "MIB": + return int64(value * 1024 * 1024), nil + case "GB", "GIB": + return int64(value * 1024 * 1024 * 1024), nil + case "TB", "TIB": + return int64(value * 1024 * 1024 * 1024 * 1024), nil + default: + return 0, fmt.Errorf("unknown size unit: %s", unit) + } +} + +// RestoreBackup restores a backup with the given ID +func (s *KopiaStrategy) RestoreBackup(ctx context.Context, backupID string, serviceName string) error { + // Ensure repository is connected + err := s.EnsureRepositoryConnected(ctx, serviceName) + if err != nil { + return fmt.Errorf("failed to connect to repository: %w", err) + } + + // Parse service group and index from service name + groupName, _, err := parseServiceName(serviceName) + if err != nil { + return fmt.Errorf("invalid service name format: %w", err) + } + + // Get service directory from config + factory, err := NewBackupFactory("config.yml") + if err != nil { + return fmt.Errorf("failed to create factory: %w", err) + } + + // Find service group + serviceGroup, exists := factory.Config.Services[groupName] + if !exists { + return fmt.Errorf("service group not found: %s", groupName) + } + + // Create a temporary directory for restore + restoreDir := filepath.Join(os.TempDir(), fmt.Sprintf("backea-restore-%s-%d", serviceName, time.Now().Unix())) + if err := os.MkdirAll(restoreDir, 0755); err != nil { + return fmt.Errorf("failed to create restore directory: %w", err) + } + + log.Printf("Restoring backup %s to temporary directory %s", backupID, restoreDir) + + // Run kopia restore command to restore the snapshot to the temporary directory + restoreCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", s.ConfigPath, + "snapshot", + "restore", + backupID, + restoreDir, + ) + + restoreOutput, err := restoreCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed 
to restore snapshot: %w\nOutput: %s", err, restoreOutput) + } + + // Now we need to sync the restored data to the original directory + targetDir := serviceGroup.Source.Path + log.Printf("Syncing restored data from %s to %s", restoreDir, targetDir) + + // Use rsync for the final transfer to avoid permissions issues + syncCmd := exec.CommandContext( + ctx, + "rsync", + "-av", + "--delete", // Delete extraneous files from target + restoreDir+"/", // Source directory with trailing slash to copy contents + targetDir, // Target directory + ) + + syncOutput, err := syncCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to sync restored data: %w\nOutput: %s", err, syncOutput) + } + + // Clean up the temporary directory + go func() { + time.Sleep(5 * time.Minute) // Wait 5 minutes before cleaning up + log.Printf("Cleaning up temporary restore directory %s", restoreDir) + os.RemoveAll(restoreDir) + }() + + log.Printf("Successfully restored backup %s to %s", backupID, targetDir) + return nil +} + +// DownloadBackup provides a reader with backup summary information in text format +func (s *KopiaStrategy) DownloadBackup(ctx context.Context, backupID string, serviceName string) (io.ReadCloser, error) { + + // Ensure repository is connected + err := s.EnsureRepositoryConnected(ctx, serviceName) + if err != nil { + return nil, fmt.Errorf("failed to connect to repository: %w", err) + } + + // Create temporary directories for restore and ZIP creation + tempDir, err := os.MkdirTemp("", "backea-download-*") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + + restoreDir := filepath.Join(tempDir, "restore") + zipFile := filepath.Join(tempDir, "backup.zip") + + if err := os.MkdirAll(restoreDir, 0755); err != nil { + os.RemoveAll(tempDir) + return nil, fmt.Errorf("failed to create restore directory: %w", err) + } + + // Restore the snapshot to the temporary directory using the proper snapshot ID + log.Printf("Restoring snapshot %s to temporary directory %s", backupID, restoreDir) + + restoreCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", s.ConfigPath, + "snapshot", + "restore", + backupID, + restoreDir, + ) + + restoreOutput, err := restoreCmd.CombinedOutput() + if err != nil { + os.RemoveAll(tempDir) + return nil, fmt.Errorf("failed to restore snapshot: %w\nOutput: %s", err, restoreOutput) + } + + // Create ZIP archive of the restored files + log.Printf("Creating ZIP archive at %s", zipFile) + + // Use Go's zip package instead of command line tools + zipWriter, err := os.Create(zipFile) + if err != nil { + os.RemoveAll(tempDir) + return nil, fmt.Errorf("failed to create zip file: %w", err) + } + + defer zipWriter.Close() + + // Create ZIP writer + archive := zip.NewWriter(zipWriter) + defer archive.Close() + + // Walk the restore directory and add files to ZIP + err = filepath.Walk(restoreDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories, we only want to add files + if info.IsDir() { + return nil + } + + // Create ZIP header + header, err := zip.FileInfoHeader(info) + if err != nil { + return err + } + + // Make the path relative to the restore directory + relPath, err := filepath.Rel(restoreDir, path) + if err != nil { + return err + } + + // Set the name in the archive to the relative path + header.Name = relPath + + // Create the file in the ZIP + writer, err := archive.CreateHeader(header) + if err != nil { + return err + } + + // Open the file + file, err := 
os.Open(path) + if err != nil { + return err + } + defer file.Close() + + // Copy the file content to the ZIP + _, err = io.Copy(writer, file) + return err + }) + + // Close the ZIP writer + archive.Close() + zipWriter.Close() + + if err != nil { + os.RemoveAll(tempDir) + return nil, fmt.Errorf("failed to create zip archive: %w", err) + } + + // Open the ZIP file for reading + zipReader, err := os.Open(zipFile) + if err != nil { + os.RemoveAll(tempDir) + return nil, fmt.Errorf("failed to open zip archive: %w", err) + } + + // Return a reader that will clean up when closed + return &cleanupReadCloser{ + ReadCloser: zipReader, + cleanup: func() { + zipReader.Close() + os.RemoveAll(tempDir) + }, + }, nil +} + +// GetBackupInfo returns detailed information about a specific backup +func (s *KopiaStrategy) GetBackupInfo(ctx context.Context, backupID string, serviceName string) (*BackupInfo, error) { + + err := s.EnsureRepositoryConnected(ctx, serviceName) + if err != nil { + return nil, fmt.Errorf("failed to connect to repository: %w", err) + } + + // Run kopia snapshot describe command with JSON output + cmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", s.ConfigPath, + "snapshot", + "describe", + "--json", + backupID, + ) + + output, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("failed to describe snapshot: %w", err) + } + + // Parse the JSON output + var snap map[string]interface{} + if err := json.Unmarshal(output, &snap); err != nil { + return nil, fmt.Errorf("failed to parse snapshot info: %w", err) + } + + // Extract the relevant information + id, _ := snap["id"].(string) + endTime, _ := snap["endTime"].(string) + + // Parse time + var creationTime time.Time + if endTime != "" { + t, err := time.Parse(time.RFC3339, endTime) + if err == nil { + creationTime = t + } + } + + // Get size information from stats + var size int64 + if stats, ok := snap["stats"].(map[string]interface{}); ok { + if totalSize, ok := stats["totalSize"].(float64); ok { + size = int64(totalSize) + } + } + + // Get source path + sourcePath := "" + if source, ok := snap["source"].(map[string]interface{}); ok { + if path, ok := source["path"].(string); ok { + sourcePath = path + } + } + + // Determine retention tag + retentionTag := "none" + if reasons, ok := snap["retentionReason"].([]interface{}); ok && len(reasons) > 0 { + // Get the first reason which indicates the highest priority retention + if reason, ok := reasons[0].(string); ok { + parts := strings.SplitN(reason, "-", 2) + if len(parts) > 0 { + retentionTag = parts[0] + } + } + } + + return &BackupInfo{ + ID: id, + CreationTime: creationTime, + Size: size, + Source: sourcePath, + Type: "kopia", + RetentionTag: retentionTag, + }, nil +} + +// cleanupReadCloser is a wrapper around io.ReadCloser that performs cleanup when closed +type cleanupReadCloser struct { + io.ReadCloser + cleanup func() +} + +// Close closes the underlying ReadCloser and performs cleanup +func (c *cleanupReadCloser) Close() error { + err := c.ReadCloser.Close() + c.cleanup() + return err +} + +// extractServiceNameFromBackupID extracts the service name from a backup ID +// This is an approximation as the exact format depends on your ID structure +func extractServiceNameFromBackupID(backupID string) string { + // Kopia snapshot IDs don't directly include the service name + // You may need to adjust this based on your actual ID format + + // Try to extract the pattern from your backups list + // If you're using ListBackups to find all backups, you might have + 
// a mapping of IDs to service names that you can use + + // For this example, let's assume we're using environment variables to track this + envVar := fmt.Sprintf("BACKEA_SNAPSHOT_%s", backupID) + if serviceName := os.Getenv(envVar); serviceName != "" { + return serviceName + } + + // Fallback: For testing, we'll return a default or parse from the first part + // This should be replaced with your actual logic + parts := strings.Split(backupID, "-") + if len(parts) > 0 { + return parts[0] + } + + return "unknown" +} diff --git a/internal/backup/backup_rsync.go b/internal/backup/backup_rsync.go new file mode 100644 index 0000000..8dec2c9 --- /dev/null +++ b/internal/backup/backup_rsync.go @@ -0,0 +1,51 @@ +package backup + +import ( + "context" + "fmt" + "log" + "os" + "os/exec" +) + +// RsyncStrategy implements the backup strategy using rsync +type RsyncStrategy struct { + Options string + Destination Destination +} + +// NewRsyncStrategy creates a new rsync strategy +func NewRsyncStrategy(options string, destination Destination) *RsyncStrategy { + return &RsyncStrategy{ + Options: options, + Destination: destination, + } +} + +// Execute performs the rsync backup +func (s *RsyncStrategy) Execute(ctx context.Context, serviceName, directory string) error { + log.Printf("Performing rsync backup for %s", serviceName) + + // Build rsync command + dst := s.Destination + sshKeyOption := "" + + // Without + if dst.SSHKey != "" { + sshKeyOption = fmt.Sprintf("-e 'ssh -i %s'", dst.SSHKey) + } + + destination := fmt.Sprintf("%s:%s", dst.Host, dst.Path) + rsyncCmd := fmt.Sprintf("rsync %s %s %s %s", + s.Options, + sshKeyOption, + directory, + destination) + + // Run rsync command + log.Printf("Running: %s", rsyncCmd) + cmd := exec.CommandContext(ctx, "sh", "-c", rsyncCmd) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/internal/backup/config.go b/internal/backup/config.go new file mode 100644 index 0000000..d436a04 --- /dev/null +++ b/internal/backup/config.go @@ -0,0 +1,112 @@ +package backup + +import ( + "log" + + "github.com/joho/godotenv" + "github.com/spf13/viper" +) + +// Configuration holds the complete application configuration +type Configuration struct { + Defaults DefaultsConfig `mapstructure:"defaults"` + Services map[string]ServiceGroup `mapstructure:"services"` +} + +// DefaultsConfig holds the default settings for all services +type DefaultsConfig struct { + Retention RetentionConfig `mapstructure:"retention"` +} + +// ServiceGroup represents a service group with a common directory and multiple backup configurations +type ServiceGroup struct { + Directory string `mapstructure:"directory,omitempty"` + Source SourceConfig `mapstructure:"source"` + Hooks HooksConfig `mapstructure:"hooks"` + BackupConfigs map[string]BackupConfig `mapstructure:"backup_configs"` +} + +// SourceConfig represents source configuration for backups +type SourceConfig struct { + Host string `mapstructure:"host"` + Path string `mapstructure:"path"` + SSHKey string `mapstructure:"ssh_key"` +} + +// HooksConfig contains the hooks to run before and after the backup +type HooksConfig struct { + BeforeHook string `mapstructure:"before_hook"` + AfterHook string `mapstructure:"after_hook"` +} + +// BackupConfig represents a specific backup configuration +type BackupConfig struct { + BackupStrategy StrategyConfig `mapstructure:"backup_strategy"` +} + +// StrategyConfig represents a backup strategy configuration +type StrategyConfig struct { + Type string `mapstructure:"type"` + 
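For reference, a minimal sketch of driving the RsyncStrategy defined in backup_rsync.go above directly; the service name, directory, option string and Destination values are illustrative placeholders, not values taken from this patch.

package main

import (
	"backea/internal/backup"
	"context"
	"log"
)

func main() {
	// Hypothetical destination; Host/Path/SSHKey are illustrative only.
	dst := backup.Destination{
		Host:   "backup@backups.example.org",
		Path:   "/srv/backups/wiki",
		SSHKey: "/home/backea/.ssh/id_ed25519",
	}

	// "-az --delete" is an assumed option string, not one mandated by the patch.
	strategy := backup.NewRsyncStrategy("-az --delete", dst)

	// Execute runs rsync for the given service name and source directory.
	if err := strategy.Execute(context.Background(), "wiki", "/srv/wiki/data"); err != nil {
		log.Fatalf("rsync backup failed: %v", err)
	}
}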
Provider string `mapstructure:"provider"` + Options string `mapstructure:"options,omitempty"` + Retention RetentionConfig `mapstructure:"retention,omitempty"` + Destination DestConfig `mapstructure:"destination"` +} + +// RetentionConfig represents retention policy configuration +type RetentionConfig struct { + KeepLatest int `mapstructure:"keep_latest"` + KeepHourly int `mapstructure:"keep_hourly"` + KeepDaily int `mapstructure:"keep_daily"` + KeepWeekly int `mapstructure:"keep_weekly"` + KeepMonthly int `mapstructure:"keep_monthly"` + KeepYearly int `mapstructure:"keep_yearly"` +} + +// DestConfig represents destination configuration for remote backups +type DestConfig struct { + Host string `mapstructure:"host,omitempty"` + Path string `mapstructure:"path"` + SSHKey string `mapstructure:"ssh_key,omitempty"` +} + +// LoadConfig loads the application configuration from a file +func LoadConfig(configPath string) (*Configuration, error) { + if err := godotenv.Load(); err != nil { + log.Printf("Warning: Error loading .env file: %v", err) + } + + viper.SetConfigFile(configPath) + if err := viper.ReadInConfig(); err != nil { + return nil, err + } + + var config Configuration + if err := viper.Unmarshal(&config); err != nil { + return nil, err + } + + // Apply default retention settings where needed + for serviceName, service := range config.Services { + for configID, backupConfig := range service.BackupConfigs { + // If retention is not set, use defaults + if isRetentionEmpty(backupConfig.BackupStrategy.Retention) { + backupConfig.BackupStrategy.Retention = config.Defaults.Retention + service.BackupConfigs[configID] = backupConfig + } + } + config.Services[serviceName] = service + } + + return &config, nil +} + +// isRetentionEmpty checks if a retention config is empty (all zeros) +func isRetentionEmpty(retention RetentionConfig) bool { + return retention.KeepLatest == 0 && + retention.KeepHourly == 0 && + retention.KeepDaily == 0 && + retention.KeepWeekly == 0 && + retention.KeepMonthly == 0 && + retention.KeepYearly == 0 +} diff --git a/internal/backup/kopia_providers.go b/internal/backup/kopia_providers.go new file mode 100644 index 0000000..ce9f345 --- /dev/null +++ b/internal/backup/kopia_providers.go @@ -0,0 +1,277 @@ +package backup + +import ( + b2_client "backea/internal/client" + "context" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" +) + +// KopiaProvider defines the interface for different Kopia storage backends +type KopiaProvider interface { + // Connect connects to an existing repository or creates a new one + Connect(ctx context.Context, serviceName string, password string, configPath string) error + // GetRepositoryParams returns the parameters needed for repository operations + GetRepositoryParams(serviceName string) ([]string, error) + // GetBucketName returns the storage identifier (bucket name or path) + GetBucketName(serviceName string) string + // GetProviderType returns a string identifying the provider type + GetProviderType() string +} + +// KopiaB2Provider implements the KopiaProvider interface for Backblaze B2 +type KopiaB2Provider struct{} + +// NewKopiaB2Provider creates a new B2 provider +func NewKopiaB2Provider() *KopiaB2Provider { + return &KopiaB2Provider{} +} + +// Connect connects to a B2 repository +func (p *KopiaB2Provider) Connect(ctx context.Context, serviceName string, password string, configPath string) error { + // Load environment variables for B2 credentials + keyID := os.Getenv("B2_KEY_ID") + applicationKey := 
os.Getenv("B2_APPLICATION_KEY") + if keyID == "" || applicationKey == "" { + return fmt.Errorf("B2_KEY_ID and B2_APPLICATION_KEY must be set in .env file") + } + + bucketName := p.GetBucketName(serviceName) + + // Create B2 client and check if bucket exists, create if not + B2Client, _ := b2_client.NewClientFromEnv() + if B2Client == nil { + return fmt.Errorf("B2 client not initialized") + } + + _, err := B2Client.GetBucket(ctx, bucketName) + if err != nil { + log.Printf("Bucket %s not found, creating...", bucketName) + _, err = B2Client.CreateBucket(ctx, bucketName, false) + if err != nil { + return fmt.Errorf("failed to create bucket: %w", err) + } + log.Printf("Created bucket: %s", bucketName) + } + + // Try to connect to existing repository with config file + connectCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", configPath, + "repository", "connect", "b2", + "--bucket", bucketName, + "--key-id", keyID, + "--key", applicationKey, + "--password", password, + ) + err = connectCmd.Run() + if err != nil { + // Connection failed, create new repository + log.Printf("Creating new B2 repository for %s", serviceName) + createCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", configPath, + "repository", "create", "b2", + "--bucket", bucketName, + "--key-id", keyID, + "--key", applicationKey, + "--password", password, + ) + createOutput, err := createCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create repository: %w\nOutput: %s", err, createOutput) + } + } + + return nil +} + +// GetRepositoryParams returns parameters for B2 operations +func (p *KopiaB2Provider) GetRepositoryParams(serviceName string) ([]string, error) { + keyID := os.Getenv("B2_KEY_ID") + applicationKey := os.Getenv("B2_APPLICATION_KEY") + if keyID == "" || applicationKey == "" { + return nil, fmt.Errorf("B2_KEY_ID and B2_APPLICATION_KEY must be set in .env file") + } + + bucketName := p.GetBucketName(serviceName) + + return []string{ + "b2", + "--bucket", bucketName, + "--key-id", keyID, + "--key", applicationKey, + }, nil +} + +// Modify the GetBucketName method in your KopiaB2Provider +func (p *KopiaB2Provider) GetBucketName(serviceName string) string { + // Backblaze dont allow . 
+ sanitized := strings.ReplaceAll(serviceName, ".", "-") + + // Add a prefix to make the bucket name unique + return fmt.Sprintf("backea-%s", sanitized) +} + +// GetProviderType returns the provider type identifier +func (p *KopiaB2Provider) GetProviderType() string { + return "b2" +} + +// KopiaLocalProvider implements the KopiaProvider interface for local storage +type KopiaLocalProvider struct { + BasePath string +} + +// NewKopiaLocalProvider creates a new local provider +func NewKopiaLocalProvider(basePath string) *KopiaLocalProvider { + // If basePath is empty, use a default location + if basePath == "" { + basePath = filepath.Join(os.Getenv("HOME"), ".backea", "repos") + } + return &KopiaLocalProvider{ + BasePath: basePath, + } +} + +// Connect connects to a local repository +func (p *KopiaLocalProvider) Connect(ctx context.Context, serviceName string, password string, configPath string) error { + repoPath := filepath.Join(p.BasePath, serviceName) + log.Printf("Connecting to local repository at %s with config: %s", repoPath, configPath) + + // Ensure the directory exists + if err := os.MkdirAll(repoPath, 0755); err != nil { + return fmt.Errorf("failed to create repository directory: %w", err) + } + + // Try to connect to existing repository with config file + connectCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", configPath, + "repository", "connect", "filesystem", + "--path", repoPath, + "--password", password, + ) + err := connectCmd.Run() + if err != nil { + // Connection failed, create new repository + log.Printf("Creating new local repository for %s at destination: %s", serviceName, repoPath) + createCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", configPath, + "repository", "create", "filesystem", + "--path", repoPath, + "--password", password, + ) + createOutput, err := createCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create repository: %w\nOutput: %s", err, createOutput) + } + } + + return nil +} + +// GetRepositoryParams returns parameters for local filesystem operations +func (p *KopiaLocalProvider) GetRepositoryParams(serviceName string) ([]string, error) { + repoPath := filepath.Join(p.BasePath, serviceName) + return []string{ + "filesystem", + "--path", repoPath, + }, nil +} + +// GetBucketName returns the path for a service +func (p *KopiaLocalProvider) GetBucketName(serviceName string) string { + return filepath.Join(p.BasePath, serviceName) +} + +// GetProviderType returns the provider type identifier +func (p *KopiaLocalProvider) GetProviderType() string { + return "local" +} + +// KopiaSFTPProvider implements the KopiaProvider interface for SFTP remote storage +type KopiaSFTPProvider struct { + Host string + BasePath string + Username string + KeyFile string +} + +// NewKopiaSFTPProvider creates a new SFTP provider +func NewKopiaSFTPProvider(host, basePath, username, keyFile string) *KopiaSFTPProvider { + return &KopiaSFTPProvider{ + Host: host, + BasePath: basePath, + Username: username, + KeyFile: keyFile, + } +} + +// Connect connects to an SFTP repository +func (p *KopiaSFTPProvider) Connect(ctx context.Context, serviceName string, password string, configPath string) error { + repoPath := fmt.Sprintf("%s@%s:%s/%s", p.Username, p.Host, p.BasePath, serviceName) + + // Try to connect to existing repository with config file + connectCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", configPath, + "repository", "connect", "sftp", + "--path", repoPath, + "--keyfile", p.KeyFile, + 
"--known-hosts", filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts"), + "--password", password, + ) + err := connectCmd.Run() + if err != nil { + // Connection failed, create new repository + log.Printf("Creating new SFTP repository for %s at %s", serviceName, repoPath) + createCmd := exec.CommandContext( + ctx, + "kopia", + "--config-file", configPath, + "repository", "create", "sftp", + "--path", repoPath, + "--keyfile", p.KeyFile, + "--known-hosts", filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts"), + "--password", password, + ) + createOutput, err := createCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create repository: %w\nOutput: %s", err, createOutput) + } + } + + return nil +} + +// GetRepositoryParams returns parameters for SFTP operations +func (p *KopiaSFTPProvider) GetRepositoryParams(serviceName string) ([]string, error) { + repoPath := fmt.Sprintf("%s@%s:%s/%s", p.Username, p.Host, p.BasePath, serviceName) + return []string{ + "sftp", + "--path", repoPath, + "--keyfile", p.KeyFile, + "--known-hosts", filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts"), + }, nil +} + +// GetBucketName returns the path for a service +func (p *KopiaSFTPProvider) GetBucketName(serviceName string) string { + return fmt.Sprintf("%s/%s", p.BasePath, serviceName) +} + +// GetProviderType returns the provider type identifier +func (p *KopiaSFTPProvider) GetProviderType() string { + return "sftp" +} diff --git a/internal/backup/restore.go b/internal/backup/restore.go new file mode 100644 index 0000000..0632f98 --- /dev/null +++ b/internal/backup/restore.go @@ -0,0 +1,179 @@ +package backup + +import ( + client "backea/internal/client" + "bufio" + "context" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" +) + +// RestoreManager handles restoring files from Kopia backups +type RestoreManager struct { + B2Client *client.Client +} + +// NewRestoreManager creates a new restore manager +func NewRestoreManager(b2Client *client.Client) *RestoreManager { + return &RestoreManager{ + B2Client: b2Client, + } +} + +// getStoredPassword retrieves a password from kopia.env file +func (r *RestoreManager) getStoredPassword(serviceName string) (string, error) { + // Define the expected key in the env file + passwordKey := fmt.Sprintf("KOPIA_%s_PASSWORD", strings.ToUpper(serviceName)) + + // Try to read from kopia.env + kopiaEnvPath := "kopia.env" + if _, err := os.Stat(kopiaEnvPath); err != nil { + return "", fmt.Errorf("kopia.env file not found: %w", err) + } + + // File exists, check if the password is there + file, err := os.Open(kopiaEnvPath) + if err != nil { + return "", fmt.Errorf("failed to open kopia.env: %w", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, passwordKey+"=") { + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 { + // Found the password, remove quotes if present + password := parts[1] + password = strings.Trim(password, "\"") + return password, nil + } + } + } + + if err := scanner.Err(); err != nil { + return "", fmt.Errorf("error reading kopia.env: %w", err) + } + + return "", fmt.Errorf("password for service %s not found in kopia.env", serviceName) +} + +// connectToRepository connects to an existing Kopia repository +func (r *RestoreManager) connectToRepository(ctx context.Context, serviceName string) error { + if r.B2Client == nil { + return fmt.Errorf("B2 client not initialized") + } + + // Load environment variables for B2 
credentials + keyID := os.Getenv("B2_KEY_ID") + applicationKey := os.Getenv("B2_APPLICATION_KEY") + if keyID == "" || applicationKey == "" { + return fmt.Errorf("B2_KEY_ID and B2_APPLICATION_KEY must be set in .env file") + } + + // Get stored password for this service + password, err := r.getStoredPassword(serviceName) + if err != nil { + return fmt.Errorf("failed to get stored password: %w", err) + } + + // Generate bucket name from service name + bucketName := fmt.Sprintf("backea-%s", strings.ToLower(serviceName)) + + // Check if bucket exists + _, err = r.B2Client.GetBucket(ctx, bucketName) + if err != nil { + return fmt.Errorf("bucket %s not found: %w", bucketName, err) + } + + // Check if kopia repository is already connected + cmd := exec.Command("kopia", "repository", "status") + err = cmd.Run() + if err == nil { + // Already connected + log.Printf("Already connected to a repository, disconnecting first") + disconnectCmd := exec.Command("kopia", "repository", "disconnect") + if err := disconnectCmd.Run(); err != nil { + return fmt.Errorf("failed to disconnect from current repository: %w", err) + } + } + + // Connect to the repository + log.Printf("Connecting to B2 repository for %s", serviceName) + connectCmd := exec.Command( + "kopia", "repository", "connect", "b2", + "--bucket", bucketName, + "--key-id", keyID, + "--key", applicationKey, + "--password", password, + ) + connectOutput, err := connectCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to connect to repository: %w\nOutput: %s", err, connectOutput) + } + + log.Printf("Successfully connected to repository for %s", serviceName) + return nil +} + +// ListSnapshots lists all snapshots in the repository for a given service +func (r *RestoreManager) ListSnapshots(ctx context.Context, serviceName string) error { + // Connect to the repository + if err := r.connectToRepository(ctx, serviceName); err != nil { + return err + } + + // List all snapshots + log.Printf("Listing snapshots for %s", serviceName) + listCmd := exec.Command("kopia", "snapshot", "list") + listOutput, err := listCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to list snapshots: %w\nOutput: %s", err, listOutput) + } + + // Print the output + fmt.Println("Available snapshots:") + fmt.Println(string(listOutput)) + + return nil +} + +// RestoreFile restores a file or directory from a specific snapshot to a target location +func (r *RestoreManager) RestoreFile(ctx context.Context, serviceName, snapshotID, sourcePath, targetPath string) error { + // Connect to the repository + if err := r.connectToRepository(ctx, serviceName); err != nil { + return err + } + + // Create a subdirectory with the snapshot ID name + snapshotDirPath := filepath.Join(targetPath, snapshotID) + if err := os.MkdirAll(snapshotDirPath, 0755); err != nil { + return fmt.Errorf("failed to create target directory: %w", err) + } + + // Construct the kopia restore command + log.Printf("Restoring from snapshot %s to %s", snapshotID, snapshotDirPath) + + // Build the command with all required parameters + restoreCmd := exec.Command( + "kopia", "snapshot", "restore", + snapshotID, // Just the snapshot ID + snapshotDirPath, // Target location where files will be restored + ) + + // Execute the command and capture output + restoreOutput, err := restoreCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to restore files: %w\nOutput: %s", err, restoreOutput) + } + + log.Printf("Successfully restored files to %s", snapshotDirPath) + log.Printf("Restore 
output: %s", string(restoreOutput)) + + return nil +} diff --git a/internal/backup/service.go b/internal/backup/service.go new file mode 100644 index 0000000..48851fe --- /dev/null +++ b/internal/backup/service.go @@ -0,0 +1,71 @@ +package backup + +import ( + "backea/internal/mail" + "context" + "fmt" + "log" + "os" + "os/exec" + "time" +) + +// Service represents a service to be backed up +type Service struct { + Name string + Directory string + Strategy Strategy + Mailer *mail.Mailer +} + +// NewService creates a new backup service +func NewService(name string, directory string, strategy Strategy, mailer *mail.Mailer) *Service { + return &Service{ + Name: name, + Directory: directory, + Strategy: strategy, + Mailer: mailer, + } +} + +// Backup performs the backup for this service +func (s *Service) Backup(ctx context.Context) error { + log.Printf("Backing up service: %s", s.Name) + startTime := time.Now() + + // Ensure directory exists + if _, err := os.Stat(s.Directory); os.IsNotExist(err) { + return fmt.Errorf("directory does not exist: %s", s.Directory) + } + + // Execute the backup strategy + if err := s.Strategy.Execute(ctx, s.Name, s.Directory); err != nil { + if s.Mailer != nil { + s.Mailer.SendErrorNotification(s.Name, err) + } + return fmt.Errorf("backup failed: %w", err) + } + + // Record backup completion + duration := time.Since(startTime) + log.Printf("Backup completed for %s in %v", s.Name, duration) + + // Send success notification + if s.Mailer != nil { + s.Mailer.SendSuccessNotification(s.Name, duration) + } + + return nil +} + +// RunCommand executes a shell command in the specified directory +// This is now a package function rather than a method on Service +func RunCommand(dir, command string) error { + cmd := exec.Command("sh", "-c", command) + if dir != "" { + cmd.Dir = dir + } + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/internal/backup/strategy.go b/internal/backup/strategy.go new file mode 100644 index 0000000..dd0d64f --- /dev/null +++ b/internal/backup/strategy.go @@ -0,0 +1,57 @@ +package backup + +import ( + "context" + "io" + "time" +) + +// Strategy defines the backup/restore strategy interface +type Strategy interface { + // Execute performs a backup + Execute(ctx context.Context, serviceName, directory string) error + + ListBackups(ctx context.Context, serviceName string) ([]BackupInfo, error) + + GetStorageUsage(ctx context.Context, serviceName string) (*StorageUsageInfo, error) + + RestoreBackup(ctx context.Context, backupID string, serviceName string) error + + DownloadBackup(ctx context.Context, backupID, serviceName string) (io.ReadCloser, error) + + GetBackupInfo(context.Context, string, string) (*BackupInfo, error) +} + +// BackupInfo contains information about a single backup +type BackupInfo struct { + ID string // Unique identifier for the backup + CreationTime time.Time // When the backup was created + Size int64 // Size in bytes + Source string // Original source path + Type string // Type of backup (e.g., "kopia", "restic") + RetentionTag string // Retention policy applied (e.g., "latest", "daily", "weekly") +} + +// StorageUsageInfo contains information about backup storage usage +type StorageUsageInfo struct { + TotalBytes int64 // Total bytes stored + Provider string // Storage provider (e.g., "local", "b2", "s3") + ProviderID string // Provider-specific ID (bucket name, path, etc.) 
+} + +// Retention represents retention policy for backups +type Retention struct { + KeepLatest int + KeepHourly int + KeepDaily int + KeepWeekly int + KeepMonthly int + KeepYearly int +} + +// Destination represents rsync destination +type Destination struct { + Host string + Path string + SSHKey string +} diff --git a/internal/client/b2_client.go b/internal/client/b2_client.go new file mode 100644 index 0000000..326deb6 --- /dev/null +++ b/internal/client/b2_client.go @@ -0,0 +1,91 @@ +package b2_client + +import ( + "context" + "fmt" + "os" + + "github.com/joho/godotenv" + "gopkg.in/kothar/go-backblaze.v0" +) + +// Client represents a B2 client +type Client struct { + b2 *backblaze.B2 +} + +// NewClientFromEnv creates a new B2 client using credentials from .env +func NewClientFromEnv() (*Client, error) { + // Load .env file + if err := godotenv.Load(); err != nil { + return nil, fmt.Errorf("error loading .env file: %w", err) + } + + // Get credentials from environment + keyID := os.Getenv("B2_KEY_ID") + applicationKey := os.Getenv("B2_APPLICATION_KEY") + + if keyID == "" || applicationKey == "" { + return nil, fmt.Errorf("B2_KEY_ID and B2_APPLICATION_KEY must be set in .env file") + } + + // Create B2 client + b2, err := backblaze.NewB2(backblaze.Credentials{ + AccountID: keyID, + ApplicationKey: applicationKey, + }) + + if err != nil { + return nil, fmt.Errorf("error creating B2 client: %w", err) + } + + return &Client{ + b2: b2, + }, nil +} + +// ListBuckets lists all buckets +func (c *Client) ListBuckets(ctx context.Context) ([]*backblaze.Bucket, error) { + // List all buckets + buckets, err := c.b2.ListBuckets() + if err != nil { + return nil, fmt.Errorf("error listing buckets: %w", err) + } + + return buckets, nil +} + +// GetBucket gets a bucket by name +func (c *Client) GetBucket(ctx context.Context, name string) (*backblaze.Bucket, error) { + // List all buckets + buckets, err := c.b2.ListBuckets() + if err != nil { + return nil, fmt.Errorf("error listing buckets: %w", err) + } + + // Find bucket by name + for _, bucket := range buckets { + if bucket.Name == name { + return bucket, nil + } + } + + return nil, fmt.Errorf("bucket not found: %s", name) +} + +// CreateBucket creates a new bucket +func (c *Client) CreateBucket(ctx context.Context, name string, public bool) (*backblaze.Bucket, error) { + // Set bucket type + bucketType := backblaze.AllPrivate + if public { + bucketType = backblaze.AllPublic + } + + // Create bucket + bucket, err := c.b2.CreateBucket(name, bucketType) + if err != nil { + return nil, fmt.Errorf("error creating bucket: %w", err) + } + + return bucket, nil +} diff --git a/internal/mail/mail.go b/internal/mail/mail.go new file mode 100644 index 0000000..fa2b3d8 --- /dev/null +++ b/internal/mail/mail.go @@ -0,0 +1,104 @@ +package mail + +import ( + "fmt" + "net/smtp" + "os" + "time" +) + +// Config holds SMTP server configuration +type Config struct { + Host string + Port string + Username string + Password string + From string +} + +// Client handles email operations +type Client struct { + Config Config +} + +// Mailer is an alias for Client to maintain compatibility with backup package +type Mailer struct { + Config Config +} + +// NewClient creates a new mail client from environment variables +func NewClient() *Client { + return &Client{ + Config: Config{ + Host: os.Getenv("SMTP_HOST"), + Port: os.Getenv("SMTP_PORT"), + Username: os.Getenv("SMTP_USERNAME"), + Password: os.Getenv("SMTP_PASSWORD"), + From: os.Getenv("SMTP_FROM"), + }, + } +} + +// 
NewMailer creates a new mailer from environment variables +func NewMailer() *Mailer { + client := NewClient() + return &Mailer{ + Config: client.Config, + } +} + +// SendMail sends an email notification +func (c *Client) SendMail(to []string, subject, body string) error { + addr := fmt.Sprintf("%s:%s", c.Config.Host, c.Config.Port) + // Compose message + message := []byte(fmt.Sprintf("From: %s\r\n"+ + "To: %s\r\n"+ + "Subject: %s\r\n"+ + "\r\n"+ + "%s\r\n", c.Config.From, to[0], subject, body)) + // Authenticate + auth := smtp.PlainAuth("", c.Config.Username, c.Config.Password, c.Config.Host) + // Send mail + return smtp.SendMail(addr, auth, c.Config.From, to, message) +} + +// SendMail sends an email notification (Mailer version) +func (m *Mailer) SendMail(to []string, subject, body string) error { + addr := fmt.Sprintf("%s:%s", m.Config.Host, m.Config.Port) + // Compose message + message := []byte(fmt.Sprintf("From: %s\r\n"+ + "To: %s\r\n"+ + "Subject: %s\r\n"+ + "\r\n"+ + "%s\r\n", m.Config.From, to[0], subject, body)) + // Authenticate + auth := smtp.PlainAuth("", m.Config.Username, m.Config.Password, m.Config.Host) + // Send mail + return smtp.SendMail(addr, auth, m.Config.From, to, message) +} + +// SendSuccessNotification sends a backup success notification +func (m *Mailer) SendSuccessNotification(serviceName string, duration time.Duration) error { + recipients := []string{os.Getenv("NOTIFICATION_EMAIL")} + if recipients[0] == "" { + recipients[0] = m.Config.From // Fallback to sender if no notification email set + } + + subject := fmt.Sprintf("Backup Successful: %s", serviceName) + body := fmt.Sprintf("The backup for service %s completed successfully in %v.", serviceName, duration) + + return m.SendMail(recipients, subject, body) +} + +// SendErrorNotification sends a backup error notification +func (m *Mailer) SendErrorNotification(serviceName string, err error) error { + recipients := []string{os.Getenv("NOTIFICATION_EMAIL")} + if recipients[0] == "" { + recipients[0] = m.Config.From // Fallback to sender if no notification email set + } + + subject := fmt.Sprintf("Backup Failed: %s", serviceName) + body := fmt.Sprintf("The backup for service %s failed with error: %v", serviceName, err) + + return m.SendMail(recipients, subject, body) +} diff --git a/internal/mail/mail_test.go b/internal/mail/mail_test.go new file mode 100644 index 0000000..89ce707 --- /dev/null +++ b/internal/mail/mail_test.go @@ -0,0 +1,39 @@ +package mail + +import ( + "os" + "testing" + + "github.com/joho/godotenv" +) + +func TestSendMailWithRealEnv(t *testing.T) { + // Load the real .env file + err := godotenv.Load() + if err != nil { + t.Skip("Skipping test: .env file not found") + } + + // Check if SMTP configuration exists + host := os.Getenv("SMTP_HOST") + username := os.Getenv("SMTP_USERNAME") + password := os.Getenv("SMTP_PASSWORD") + + if host == "" || username == "" || password == "" { + t.Skip("Skipping test: SMTP configuration not found in .env") + } + + // Create a real client using environment variables + client := NewClient() + + // Test with a real email address (consider using a test address) + recipients := []string{"romaric.sirii@gmail.com"} + subject := "Test Email from Go Test" + body := "This is a test email sent from the mail package test" + + // Send the email + err = client.SendMail(recipients, subject, body) + if err != nil { + t.Errorf("SendMail failed with error: %v", err) + } +} diff --git a/internal/server/server.go b/internal/server/server.go new file mode 100644 index 
0000000..246749b --- /dev/null +++ b/internal/server/server.go @@ -0,0 +1,26 @@ +package server + +import ( + "backea/internal/backup" + "backea/internal/web/routes" + + "github.com/labstack/echo/v4" + "github.com/labstack/echo/v4/middleware" +) + +// New creates a new configured Echo server +func New(factory *backup.BackupFactory) *echo.Echo { + e := echo.New() + + // Add middleware + e.Use(middleware.Logger()) + e.Use(middleware.Recover()) + + // Setup static file serving + e.Static("/static", "static") + + // Register routes + routes.RegisterRoutes(e, factory) + + return e +} diff --git a/internal/web/handlers/backup_actions_handler.go b/internal/web/handlers/backup_actions_handler.go new file mode 100644 index 0000000..6480aea --- /dev/null +++ b/internal/web/handlers/backup_actions_handler.go @@ -0,0 +1,377 @@ +package handlers + +import ( + "backea/internal/backup" + "context" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/labstack/echo/v4" +) + +// BackupActionsHandler handles backup-related actions like restore and download +type BackupActionsHandler struct { + backupFactory *backup.BackupFactory +} + +// NewBackupActionsHandler creates a new backup actions handler +func NewBackupActionsHandler(factory *backup.BackupFactory) *BackupActionsHandler { + return &BackupActionsHandler{ + backupFactory: factory, + } +} + +// RestoreRequest represents the request data for a backup restore +type RestoreRequest struct { + BackupID string `json:"backupID" form:"backupID"` + GroupName string `json:"groupName" form:"groupName"` + ServiceIndex string `json:"serviceIndex" form:"serviceIndex"` + DestinationPath string `json:"destinationPath" form:"destinationPath"` +} + +// RestoreBackup handles the request to restore a backup with destination path support +func (h *BackupActionsHandler) RestoreBackup(c echo.Context) error { + backupID := c.Param("backupID") + if backupID == "" { + return echo.NewHTTPError(http.StatusBadRequest, "backupID is required") + } + + // Try to parse the group and service from query parameters + groupName := c.QueryParam("groupName") + serviceIndex := c.QueryParam("serviceIndex") + destinationPath := c.QueryParam("destinationPath") + + // If not in query parameters, check form data + if groupName == "" || serviceIndex == "" { + var req RestoreRequest + if err := c.Bind(&req); err == nil { + groupName = req.GroupName + serviceIndex = req.ServiceIndex + destinationPath = req.DestinationPath + } + } + + if groupName == "" || serviceIndex == "" { + return echo.NewHTTPError(http.StatusBadRequest, + "groupName and serviceIndex are required either as query parameters or in the request body") + } + + // Construct full service name + serviceName := fmt.Sprintf("%s.%s", groupName, serviceIndex) + + log.Printf("Restoring backup ID=%s to service %s", backupID, serviceName) + if destinationPath != "" { + log.Printf("Restoring to custom destination: %s", destinationPath) + } + + // Get the appropriate strategy for this service + strategy, err := h.backupFactory.CreateBackupStrategyForService(groupName, serviceIndex) + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, + fmt.Sprintf("Failed to get backup strategy: %v", err)) + } + + // If destination path is provided, use our custom restore function + if destinationPath != "" { + err = h.restoreBackupToCustomDestination(c.Request().Context(), strategy, backupID, serviceName, destinationPath) + } else { + // Otherwise use the regular restore + err = 
strategy.RestoreBackup(c.Request().Context(), backupID, serviceName) + } + + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, + fmt.Sprintf("Failed to restore backup: %v", err)) + } + + return c.JSON(http.StatusOK, map[string]string{ + "status": "success", + "message": fmt.Sprintf("Backup %s restored successfully", backupID), + }) +} + +// RestoreBackupForm shows the restore form with directory input +func (h *BackupActionsHandler) RestoreBackupForm(c echo.Context) error { + backupID := c.QueryParam("backupID") + groupName := c.QueryParam("groupName") + serviceIndex := c.QueryParam("serviceIndex") + + if backupID == "" || groupName == "" || serviceIndex == "" { + return echo.NewHTTPError(http.StatusBadRequest, "backupID, groupName, and serviceIndex are required") + } + + // Build service name for lookup + serviceName := fmt.Sprintf("%s.%s", groupName, serviceIndex) + + // Get backup info if available + strategy, err := h.backupFactory.CreateBackupStrategyForService(groupName, serviceIndex) + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, + fmt.Sprintf("Failed to get backup strategy: %v", err)) + } + + // Try to get backup info + backupInfo, err := strategy.GetBackupInfo(c.Request().Context(), backupID, serviceName) + var backupTimeStr string + var backupSizeStr string + var defaultPath string + + if err == nil && backupInfo != nil { + backupTimeStr = backupInfo.CreationTime.Format("Jan 02, 2006 15:04:05") + backupSizeStr = formatSize(backupInfo.Size) + defaultPath = backupInfo.Source // Use original source as default path + } else { + backupTimeStr = "Unknown" + backupSizeStr = "Unknown" + defaultPath = "" + } + + // TODO : create a real template + return c.HTML(http.StatusOK, fmt.Sprintf(` +
+

Restore Backup

+
+

Backup Date: %s

+

Size: %s

+
+ +
+ + + + +
+ + +
+ +
+ + +
+
+
+ `, backupTimeStr, backupSizeStr, backupID, backupID, groupName, serviceIndex, defaultPath)) +} + +// restoreBackupToCustomDestination handles restoring a backup to a custom directory +func (h *BackupActionsHandler) restoreBackupToCustomDestination(ctx context.Context, strategy backup.Strategy, backupID, serviceName, destinationPath string) error { + // Validate destination path + destinationPath = filepath.Clean(destinationPath) + + // Check if destination exists + stat, err := os.Stat(destinationPath) + if err != nil { + if os.IsNotExist(err) { + // Create directory if it doesn't exist + if err := os.MkdirAll(destinationPath, 0755); err != nil { + return fmt.Errorf("failed to create destination directory: %w", err) + } + } else { + return fmt.Errorf("failed to check destination directory: %w", err) + } + } else if !stat.IsDir() { + return fmt.Errorf("destination path is not a directory") + } + + // Create a temporary directory to store the downloaded backup + tempDir, err := os.MkdirTemp("", "backea-restore-download-*") + if err != nil { + return fmt.Errorf("failed to create temporary download directory: %w", err) + } + defer os.RemoveAll(tempDir) + + // Create a temporary directory for extraction + extractDir, err := os.MkdirTemp("", "backea-restore-extract-*") + if err != nil { + return fmt.Errorf("failed to create temporary extraction directory: %w", err) + } + defer os.RemoveAll(extractDir) + + // Download the backup to a temporary file + log.Printf("Downloading backup %s to temporary location for service %s", backupID, serviceName) + reader, err := strategy.DownloadBackup(ctx, backupID, serviceName) + if err != nil { + return fmt.Errorf("failed to download backup: %w", err) + } + defer reader.Close() + + // Create a temporary file to store the zip + zipFile, err := os.CreateTemp(tempDir, "backup-*.zip") + if err != nil { + return fmt.Errorf("failed to create temporary file: %w", err) + } + zipPath := zipFile.Name() + defer os.Remove(zipPath) + + // Copy the zip content to the temp file + log.Printf("Writing backup data to temporary file: %s", zipPath) + _, err = io.Copy(zipFile, reader) + if err != nil { + zipFile.Close() + return fmt.Errorf("failed to write backup to disk: %w", err) + } + + // Flush to disk and close the file + zipFile.Sync() + zipFile.Close() + + // Extract the zip to the extraction directory + log.Printf("Extracting backup to %s", extractDir) + if err := extractZip(zipPath, extractDir); err != nil { + return fmt.Errorf("failed to extract backup: %w", err) + } + + // Sync extracted files to the destination using rsync + log.Printf("Syncing files from %s to %s", extractDir, destinationPath) + if err := syncDirectories(extractDir, destinationPath); err != nil { + return fmt.Errorf("failed to sync files to destination: %w", err) + } + + log.Printf("Backup successfully restored to %s", destinationPath) + return nil +} + +// extractZip extracts a zip file to a destination directory +func extractZip(zipFile, destDir string) error { + // Use unzip command for extraction + cmd := exec.Command("unzip", "-o", "-q", zipFile, "-d", destDir) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to extract zip: %w, output: %s", err, string(output)) + } + return nil +} + +// syncDirectories syncs files from source to destination using rsync +func syncDirectories(src, dest string) error { + // Ensure source path ends with a slash to copy contents, not the directory itself + if !strings.HasSuffix(src, "/") { + src = src + "/" + } + + cmd := 
exec.Command("rsync", "-av", "--delete", src, dest) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to sync directories: %w, output: %s", err, string(output)) + } + return nil +} + +// Helper function to format file size +func formatSize(size int64) string { + const unit = 1024 + if size < unit { + return fmt.Sprintf("%d B", size) + } + div, exp := int64(unit), 0 + for n := size / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.2f %cB", float64(size)/float64(div), "KMGTPE"[exp]) +} + +// DownloadBackup handles the backup download request +func (h *BackupActionsHandler) DownloadBackup(c echo.Context) error { + // Get query parameters + backupID := c.QueryParam("backupID") + groupName := c.QueryParam("groupName") + serviceIndex := c.QueryParam("serviceIndex") + + // Validate required parameters + if backupID == "" || groupName == "" || serviceIndex == "" { + return echo.NewHTTPError(http.StatusBadRequest, "backupID, groupName, and serviceIndex are required") + } + + // Create service name for logging + serviceName := fmt.Sprintf("%s.%s", groupName, serviceIndex) + log.Printf("Downloading backup: ID=%s, Service=%s", backupID, serviceName) + + // Get the appropriate strategy for this service + strategy, err := h.backupFactory.CreateBackupStrategyForService(groupName, serviceIndex) + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, + fmt.Sprintf("Failed to get backup strategy: %v", err)) + } + + // Get all backups for this service + backups, err := strategy.ListBackups(c.Request().Context(), serviceName) + if err != nil { + log.Printf("Warning: Failed to list backups: %v. Will try with provided ID directly.", err) + } else { + // Look for a matching backup with the proper snapshot ID + found := false + for _, backup := range backups { + // If we find an exact match, or if the backup ID contains our ID (partial match) + if backup.ID == backupID { + found = true + break + } + } + + if !found { + log.Printf("Warning: Backup ID %s not found in service backups. 
Will try direct download anyway.", backupID) + } + } + + // Attempt to download the backup using the provided ID + backupReader, err := strategy.DownloadBackup(c.Request().Context(), backupID, serviceName) + if err != nil { + return echo.NewHTTPError(http.StatusInternalServerError, + fmt.Sprintf("Failed to download backup: %v", err)) + } + defer backupReader.Close() + + // Set filename for download + filename := fmt.Sprintf("%s_%s_%s.zip", + groupName, + serviceIndex, + time.Now().Format("20060102_150405"), + ) + + // Set response headers for zip download + c.Response().Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", filename)) + c.Response().Header().Set("Content-Type", "application/zip") + + // Stream the file to the client + log.Printf("Streaming backup as %s", filename) + _, err = io.Copy(c.Response().Writer, backupReader) + if err != nil { + log.Printf("Error streaming backup: %v", err) + return err + } + + log.Printf("Backup download for %s completed successfully", backupID) + return nil +} diff --git a/internal/web/handlers/homepage_handler.go b/internal/web/handlers/homepage_handler.go new file mode 100644 index 0000000..09f2d04 --- /dev/null +++ b/internal/web/handlers/homepage_handler.go @@ -0,0 +1,317 @@ +package handlers + +import ( + "backea/internal/backup" + "backea/templates" + "context" + "fmt" + "log" + "net/http" + "os" + "sort" + "sync" + "time" + + "github.com/labstack/echo/v4" +) + +type HomepageHandler struct { + backupFactory *backup.BackupFactory +} + +func NewHomepageHandler(factory *backup.BackupFactory) *HomepageHandler { + return &HomepageHandler{ + backupFactory: factory, + } +} + +// Home handles the homepage request and displays latest backups by service +func (h *HomepageHandler) Home(c echo.Context) error { + // Create the data structures + serviceBackups := make(map[string]map[string][]backup.BackupInfo) + serviceConfigs := make(map[string]map[string]templates.ServiceProviderInfo) + groupDirectories := make(map[string]string) // Store directories by group name + + // Process each service group + for groupName, serviceGroup := range h.backupFactory.Config.Services { + // Initialize maps for this group + serviceBackups[groupName] = make(map[string][]backup.BackupInfo) + serviceConfigs[groupName] = make(map[string]templates.ServiceProviderInfo) + + // Store the directory at the group level in a separate map + groupDirectories[groupName] = serviceGroup.Source.Path + + // Process each backup config in the group + for configIndex, backupConfig := range serviceGroup.BackupConfigs { + // Store service configuration + serviceConfigs[groupName][configIndex] = templates.ServiceProviderInfo{ + Type: backupConfig.BackupStrategy.Type, + Provider: backupConfig.BackupStrategy.Provider, + Directory: serviceGroup.Source.Path, + } + } + } + + // Get sorted group names for alphabetical ordering + var sortedGroupNames []string + for groupName := range serviceBackups { + sortedGroupNames = append(sortedGroupNames, groupName) + } + sort.Strings(sortedGroupNames) + + // Render the template with sorted group names and directories + // We don't load actual backup data on initial page load, it will be loaded via HTMX + component := templates.Home(serviceBackups, serviceConfigs, sortedGroupNames, groupDirectories) + return component.Render(c.Request().Context(), c.Response().Writer) +} + +// ServiceGroupHeader returns the header section for a specific service group (HTMX endpoint) +func (h *HomepageHandler) ServiceGroupHeader(c echo.Context) error { + 
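A sketch of calling the restore endpoint served by this handler from a Go client, using the route shape registered in routes.go later in this patch (POST /api/backups/:backupID/restore with groupName, serviceIndex and destinationPath query parameters). The server address, snapshot ID and paths are assumptions.

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:8080" // assumed server address; the port is env-configured

	// Query parameters match what RestoreBackup reads; values are placeholders.
	q := url.Values{}
	q.Set("groupName", "wiki")
	q.Set("serviceIndex", "main")
	q.Set("destinationPath", "/srv/restore/wiki")

	restoreURL := fmt.Sprintf("%s/api/backups/%s/restore?%s", base, "k1a2b3c4", q.Encode())
	resp, err := http.Post(restoreURL, "application/json", nil)
	if err != nil {
		log.Fatalf("restore request failed: %v", err)
	}
	defer resp.Body.Close()
	log.Printf("restore responded with %s", resp.Status)
}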
groupName := c.Param("groupName") + + // Check if the service group exists + serviceGroup, exists := h.backupFactory.Config.Services[groupName] + if !exists { + return echo.NewHTTPError(404, "Service group not found") + } + + // Create data structures + serviceBackups := make(map[string][]backup.BackupInfo) + serviceConfigs := make(map[string]templates.ServiceProviderInfo) + + // Setup synchronization + var wg sync.WaitGroup + var mu sync.Mutex + + // Process each backup config in the group + for configIndex, backupConfig := range serviceGroup.BackupConfigs { + // Store service configuration + serviceConfigs[configIndex] = templates.ServiceProviderInfo{ + Type: backupConfig.BackupStrategy.Type, + Provider: backupConfig.BackupStrategy.Provider, + Directory: serviceGroup.Source.Path, + } + + // Fetch backups in parallel + wg.Add(1) + go func(index string) { + defer wg.Done() + + // Get backup strategy + strategy, err := h.backupFactory.CreateBackupStrategyForService(groupName, index) + if err != nil { + log.Printf("Error creating strategy for %s.%s: %v", groupName, index, err) + return + } + + // Get backups + backups, err := strategy.ListBackups(context.Background(), groupName+"."+index) + if err != nil { + log.Printf("Error listing backups for %s.%s: %v", groupName, index, err) + return + } + + // Sort backups by time (newest first) + sort.Slice(backups, func(i, j int) bool { + return backups[i].CreationTime.After(backups[j].CreationTime) + }) + + // Store result + mu.Lock() + serviceBackups[index] = backups + mu.Unlock() + }(configIndex) + } + + // Wait for all goroutines to finish + wg.Wait() + + // Render just the header component + component := templates.GroupHeaderComponent( + groupName, + serviceBackups, + serviceConfigs, + serviceGroup.Source.Path, + ) + return component.Render(c.Request().Context(), c.Response().Writer) +} + +// ServiceGroupBackups returns just the backups table for a specific service group (HTMX endpoint) +func (h *HomepageHandler) ServiceGroupBackups(c echo.Context) error { + groupName := c.Param("groupName") + + // Check if the service group exists + serviceGroup, exists := h.backupFactory.Config.Services[groupName] + if !exists { + return echo.NewHTTPError(404, "Service group not found") + } + + // Create data structures + serviceBackups := make(map[string][]backup.BackupInfo) + serviceConfigs := make(map[string]templates.ServiceProviderInfo) + + // Setup synchronization + var wg sync.WaitGroup + var mu sync.Mutex + + // Process each backup config in the group + for configIndex, backupConfig := range serviceGroup.BackupConfigs { + // Store service configuration + serviceConfigs[configIndex] = templates.ServiceProviderInfo{ + Type: backupConfig.BackupStrategy.Type, + Provider: backupConfig.BackupStrategy.Provider, + Directory: serviceGroup.Source.Path, + } + + // Fetch backups in parallel + wg.Add(1) + go func(index string) { + defer wg.Done() + + // Get backup strategy + strategy, err := h.backupFactory.CreateBackupStrategyForService(groupName, index) + if err != nil { + log.Printf("Error creating strategy for %s.%s: %v", groupName, index, err) + return + } + + // Get backups + backups, err := strategy.ListBackups(context.Background(), groupName+"."+index) + if err != nil { + log.Printf("Error listing backups for %s.%s: %v", groupName, index, err) + return + } + + // Sort backups by time (newest first) + sort.Slice(backups, func(i, j int) bool { + return backups[i].CreationTime.After(backups[j].CreationTime) + }) + + // Store result + mu.Lock() + 
serviceBackups[index] = backups + mu.Unlock() + }(configIndex) + } + + // Wait for all goroutines to finish + wg.Wait() + + // Create a map with just the group for the template + groupServiceBackups := make(map[string]map[string][]backup.BackupInfo) + groupServiceBackups[groupName] = serviceBackups + + // Create a map with just the group configs for the template + groupServiceConfigs := make(map[string]map[string]templates.ServiceProviderInfo) + groupServiceConfigs[groupName] = serviceConfigs + + // Render only the backups table component + component := templates.ServiceGroupBackupsTable(groupName, serviceBackups, groupServiceConfigs) + return component.Render(c.Request().Context(), c.Response().Writer) +} + +// ServiceGroupAllBackups returns all backups for a specific service group (HTMX endpoint) +func (h *HomepageHandler) ServiceGroupAllBackups(c echo.Context) error { + groupName := c.Param("groupName") + + // Check if the service group exists + serviceGroup, exists := h.backupFactory.Config.Services[groupName] + if !exists { + return echo.NewHTTPError(404, "Service group not found") + } + + // Create data structures + serviceBackups := make(map[string][]backup.BackupInfo) + serviceConfigs := make(map[string]templates.ServiceProviderInfo) + + // Setup synchronization + var wg sync.WaitGroup + var mu sync.Mutex + + // Process each backup config in the group + for configIndex, backupConfig := range serviceGroup.BackupConfigs { + // Store service configuration + serviceConfigs[configIndex] = templates.ServiceProviderInfo{ + Type: backupConfig.BackupStrategy.Type, + Provider: backupConfig.BackupStrategy.Provider, + Directory: serviceGroup.Source.Path, + } + + // Fetch backups in parallel + wg.Add(1) + go func(index string) { + defer wg.Done() + + // Get backup strategy + strategy, err := h.backupFactory.CreateBackupStrategyForService(groupName, index) + if err != nil { + log.Printf("Error creating strategy for %s.%s: %v", groupName, index, err) + return + } + + // Get backups + backups, err := strategy.ListBackups(context.Background(), groupName+"."+index) + if err != nil { + log.Printf("Error listing backups for %s.%s: %v", groupName, index, err) + return + } + + // Sort backups by time (newest first) + sort.Slice(backups, func(i, j int) bool { + return backups[i].CreationTime.After(backups[j].CreationTime) + }) + + // Store result + mu.Lock() + serviceBackups[index] = backups + mu.Unlock() + }(configIndex) + } + + // Wait for all goroutines to finish + wg.Wait() + + // Create a map with just the group for the template + groupServiceBackups := make(map[string]map[string][]backup.BackupInfo) + groupServiceBackups[groupName] = serviceBackups + + // Create a map with just the group configs for the template + groupServiceConfigs := make(map[string]map[string]templates.ServiceProviderInfo) + groupServiceConfigs[groupName] = serviceConfigs + + // Also trigger a header update to refresh stats + go func() { + // Create a new context since the original one might be cancelled + ctx := context.Background() + + // Sleep briefly to ensure the table loads first + time.Sleep(100 * time.Millisecond) + + // Make an HTTP request to refresh the header + url := fmt.Sprintf("http://localhost:%s/api/service-group/%s/header", + os.Getenv("PORT"), + groupName, + ) + + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + log.Printf("Error creating request to refresh header: %v", err) + return + } + + // Set HTMX headers to target the correct element + req.Header.Set("HX-Request", 
"true") + req.Header.Set("HX-Target", fmt.Sprintf("group-header-%s", groupName)) + + // Make the request + client := &http.Client{} + _, err = client.Do(req) + if err != nil { + log.Printf("Error refreshing header: %v", err) + } + }() + + // Render the all backups table component + component := templates.ServiceGroupAllBackupsTable(groupName, serviceBackups, groupServiceConfigs) + return component.Render(c.Request().Context(), c.Response().Writer) +} diff --git a/internal/web/routes/routes.go b/internal/web/routes/routes.go new file mode 100644 index 0000000..3ec3326 --- /dev/null +++ b/internal/web/routes/routes.go @@ -0,0 +1,34 @@ +package routes + +import ( + "backea/internal/backup" + "backea/internal/web/handlers" + "fmt" + + "github.com/labstack/echo/v4" +) + +// RegisterRoutes sets up all the routes for the web application +func RegisterRoutes(e *echo.Echo, factory *backup.BackupFactory) { + // Create handlers with backup factory + homeHandler := handlers.NewHomepageHandler(factory) + actionsHandler := handlers.NewBackupActionsHandler(factory) + + // Register main routes + e.GET("/", homeHandler.Home) + + // Register HTMX API endpoints for lazy loading + e.GET("/api/service-group/:groupName/header", homeHandler.ServiceGroupHeader) + e.GET("/api/service-group/:groupName/backups", homeHandler.ServiceGroupBackups) + e.GET("/api/service-group/:groupName/all-backups", homeHandler.ServiceGroupAllBackups) + + // Register backup action routes - using named parameter for backupID + e.POST("/api/backups/:backupID/restore", actionsHandler.RestoreBackup) + e.GET("/api/backups/restore-form", actionsHandler.RestoreBackupForm) + e.GET("/api/backups/download", actionsHandler.DownloadBackup) + + // Add this after routes registration but before server start to see all routes + for _, route := range e.Routes() { + fmt.Printf("Method: %s, Path: %s, Handler: %s\n", route.Method, route.Path, route.Name) + } +} diff --git a/templates/home.templ b/templates/home.templ new file mode 100644 index 0000000..a5bd9e0 --- /dev/null +++ b/templates/home.templ @@ -0,0 +1,505 @@ +package templates +import ( + "backea/internal/backup" + "backea/templates/layouts" + "fmt" + "sort" + "time" +) + +// FormatSize formats byte size to human-readable format +func FormatSize(size int64) string { + const unit = 1024 + if size < unit { + return fmt.Sprintf("%d B", size) + } + div, exp := int64(unit), 0 + for n := size / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.2f %cB", float64(size)/float64(div), "KMGTPE"[exp]) +} + +// FormatTime formats time to a readable format +func FormatTime(t time.Time) string { + return t.Format("Jan 02, 2006 15:04:05") +} + +// FormatTimeSince formats the duration since a time in a human-readable way +func FormatTimeSince(t time.Time) string { + timeSince := time.Since(t) + hours := int(timeSince.Hours()) + + if hours < 1 { + return fmt.Sprintf("%d minutes ago", int(timeSince.Minutes())) + } + return fmt.Sprintf("%d hours ago", hours) +} + +// GetStatusClass returns the appropriate status class based on backup age +func GetStatusClass(t time.Time) string { + timeSince := time.Since(t) + hours := int(timeSince.Hours()) + + if hours > 72 { + return "Failed" + } else if hours > 24 { + return "Warning" + } + return "Healthy" +} + +// FormatServiceName returns a display-friendly service name +func FormatServiceName(groupName, serviceIndex string) string { + if serviceIndex == "" { + return groupName + } + return fmt.Sprintf("%s - %s", groupName, serviceIndex) +} + +// 
CalculateTotalSize calculates total size of all backups +func CalculateTotalSize(backups []backup.BackupInfo) int64 { + var total int64 + for _, b := range backups { + total += b.Size + } + return total +} + +// CalculateGroupTotalSize calculates total size of all backups for a service group +func CalculateGroupTotalSize(serviceGroup map[string][]backup.BackupInfo) int64 { + var total int64 + for _, backups := range serviceGroup { + for _, b := range backups { + total += b.Size + } + } + return total +} + +// GetGroupTotalBackupCount returns the total number of backups across all services in a group +func GetGroupTotalBackupCount(serviceGroup map[string][]backup.BackupInfo) int { + count := 0 + for _, backups := range serviceGroup { + count += len(backups) + } + return count +} + +// GetLatestBackupTime returns the most recent backup time for a service group +func GetLatestBackupTime(serviceGroup map[string][]backup.BackupInfo) (time.Time, bool) { + var latestTime time.Time + found := false + + for _, backups := range serviceGroup { + if len(backups) > 0 && (latestTime.IsZero() || backups[0].CreationTime.After(latestTime)) { + latestTime = backups[0].CreationTime + found = true + } + } + + return latestTime, found +} + +// GetGroupStatus returns the status of a service group based on the most recent backup +func GetGroupStatus(serviceGroup map[string][]backup.BackupInfo) string { + latestTime, found := GetLatestBackupTime(serviceGroup) + if !found { + return "No Backups" + } + return GetStatusClass(latestTime) +} + +// ServiceProviderInfo holds the backup strategy info for a service +type ServiceProviderInfo struct { + Type string + Provider string + Directory string +} + +// BackupWithService represents a backup with its service identifier +type BackupWithService struct { + ServiceIndex string + Backup backup.BackupInfo +} + +// GetSortedBackups collects all backups from a service group and sorts them by time +func GetSortedBackups(serviceGroup map[string][]backup.BackupInfo) []BackupWithService { + var allBackups []BackupWithService + + // Collect all backups with their service indices + for serviceIndex, backups := range serviceGroup { + for _, b := range backups { + allBackups = append(allBackups, BackupWithService{ + ServiceIndex: serviceIndex, + Backup: b, + }) + } + } + + // Sort by creation time (newest first) + sort.Slice(allBackups, func(i, j int) bool { + return allBackups[i].Backup.CreationTime.After(allBackups[j].Backup.CreationTime) + }) + + return allBackups +} + +// Home renders the homepage with lazy-loaded backup information +templ Home(serviceBackups map[string]map[string][]backup.BackupInfo, serviceConfigs map[string]map[string]ServiceProviderInfo, sortedGroupNames []string, groupDirectories map[string]string) { + @layouts.Base("Backea - Backup Dashboard") { +
+
+

Welcome to Backea

+

Unified guardians.

+
+ +

Latest Backups by Service

+ + + if len(serviceBackups) == 0 { +
+

No backup services configured or no backups found.

+
+ } else { +
+ + for _, groupName := range sortedGroupNames { +
+ +
+
+
+

+ + if directory, exists := groupDirectories[groupName]; exists && directory != "" { + { directory } + } else { + Unknown Directory + } + { groupName } +

+
+ + if len(serviceBackups[groupName]) > 0 { + for serviceIndex := range serviceBackups[groupName] { + if providerInfo, exists := serviceConfigs[groupName][serviceIndex]; exists { + { providerInfo.Type } + if providerInfo.Provider == "b2" || providerInfo.Provider == "backblaze" { + B2 Backblaze + } else if providerInfo.Provider == "ftp" { + FTP + } else if providerInfo.Provider == "ssh" || providerInfo.Provider == "sftp" { + SSH + } else if providerInfo.Provider == "s3" { + S3 + } else { + { providerInfo.Provider } + } + } + } + } else { + Unknown + } +
+
+ + +
+
+

Total Size

+

--

+
+
+

Backups

+

--

+
+
+

Last Backup

+

--

+
+
+

Status

+

--

+
+
+
+
+ + +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ } +
+ } +
+ } +} + +// GroupHeaderComponent renders just the group header with up-to-date stats +templ GroupHeaderComponent(groupName string, serviceBackups map[string][]backup.BackupInfo, serviceConfigs map[string]ServiceProviderInfo, directory string) { +
+
+

+ if directory != "" { + { directory } + } else { + Unknown Directory + } + { groupName } +

+
+ + if len(serviceBackups) > 0 { + for serviceIndex := range serviceBackups { + if providerInfo, exists := serviceConfigs[serviceIndex]; exists { + { providerInfo.Type } + if providerInfo.Provider == "b2" || providerInfo.Provider == "backblaze" { + B2 Backblaze + } else if providerInfo.Provider == "ftp" { + FTP + } else if providerInfo.Provider == "ssh" || providerInfo.Provider == "sftp" { + SSH + } else if providerInfo.Provider == "s3" { + S3 + } else { + { providerInfo.Provider } + } + } + } + } else { + Unknown + } +
+
+ + +
+
+

Total Size

+

{ FormatSize(CalculateGroupTotalSize(serviceBackups)) }

+
+
+

Backups

+

{ fmt.Sprintf("%d", GetGroupTotalBackupCount(serviceBackups)) }

+
+
+

Last Backup

+ if latestTime, found := GetLatestBackupTime(serviceBackups); found { +

{ FormatTimeSince(latestTime) }

+ } else { +

Never

+ } +
+
+

Status

+ if status := GetGroupStatus(serviceBackups); status == "No Backups" { +

No Backups

+ } else if status == "Failed" { +

Failed

+ } else if status == "Warning" { +

Warning

+ } else { +

Healthy

+ } +
+
+
+} + +// Updated table templates with action column +templ ServiceGroupBackupsTable(groupName string, serviceGroup map[string][]backup.BackupInfo, serviceConfigs map[string]map[string]ServiceProviderInfo) { +
+ if GetGroupTotalBackupCount(serviceGroup) > 0 { +
+ + + + + + + + + + + + + + + @renderSortedBackups(groupName, GetSortedBackups(serviceGroup), 5, serviceConfigs) + +
Service Date Size Type Retention Location Actions
+ + + if GetGroupTotalBackupCount(serviceGroup) > 5 { +
+ +
+ } +
+ } else { +
+

No backups found for this service.

+

Backups will appear here when created.

+
+ } +
+} + +// ServiceGroupAllBackupsTable template for showing all backups +templ ServiceGroupAllBackupsTable(groupName string, serviceGroup map[string][]backup.BackupInfo, serviceConfigs map[string]map[string]ServiceProviderInfo) { + + if GetGroupTotalBackupCount(serviceGroup) > 0 { +
+ + + + + + + + + + + + + + + @renderSortedBackups(groupName, GetSortedBackups(serviceGroup), 9999, serviceConfigs) + +
Service Date Size Type Retention Location Actions
+ + +
+ +
+
+ } else { +
+

No backups found for this service.

+

Backups will appear here when created.

+
+ } + + +
+} + +// renderSortedBackups with action buttons +templ renderSortedBackups(groupName string, sortedBackups []BackupWithService, limit int, serviceConfigs map[string]map[string]ServiceProviderInfo) { + for i := 0; i < len(sortedBackups) && i < limit; i++ { + + { FormatServiceName(groupName, sortedBackups[i].ServiceIndex) } + { FormatTime(sortedBackups[i].Backup.CreationTime) } + { FormatSize(sortedBackups[i].Backup.Size) } + { sortedBackups[i].Backup.Type } + { sortedBackups[i].Backup.RetentionTag } + + if providerInfo, exists := serviceConfigs[groupName][sortedBackups[i].ServiceIndex]; exists { + if providerInfo.Provider == "b2" || providerInfo.Provider == "backblaze" { + { providerInfo.Type } B2 Backblaze + } else if providerInfo.Provider == "ftp" { + { providerInfo.Type } FTP + } else if providerInfo.Provider == "ssh" || providerInfo.Provider == "sftp" { + { providerInfo.Type } SSH + } else if providerInfo.Provider == "s3" { + { providerInfo.Type } S3 + } else { + { providerInfo.Type } { providerInfo.Provider } + } + } else { + Unknown + } + + + + + Download + + + + } +} + + +// backupsTableRowsSorted renders backup rows sorted by creation time across all services +templ backupsTableRowsSorted(groupName string, serviceGroup map[string][]backup.BackupInfo, limit int, serviceConfigs map[string]map[string]ServiceProviderInfo) { + @renderSortedBackups(groupName, GetSortedBackups(serviceGroup), limit, serviceConfigs) +} + diff --git a/templates/home_templ.go b/templates/home_templ.go new file mode 100644 index 0000000..68be3bb --- /dev/null +++ b/templates/home_templ.go @@ -0,0 +1,1231 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package templates + +//lint:file-ignore SA4006 This context is only used if a nested component is present. 
+ +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "backea/internal/backup" + "backea/templates/layouts" + "fmt" + "sort" + "time" +) + +// FormatSize formats byte size to human-readable format +func FormatSize(size int64) string { + const unit = 1024 + if size < unit { + return fmt.Sprintf("%d B", size) + } + div, exp := int64(unit), 0 + for n := size / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.2f %cB", float64(size)/float64(div), "KMGTPE"[exp]) +} + +// FormatTime formats time to a readable format +func FormatTime(t time.Time) string { + return t.Format("Jan 02, 2006 15:04:05") +} + +// FormatTimeSince formats the duration since a time in a human-readable way +func FormatTimeSince(t time.Time) string { + timeSince := time.Since(t) + hours := int(timeSince.Hours()) + + if hours < 1 { + return fmt.Sprintf("%d minutes ago", int(timeSince.Minutes())) + } + return fmt.Sprintf("%d hours ago", hours) +} + +// GetStatusClass returns the appropriate status class based on backup age +func GetStatusClass(t time.Time) string { + timeSince := time.Since(t) + hours := int(timeSince.Hours()) + + if hours > 72 { + return "Failed" + } else if hours > 24 { + return "Warning" + } + return "Healthy" +} + +// FormatServiceName returns a display-friendly service name +func FormatServiceName(groupName, serviceIndex string) string { + if serviceIndex == "" { + return groupName + } + return fmt.Sprintf("%s - %s", groupName, serviceIndex) +} + +// CalculateTotalSize calculates total size of all backups +func CalculateTotalSize(backups []backup.BackupInfo) int64 { + var total int64 + for _, b := range backups { + total += b.Size + } + return total +} + +// CalculateGroupTotalSize calculates total size of all backups for a service group +func CalculateGroupTotalSize(serviceGroup map[string][]backup.BackupInfo) int64 { + var total int64 + for _, backups := range serviceGroup { + for _, b := range backups { + total += b.Size + } + } + return total +} + +// GetGroupTotalBackupCount returns the total number of backups across all services in a group +func GetGroupTotalBackupCount(serviceGroup map[string][]backup.BackupInfo) int { + count := 0 + for _, backups := range serviceGroup { + count += len(backups) + } + return count +} + +// GetLatestBackupTime returns the most recent backup time for a service group +func GetLatestBackupTime(serviceGroup map[string][]backup.BackupInfo) (time.Time, bool) { + var latestTime time.Time + found := false + + for _, backups := range serviceGroup { + if len(backups) > 0 && (latestTime.IsZero() || backups[0].CreationTime.After(latestTime)) { + latestTime = backups[0].CreationTime + found = true + } + } + + return latestTime, found +} + +// GetGroupStatus returns the status of a service group based on the most recent backup +func GetGroupStatus(serviceGroup map[string][]backup.BackupInfo) string { + latestTime, found := GetLatestBackupTime(serviceGroup) + if !found { + return "No Backups" + } + return GetStatusClass(latestTime) +} + +// ServiceProviderInfo holds the backup strategy info for a service +type ServiceProviderInfo struct { + Type string + Provider string + Directory string +} + +// BackupWithService represents a backup with its service identifier +type BackupWithService struct { + ServiceIndex string + Backup backup.BackupInfo +} + +// GetSortedBackups collects all backups from a service group and sorts them by time +func GetSortedBackups(serviceGroup map[string][]backup.BackupInfo) 
[]BackupWithService { + var allBackups []BackupWithService + + // Collect all backups with their service indices + for serviceIndex, backups := range serviceGroup { + for _, b := range backups { + allBackups = append(allBackups, BackupWithService{ + ServiceIndex: serviceIndex, + Backup: b, + }) + } + } + + // Sort by creation time (newest first) + sort.Slice(allBackups, func(i, j int) bool { + return allBackups[i].Backup.CreationTime.After(allBackups[j].Backup.CreationTime) + }) + + return allBackups +} + +// Home renders the homepage with lazy-loaded backup information +func Home(serviceBackups map[string]map[string][]backup.BackupInfo, serviceConfigs map[string]map[string]ServiceProviderInfo, sortedGroupNames []string, groupDirectories map[string]string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Var2 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Welcome to Backea

Unified guardians.

Latest Backups by Service

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(serviceBackups) == 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "

No backup services configured or no backups found.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, groupName := range sortedGroupNames { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if directory, exists := groupDirectories[groupName]; exists && directory != "" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(directory) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 180, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "Unknown Directory ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(groupName) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 184, Col: 83} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(serviceBackups[groupName]) > 0 { + for serviceIndex := range serviceBackups[groupName] { + if providerInfo, exists := serviceConfigs[groupName][serviceIndex]; exists { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 191, Col: 118} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if providerInfo.Provider == "b2" || providerInfo.Provider == "backblaze" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "B2 Backblaze") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "ftp" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "FTP") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "ssh" || providerInfo.Provider == "sftp" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "SSH") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "s3" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "S3") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Provider) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 201, Col: 127} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + } + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "Unknown") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "

Total Size

--

Backups

--

Last Backup

--

Status

--

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) + templ_7745c5c3_Err = layouts.Base("Backea - Backup Dashboard").Render(templ.WithChildren(ctx, templ_7745c5c3_Var2), templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// GroupHeaderComponent renders just the group header with up-to-date stats +func GroupHeaderComponent(groupName string, serviceBackups map[string][]backup.BackupInfo, serviceConfigs map[string]ServiceProviderInfo, directory string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var14 := templ.GetChildren(ctx) + if templ_7745c5c3_Var14 == nil { + templ_7745c5c3_Var14 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if directory != "" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var16 string + templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(directory) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 284, Col: 60} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "Unknown Directory ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var17 string + templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(groupName) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 288, Col: 55} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(serviceBackups) > 0 { + for serviceIndex := range serviceBackups { + if providerInfo, exists := serviceConfigs[serviceIndex]; exists { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var18 string + templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 295, Col: 90} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if providerInfo.Provider == "b2" || providerInfo.Provider == "backblaze" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "B2 Backblaze") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "ftp" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "FTP") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "ssh" || providerInfo.Provider == "sftp" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "SSH") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "s3" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "S3") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var19 string + templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Provider) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 305, Col: 99} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + } + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "Unknown") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "

Total Size

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var20 string + templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(FormatSize(CalculateGroupTotalSize(serviceBackups))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 319, Col: 95} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "

Backups

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var21 string + templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", GetGroupTotalBackupCount(serviceBackups))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 323, Col: 103} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "

Last Backup

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if latestTime, found := GetLatestBackupTime(serviceBackups); found { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var22 string + templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(FormatTimeSince(latestTime)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 328, Col: 75} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "

Never

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "

Status

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if status := GetGroupStatus(serviceBackups); status == "No Backups" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "

No Backups

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if status == "Failed" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "

Failed

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if status == "Warning" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "

Warning

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "

Healthy

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// Updated table templates with action column +func ServiceGroupBackupsTable(groupName string, serviceGroup map[string][]backup.BackupInfo, serviceConfigs map[string]map[string]ServiceProviderInfo) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var23 := templ.GetChildren(ctx) + if templ_7745c5c3_Var23 == nil { + templ_7745c5c3_Var23 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if GetGroupTotalBackupCount(serviceGroup) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = renderSortedBackups(groupName, GetSortedBackups(serviceGroup), 5, serviceConfigs).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "
Service Date Size Type Retention Location Actions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if GetGroupTotalBackupCount(serviceGroup) > 5 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "

No backups found for this service.

Backups will appear here when created.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// ServiceGroupAllBackupsTable template for showing all backups +func ServiceGroupAllBackupsTable(groupName string, serviceGroup map[string][]backup.BackupInfo, serviceConfigs map[string]map[string]ServiceProviderInfo) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var30 := templ.GetChildren(ctx) + if templ_7745c5c3_Var30 == nil { + templ_7745c5c3_Var30 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if GetGroupTotalBackupCount(serviceGroup) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = renderSortedBackups(groupName, GetSortedBackups(serviceGroup), 9999, serviceConfigs).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "
Service Date Size Type Retention Location Actions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "

No backups found for this service.

Backups will appear here when created.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// renderSortedBackups with action buttons +func renderSortedBackups(groupName string, sortedBackups []BackupWithService, limit int, serviceConfigs map[string]map[string]ServiceProviderInfo) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var37 := templ.GetChildren(ctx) + if templ_7745c5c3_Var37 == nil { + templ_7745c5c3_Var37 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + for i := 0; i < len(sortedBackups) && i < limit; i++ { + var templ_7745c5c3_Var38 = []any{templ.Classes( + templ.KV("gruvbox-bg-hard", i%2 == 0), + templ.KV("gruvbox-bg0", i%2 != 0), + )} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var38...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var40 string + templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(FormatServiceName(groupName, sortedBackups[i].ServiceIndex)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 456, Col: 124} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var41 string + templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(FormatTime(sortedBackups[i].Backup.CreationTime)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 457, Col: 113} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var42 string + templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(FormatSize(sortedBackups[i].Backup.Size)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 458, Col: 105} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var43 string + templ_7745c5c3_Var43, templ_7745c5c3_Err = 
templ.JoinStringErrs(sortedBackups[i].Backup.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 459, Col: 93} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var44 string + templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(sortedBackups[i].Backup.RetentionTag) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 460, Col: 101} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if providerInfo, exists := serviceConfigs[groupName][sortedBackups[i].ServiceIndex]; exists { + if providerInfo.Provider == "b2" || providerInfo.Provider == "backblaze" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var45 string + templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 464, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, " B2 Backblaze") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "ftp" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var46 string + templ_7745c5c3_Var46, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 466, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var46)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, " FTP") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if providerInfo.Provider == "ssh" || providerInfo.Provider == "sftp" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var47 string + templ_7745c5c3_Var47, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 468, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var47)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, " SSH") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if 
providerInfo.Provider == "s3" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var48 string + templ_7745c5c3_Var48, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 470, Col: 85} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var48)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, " S3") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var49 string + templ_7745c5c3_Var49, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Type) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 472, Col: 87} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var49)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var50 string + templ_7745c5c3_Var50, templ_7745c5c3_Err = templ.JoinStringErrs(providerInfo.Provider) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/home.templ`, Line: 472, Col: 113} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var50)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "Unknown") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, " Download") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + return nil + }) +} + +// backupsTableRowsSorted renders backup rows sorted by creation time across all services +func backupsTableRowsSorted(groupName string, serviceGroup map[string][]backup.BackupInfo, limit int, serviceConfigs map[string]map[string]ServiceProviderInfo) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var53 := templ.GetChildren(ctx) + if templ_7745c5c3_Var53 == nil { + templ_7745c5c3_Var53 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = renderSortedBackups(groupName, 
GetSortedBackups(serviceGroup), limit, serviceConfigs).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +var _ = templruntime.GeneratedTemplate diff --git a/templates/layouts/base.templ b/templates/layouts/base.templ new file mode 100644 index 0000000..48a69ef --- /dev/null +++ b/templates/layouts/base.templ @@ -0,0 +1,632 @@ +package layouts + +// Base provides the basic HTML structure for all pages +templ Base(title string) { + + + + + + { title } + + + + + + + + + + + +
+
+
+ +

Backea

+
+ +
+
+
+ + +
+ +
+ { children... } +
+ + + + + +} \ No newline at end of file diff --git a/templates/layouts/base_templ.go b/templates/layouts/base_templ.go new file mode 100644 index 0000000..19bbd9f --- /dev/null +++ b/templates/layouts/base_templ.go @@ -0,0 +1,62 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package layouts + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +// Base provides the basic HTML structure for all pages +func Base(title string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(title) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/layouts/base.templ`, Line: 10, Col: 15} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templ_7745c5c3_Var1.Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +var _ = templruntime.GeneratedTemplate diff --git a/templates/notifications.templ b/templates/notifications.templ new file mode 100644 index 0000000..cd2ec85 --- /dev/null +++ b/templates/notifications.templ @@ -0,0 +1,39 @@ +package templates + +import "fmt" + +// Notification displays a temporary notification message +templ Notification(message string, notificationType string) { +
+

{ message }

+
+} + +// Success notification +templ SuccessNotification(message string) { + @Notification(message, "success") +} + +// Error notification +templ ErrorNotification(message string) { + @Notification(message, "error") +} + +// RestoreSuccessResponse returns a response for successful restore operation +templ RestoreSuccessResponse(backupID string) { +
+ @SuccessNotification("Backup restoration started successfully") +
+} + +// RestoreErrorResponse returns a response for failed restore operation +templ RestoreErrorResponse(errorMessage string) { +
+ @ErrorNotification(fmt.Sprintf("Backup restoration failed: %s", errorMessage)) +
+} \ No newline at end of file diff --git a/templates/notifications_templ.go b/templates/notifications_templ.go new file mode 100644 index 0000000..f17c809 --- /dev/null +++ b/templates/notifications_templ.go @@ -0,0 +1,213 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package templates + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import "fmt" + +// Notification displays a temporary notification message +func Notification(message string, notificationType string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + var templ_7745c5c3_Var2 = []any{templ.Classes( + "notification", + templ.KV(notificationType, true), + )} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(message) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `templates/notifications.templ`, Line: 13, Col: 20} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// Success notification +func SuccessNotification(message string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var5 := templ.GetChildren(ctx) + if templ_7745c5c3_Var5 == nil { + templ_7745c5c3_Var5 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = Notification(message, "success").Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// Error notification +func ErrorNotification(message string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var6 := templ.GetChildren(ctx) + if templ_7745c5c3_Var6 == nil { + templ_7745c5c3_Var6 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = Notification(message, "error").Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// RestoreSuccessResponse returns a response for successful restore operation +func RestoreSuccessResponse(backupID string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var7 := templ.GetChildren(ctx) + if templ_7745c5c3_Var7 == nil { + templ_7745c5c3_Var7 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = SuccessNotification("Backup restoration started successfully").Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// RestoreErrorResponse returns a response for failed restore operation +func RestoreErrorResponse(errorMessage string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var8 := templ.GetChildren(ctx) + if templ_7745c5c3_Var8 == nil { + templ_7745c5c3_Var8 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = ErrorNotification(fmt.Sprintf("Backup restoration failed: %s", errorMessage)).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +var _ = templruntime.GeneratedTemplate