From 6142a1f66aa75bce2cbcf6a67e6aa8b3cd92a2d0 Mon Sep 17 00:00:00 2001
From: Ivan Reshetnikov
Date: Sat, 23 Nov 2024 18:34:03 +0500
Subject: [PATCH] Initial commit

---
 Dockerfile |  12 ++++
 README.md  |   3 +
 go.mod     |   3 +
 main.go    | 201 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 219 insertions(+)
 create mode 100644 Dockerfile
 create mode 100644 README.md
 create mode 100644 go.mod
 create mode 100644 main.go

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..3337424
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,12 @@
+FROM golang:1.23-alpine AS builder
+
+WORKDIR /app
+COPY go.mod main.go ./
+RUN go build -o main
+
+FROM alpine:3.20
+RUN apk add postgresql16-client
+COPY --from=builder /app/main /usr/local/bin/postgresql-backuper
+RUN mkdir /backups
+ENV BACKUPS_DIR="/backups"
+CMD ["postgresql-backuper"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2afdf97
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# PostgreSQL backuper
+
+A small program that creates PostgreSQL backups at regular intervals.
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..fc51e56
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module git.comfycamp.space/lumin/backuper/postgresql
+
+go 1.22.7
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..1054e19
--- /dev/null
+++ b/main.go
@@ -0,0 +1,201 @@
+package main
+
+import (
+	"compress/gzip"
+	"errors"
+	"io"
+	"io/fs"
+	"log/slog"
+	"os"
+	"os/exec"
+	"os/signal"
+	"path"
+	"strings"
+	"syscall"
+	"time"
+)
+
+const (
+	FilenameTemplate = "postgresql-backup-2006-01-02T15:04:05Z07:00.sql.gz"
+)
+
+func getStrFromEnv(varName, defaultValue string) string {
+	value := os.Getenv(varName)
+	if value == "" {
+		return defaultValue
+	}
+
+	return value
+}
+
+func backup(backupsDir, pgdumpBin string, customArgs []string) {
+	filename := time.Now().Format(FilenameTemplate)
+	filepath := path.Join(backupsDir, filename)
+	slog.Info("creating a new backup", "path", filepath)
+	backupFile, err := os.Create(filepath)
+	if err != nil {
+		slog.Error("unable to create backup file", "err", err)
+		return
+	}
+	defer backupFile.Close()
+
+	cmd := exec.Command(pgdumpBin, customArgs...)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		slog.Error("unable to create stdout pipe", "err", err)
+		return
+	}
+
+	if err := cmd.Start(); err != nil {
+		slog.Error("unable to start the dump command", "err", err)
+		return
+	}
+
+	gzipWriter, err := gzip.NewWriterLevel(backupFile, gzip.BestCompression)
+	if err != nil {
+		slog.Error("error creating gzip writer", "err", err)
+		return
+	}
+	defer gzipWriter.Close()
+
+	_, err = io.Copy(gzipWriter, stdout)
+	if err != nil {
+		slog.Error("error writing to the backup file", "err", err)
+		return
+	}
+
+	if err := cmd.Wait(); err != nil {
+		slog.Error("command failed", "err", err)
+		return
+	}
+
+	slog.Info("backup completed successfully")
+}
+
+func getLatestBackupTime(backupsDir string) (*time.Time, error) {
+	entries, err := os.ReadDir(backupsDir)
+	if err != nil {
+		return nil, err
+	}
+
+	var maxTs *time.Time
+
+	for _, entry := range entries {
+		if entry.IsDir() {
+			continue
+		}
+
+		filename := entry.Name()
+		fileTimestamp, err := time.Parse(FilenameTemplate, filename)
+		if err != nil {
+			continue
+		}
+
+		if maxTs == nil || fileTimestamp.After(*maxTs) {
+			maxTs = &fileTimestamp
+		}
+	}
+
+	return maxTs, nil
+}
+
+// Delete backups older than the retention period.
+func cleanup(backupsDir string, retention time.Duration) {
+	entries, err := os.ReadDir(backupsDir)
+	if err != nil {
+		slog.Error("unable to list backup dir contents", "err", err)
+		return
+	}
+
+	for _, entry := range entries {
+		if entry.IsDir() {
+			continue
+		}
+
+		filename := entry.Name()
+		fileTimestamp, err := time.Parse(FilenameTemplate, filename)
+		if err != nil {
+			continue
+		}
+
+		if time.Since(fileTimestamp) <= retention {
+			continue
+		}
+
+		fullpath := path.Join(backupsDir, filename)
+		err = os.Remove(fullpath)
+		if err != nil {
+			slog.Error("unable to remove old backup", "path", fullpath, "err", err)
+		}
+	}
+}
+
+func sleepUntilNextBackup(lastBackupTime *time.Time, backupInterval time.Duration) {
+	if lastBackupTime == nil {
+		return
+	}
+
+	timeSinceLastBackup := time.Since(*lastBackupTime)
+	if timeSinceLastBackup >= backupInterval {
+		return
+	}
+
+	time.Sleep(backupInterval - timeSinceLastBackup)
+}
+
+func main() {
+	backupsDir := getStrFromEnv("BACKUPS_DIR", ".")
+	err := os.Mkdir(backupsDir, 0750)
+	if err != nil && !errors.Is(err, fs.ErrExist) {
+		slog.Error("unable to create backups directory", "err", err)
+		os.Exit(-1)
+	}
+
+	pgdumpBin := getStrFromEnv("PGDUMP_BINARY", "pg_dumpall")
+
+	var customArgs []string
+	customArgsStr := getStrFromEnv("CUSTOM_ARGS", "")
+	if customArgsStr != "" {
+		customArgs = strings.Split(customArgsStr, " ")
+	}
+
+	retention, err := time.ParseDuration(getStrFromEnv("RETENTION", "168h"))
+	if err != nil {
+		slog.Error("retention is incorrect", "err", err)
+		os.Exit(-1)
+	}
+
+	backupIntervalStr := getStrFromEnv("BACKUP_INTERVAL", "24h")
+	backupInterval, err := time.ParseDuration(backupIntervalStr)
+	if err != nil {
+		slog.Error("backup interval is incorrect", "err", err)
+		os.Exit(-1)
+	}
+
+	exit := make(chan os.Signal, 1)
+	signal.Notify(exit, os.Interrupt, syscall.SIGTERM)
+
+	lastTimestamp, err := getLatestBackupTime(backupsDir)
+	if err != nil {
+		slog.Error("unable to list existing backups", "err", err)
+		os.Exit(-1)
+	}
+
+	sleepUntilNextBackup(lastTimestamp, backupInterval)
+
+	backup(backupsDir, pgdumpBin, customArgs)
+	cleanup(backupsDir, retention)
+
+	timer := time.NewTicker(backupInterval)
+
+forloop:
+	for {
+		select {
+		case <-timer.C:
+			backup(backupsDir, pgdumpBin, customArgs)
+			cleanup(backupsDir, retention)
+		case <-exit:
+			break forloop
+		}
+	}
+}
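
A note on the filename scheme: the creation time of every backup is carried in the filename itself. backup() formats time.Now() with FilenameTemplate, and getLatestBackupTime() and cleanup() later recover that timestamp by parsing the name with the same layout; Go's time.Parse treats the non-reference parts of the layout ("postgresql-backup-" and ".sql.gz") as literal text, so no separate metadata store is needed. A minimal standalone sketch of that round trip, reusing the layout string from main.go (the program below is illustrative only):

package main

import (
	"fmt"
	"time"
)

// Same layout as FilenameTemplate in main.go; only the reference time
// 2006-01-02T15:04:05Z07:00 is interpreted by Format/Parse, the prefix
// and suffix are matched as literal text.
const layout = "postgresql-backup-2006-01-02T15:04:05Z07:00.sql.gz"

func main() {
	// Formatting embeds the creation time in the backup's filename.
	name := time.Now().Format(layout)
	fmt.Println(name) // e.g. postgresql-backup-2024-11-23T18:34:03+05:00.sql.gz

	// Parsing the filename with the same layout recovers the timestamp,
	// which is how getLatestBackupTime and cleanup date existing backups.
	ts, err := time.Parse(layout, name)
	if err != nil {
		panic(err)
	}
	fmt.Println("age of backup:", time.Since(ts))
}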