package main

import (
	"compress/gzip"
	"errors"
	"io"
	"io/fs"
	"log/slog"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"strings"
	"syscall"
	"time"
)

// FilenameTemplate is both the layout used to name new backups and the
// layout used to parse timestamps back out of existing filenames.
const (
	FilenameTemplate = "postgresql-backup-2006-01-02T15:04:05Z07:00.sql.gz"
)

// getStrFromEnv returns the value of the environment variable varName,
// falling back to defaultValue when it is unset or empty.
func getStrFromEnv(varName, defaultValue string) string {
	value := os.Getenv(varName)
	if value == "" {
		return defaultValue
	}
	return value
}

// backup runs the configured dump binary and streams its stdout through a
// gzip writer into a timestamped file in backupsDir.
func backup(backupsDir, pgdumpBin string, customArgs []string) {
	filename := time.Now().Format(FilenameTemplate)
	backupPath := path.Join(backupsDir, filename)
	slog.Info("creating a new backup", "path", backupPath)

	backupFile, err := os.Create(backupPath)
	if err != nil {
		slog.Error("unable to create backup file", "err", err)
		return
	}
	defer backupFile.Close()

	cmd := exec.Command(pgdumpBin, customArgs...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		slog.Error("unable to connect to stdout", "err", err)
		return
	}
	if err := cmd.Start(); err != nil {
		slog.Error("unable to start the program", "err", err)
		return
	}

	gzipWriter, err := gzip.NewWriterLevel(backupFile, gzip.BestCompression)
	if err != nil {
		slog.Error("error creating gzip writer", "err", err)
		return
	}
	// Deferred closes run in reverse order: the gzip stream is flushed
	// before the underlying file is closed.
	defer gzipWriter.Close()

	if _, err := io.Copy(gzipWriter, stdout); err != nil {
		slog.Error("error writing to the backup file", "err", err)
		return
	}
	if err := cmd.Wait(); err != nil {
		slog.Error("command failed", "err", err)
		return
	}
	slog.Info("backup is successful")
}

// getLatestBackupTime scans backupsDir and returns the timestamp of the most
// recent backup, or nil if no parseable backup files are found.
func getLatestBackupTime(backupsDir string) (*time.Time, error) {
	entries, err := os.ReadDir(backupsDir)
	if err != nil {
		return nil, err
	}
	var maxTs *time.Time
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		filename := entry.Name()
		fileTimestamp, err := time.Parse(FilenameTemplate, filename)
		if err != nil {
			// Not a backup file produced by this tool; skip it.
			continue
		}
		if maxTs == nil || fileTimestamp.After(*maxTs) {
			maxTs = &fileTimestamp
		}
	}
	return maxTs, nil
}

// cleanup deletes backups older than the retention period.
func cleanup(backupsDir string, retention time.Duration) {
	entries, err := os.ReadDir(backupsDir)
	if err != nil {
		slog.Error("unable to list backup dir contents", "err", err)
		return
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		filename := entry.Name()
		fileTimestamp, err := time.Parse(FilenameTemplate, filename)
		if err != nil {
			// Not a backup file produced by this tool; leave it alone.
			continue
		}
		if time.Since(fileTimestamp) <= retention {
			continue
		}
		fullpath := path.Join(backupsDir, filename)
		if err := os.Remove(fullpath); err != nil {
			slog.Error("unable to remove old backup", "path", fullpath, "err", err)
		}
	}
}

// sleepUntilNextBackup waits out the remainder of the backup interval since
// the last backup, so a restart does not immediately trigger a new one.
func sleepUntilNextBackup(lastBackupTime *time.Time, backupInterval time.Duration) {
	if lastBackupTime == nil {
		return
	}
	timeSinceLastBackup := time.Since(*lastBackupTime)
	if timeSinceLastBackup >= backupInterval {
		return
	}
	time.Sleep(backupInterval - timeSinceLastBackup)
}

func main() {
	backupsDir := getStrFromEnv("BACKUPS_DIR", ".")
	err := os.Mkdir(backupsDir, 0750)
	if err != nil && !errors.Is(err, fs.ErrExist) {
		slog.Error("unable to create backups directory", "err", err)
		os.Exit(1)
	}

	pgdumpBin := getStrFromEnv("PGDUMP_BINARY", "pg_dumpall")

	var customArgs []string
	customArgsStr := getStrFromEnv("CUSTOM_ARGS", "")
	if customArgsStr != "" {
		customArgs = strings.Split(customArgsStr, " ")
	}

	retention, err := time.ParseDuration(getStrFromEnv("RETENTION", "168h"))
	if err != nil {
		slog.Error("retention is incorrect", "err", err)
		os.Exit(1)
	}

	backupIntervalStr := getStrFromEnv("BACKUP_INTERVAL", "24h")
	backupInterval, err := time.ParseDuration(backupIntervalStr)
	if err != nil {
		slog.Error("backup interval is incorrect", "err", err)
		os.Exit(1)
	}

	exit := make(chan os.Signal, 1)
	signal.Notify(exit, os.Interrupt, syscall.SIGTERM)

	// If backups already exist, wait out the remainder of the interval
	// instead of taking a new backup immediately on startup.
	lastTimestamp, err := getLatestBackupTime(backupsDir)
	if err != nil {
		slog.Error("unable to list existing backups", "err", err)
		os.Exit(1)
	}
	sleepUntilNextBackup(lastTimestamp, backupInterval)

	backup(backupsDir, pgdumpBin, customArgs)
	cleanup(backupsDir, retention)

	ticker := time.NewTicker(backupInterval)
	defer ticker.Stop()

forloop:
	for {
		select {
		case <-ticker.C:
			backup(backupsDir, pgdumpBin, customArgs)
			cleanup(backupsDir, retention)
		case <-exit:
			break forloop
		}
	}
}
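
// Configuration is read entirely from environment variables. A minimal,
// illustrative setup (the values below are examples, not requirements;
// the defaults in parentheses are the ones hard-coded above):
//
//	BACKUPS_DIR=/backups              directory for backup files (default ".")
//	PGDUMP_BINARY=pg_dumpall          dump binary to execute (default "pg_dumpall")
//	CUSTOM_ARGS="-h db -U postgres"   extra arguments, split on single spaces (default none)
//	RETENTION=168h                    delete backups older than this (default "168h")
//	BACKUP_INTERVAL=24h               time between backups (default "24h")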