Initial commit
Commit 6142a1f66a
4 changed files with 219 additions and 0 deletions

Dockerfile (new file, 12 lines)
@@ -0,0 +1,12 @@
FROM golang:1.23-alpine AS builder

WORKDIR /app
COPY go.mod main.go ./
RUN go build -o main

FROM alpine:3.20
RUN apk add postgresql16-client
COPY --from=builder /app/main /usr/local/bin/postgresql-backuper
RUN mkdir /backups
ENV BACKUPS_DIR="/backups"
CMD ["postgresql-backuper"]

README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Postgresql backuper

A small program for creating PostgreSQL backups at regular intervals.
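
The interval is configurable: main.go (below) reads BACKUP_INTERVAL and RETENTION from the environment and parses them with Go's time.ParseDuration. A minimal standalone sketch of how the default values ("24h" between backups, "168h" retention) parse; everything besides those defaults is purely illustrative:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Default values from main.go; any Go duration string ("30m", "12h", ...) is accepted.
	interval, _ := time.ParseDuration("24h")   // BACKUP_INTERVAL default
	retention, _ := time.ParseDuration("168h") // RETENTION default, i.e. one week
	fmt.Println(interval, retention)
}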

go.mod (new file, 3 lines)
@@ -0,0 +1,3 @@
module git.comfycamp.space/lumin/backuper/postgresql

go 1.22.7

main.go (new file, 201 lines)
@@ -0,0 +1,201 @@
package main

import (
	"compress/gzip"
	"errors"
	"io"
	"io/fs"
	"log/slog"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"strings"
	"syscall"
	"time"
)

const (
	FilenameTemplate = "postgresql-backup-2006-01-02T15:04:05Z07:00.sql.gz"
)

// getStrFromEnv returns the value of the environment variable varName,
// or defaultValue if it is unset or empty.
func getStrFromEnv(varName, defaultValue string) string {
	value := os.Getenv(varName)
	if value == "" {
		return defaultValue
	}

	return value
}

// backup runs the dump command and streams its stdout, gzip-compressed,
// into a timestamped file in backupsDir.
func backup(backupsDir, pgdumpBin string, customArgs []string) {
	filename := time.Now().Format(FilenameTemplate)
	filepath := path.Join(backupsDir, filename)
	slog.Info("creating a new backup", "path", filepath)
	backupFile, err := os.Create(filepath)
	if err != nil {
		slog.Error("unable to create backup file", "err", err)
		return
	}
	defer backupFile.Close()

	cmd := exec.Command(pgdumpBin, customArgs...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		slog.Error("unable to connect to stdout", "err", err)
		return
	}

	if err := cmd.Start(); err != nil {
		slog.Error("unable to start the program", "err", err)
		return
	}

	gzipWriter, err := gzip.NewWriterLevel(backupFile, gzip.BestCompression)
	if err != nil {
		slog.Error("error creating gzip writer", "err", err)
		return
	}
	defer gzipWriter.Close()

	_, err = io.Copy(gzipWriter, stdout)
	if err != nil {
		slog.Error("error writing to the backup file", "err", err)
		return
	}

	if err := cmd.Wait(); err != nil {
		slog.Error("command failed", "err", err)
		return
	}

	slog.Info("backup is successful")
}

// getLatestBackupTime returns the timestamp of the newest backup in backupsDir,
// or nil if there is none.
func getLatestBackupTime(backupsDir string) (*time.Time, error) {
	entries, err := os.ReadDir(backupsDir)
	if err != nil {
		return nil, err
	}

	var maxTs *time.Time

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		filename := entry.Name()
		fileTimestamp, err := time.Parse(FilenameTemplate, filename)
		if err != nil {
			continue
		}

		if maxTs == nil || fileTimestamp.After(*maxTs) {
			maxTs = &fileTimestamp
		}
	}

	return maxTs, nil
}

// cleanup deletes backups older than the retention period.
func cleanup(backupsDir string, retention time.Duration) {
	entries, err := os.ReadDir(backupsDir)
	if err != nil {
		slog.Error("unable to list backup dir contents", "err", err)
		return
	}

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		filename := entry.Name()
		fileTimestamp, err := time.Parse(FilenameTemplate, filename)
		if err != nil {
			continue
		}

		if time.Since(fileTimestamp) <= retention {
			continue
		}

		fullpath := path.Join(backupsDir, filename)
		err = os.Remove(fullpath)
		if err != nil {
			slog.Error("unable to remove old backup", "path", fullpath, "err", err)
		}
	}
}

// sleepUntilNextBackup blocks until backupInterval has elapsed since the last
// backup; it returns immediately if there is no previous backup.
func sleepUntilNextBackup(lastBackupTime *time.Time, backupInterval time.Duration) {
	if lastBackupTime == nil {
		return
	}

	timeSinceLastBackup := time.Since(*lastBackupTime)
	if timeSinceLastBackup >= backupInterval {
		return
	}

	time.Sleep(backupInterval - timeSinceLastBackup)
}

func main() {
	backupsDir := getStrFromEnv("BACKUPS_DIR", ".")
	err := os.Mkdir(backupsDir, 0750)
	if err != nil && !errors.Is(err, fs.ErrExist) {
		slog.Error("unable to create backups directory", "err", err)
		os.Exit(-1)
	}

	pgdumpBin := getStrFromEnv("PGDUMP_BINARY", "pg_dumpall")

	var customArgs []string
	customArgsStr := getStrFromEnv("CUSTOM_ARGS", "")
	if customArgsStr != "" {
		customArgs = strings.Split(customArgsStr, " ")
	}

	retention, err := time.ParseDuration(getStrFromEnv("RETENTION", "168h"))
	if err != nil {
		slog.Error("retention is incorrect", "err", err)
		os.Exit(-1)
	}

	backupIntervalStr := getStrFromEnv("BACKUP_INTERVAL", "24h")
	backupInterval, err := time.ParseDuration(backupIntervalStr)
	if err != nil {
		slog.Error("backup interval is incorrect", "err", err)
		os.Exit(-1)
	}

	exit := make(chan os.Signal, 1)
	signal.Notify(exit, os.Interrupt, syscall.SIGTERM)

	lastTimestamp, err := getLatestBackupTime(backupsDir)
	if err != nil {
		slog.Error("unable to list existing backups", "err", err)
		os.Exit(-1)
	}

	sleepUntilNextBackup(lastTimestamp, backupInterval)

	backup(backupsDir, pgdumpBin, customArgs)
	cleanup(backupsDir, retention)

	timer := time.NewTicker(backupInterval)

	// Run a backup and cleanup pass on every tick until an interrupt or SIGTERM arrives.
forloop:
	for {
		select {
		case <-timer.C:
			backup(backupsDir, pgdumpBin, customArgs)
			cleanup(backupsDir, retention)
		case <-exit:
			break forloop
		}
	}
}
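
The backup file name is effectively the program's only persistent state: getLatestBackupTime and cleanup recover timestamps by parsing names against the same layout constant used to create them. A minimal standalone sketch of that round-trip, reusing FilenameTemplate from main.go (the rest is illustrative, not part of the commit):

package main

import (
	"fmt"
	"time"
)

// Same layout constant as main.go: Go's reference time embedded in the file name.
const FilenameTemplate = "postgresql-backup-2006-01-02T15:04:05Z07:00.sql.gz"

func main() {
	// Formatting produces the on-disk name for a new backup...
	name := time.Now().UTC().Format(FilenameTemplate)
	fmt.Println(name)

	// ...and parsing the same name recovers its timestamp, which is what
	// getLatestBackupTime and cleanup rely on.
	ts, err := time.Parse(FilenameTemplate, name)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.UTC())
}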