Scripts to take MySQL 8.* full and incremental backups using mysqldump
You can perform both full and incremental backups of MySQL 8.* using the provided scripts.
These scripts are designed for standalone MySQL installations and include optional features such as S3 cloud storage uploads and encryption support for enhanced security.
- Script 1: Handles configuration settings, including backup and restore metadata, retention policies, and environment-specific parameters.
- Script 2: Executes the actual backup process. It performs a full backup if none exists for the current date. If a full backup is already present, it captures incremental changes by backing up binary logs. All output is logged to a file in the logs/ directory for auditability.
You can schedule backup execution using cron based on your operational requirements.
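Because the script decides on its own whether to take a full or an incremental backup, a single recurring entry is enough. For example, an hourly crontab entry (the install path and schedule here are illustrative; adjust both to your environment):
0 * * * * /opt/scripts/backup_standalone.sh >/dev/null 2>&1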
📁 Backup directory structure:
/backups/mysql/
├── full/
├── incr/
└── logs/
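You can create this tree up front (the script also runs mkdir -p on every run), for example:
sudo mkdir -p /backups/mysql/{full,incr,logs}
Make sure the directory is writable by whichever account runs the backups.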
Create a dedicated backup user named myadmin. The GRANT below is broad for simplicity; a least-privilege alternative is sketched after it.
mysql -uroot -proot@123
CREATE USER 'myadmin'@'%' IDENTIFIED BY 'myadmin@123';
GRANT ALL PRIVILEGES ON *.* TO 'myadmin'@'%' WITH GRANT OPTION;
FLUSH PRIVILEGES;
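If you prefer least privilege over GRANT ALL, a grant along these lines should cover what these scripts do (a sketch, not exhaustively tested; note that --delete-master-logs issues PURGE BINARY LOGS, which requires the BINLOG_ADMIN dynamic privilege in 8.x):
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, SHOW VIEW, EVENT, TRIGGER, REPLICATION CLIENT ON *.* TO 'myadmin'@'%';
GRANT BINLOG_ADMIN ON *.* TO 'myadmin'@'%';
FLUSH PRIVILEGES;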
Script 1: backup_restore.conf – Configuration File
# MySQL credentials
MYSQL_USER="myadmin"
MYSQL_PASSWORD="myadmin@123"
MYSQL_HOST="localhost"
MYSQL_PORT="3306"
# Backup settings
BACKUP_DIR="/backups/mysql"
FULL_BACKUP_DIR="$BACKUP_DIR/full"
INCR_BACKUP_DIR="$BACKUP_DIR/incr"
LOG_DIR="$BACKUP_DIR/logs"
RETENTION_DAYS=7
# GPG encryption (optional)
GPG_ENABLED=false
GPG_RECIPIENT="backup@yourdomain.com"
# S3 upload (optional)
S3_ENABLED=false
S3_BUCKET="s3://your-bucket-name"
AWS_PROFILE="default"
# Binary log settings for PITR
BINLOG_DIR="/var/lib/mysql"
START_BINLOG="" # Set this to the binary log file name at maximum PITR restore time
END_BINLOG="" # Set this to the binary log file name at maximum PITR restore time
# Usage notes:
# - Ensure BINLOG_DIR points to the directory where MySQL binary logs are stored.
# - The binary log naming format is assumed to be binlog.* (the MySQL 8.x default base name).
# - START_BINLOG should be set to the binary log file name corresponding to the backup's last log position.
# - These variables are used by restore and verify scripts to apply binary logs for point-in-time recovery.
# - Keep backup_restore.conf secure as it contains sensitive credentials and keys.
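Since the file holds credentials, restrict it to the account that runs the backups, e.g.:
chmod 600 backup_restore.conf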
Script 2: backup_standalone.sh – Full & Incremental Backup Script
#!/bin/bash
set -euo pipefail
source "$(dirname "$0")/backup_restore.conf"
TODAY=$(date +%F)
NOW=$(date +%H%M%S)
LOG_FILE="$LOG_DIR/backup-$TODAY.log"
ROWCOUNT_FILE="$LOG_DIR/backup_rowcounts.txt"
ENCRYPTED_ROWCOUNT="$ROWCOUNT_FILE.gpg"
# Create the directories before redirecting output, so tee has a log directory to write into
mkdir -p "$FULL_BACKUP_DIR" "$INCR_BACKUP_DIR" "$LOG_DIR"
exec > >(tee -a "$LOG_FILE") 2>&1
echo "=== MySQL Backup Started at $(date) ==="
# === STEP 1: Check for today's full backup ===
LATEST_FULL=$(find "$FULL_BACKUP_DIR" -type f -name "full-$TODAY*.sql.gz" | sort | tail -n 1)
if [[ -z "$LATEST_FULL" ]]; then
echo "⚠️ No full backup for today found. Creating full backup..."
FULL_PATH="$FULL_BACKUP_DIR/full-$TODAY-$NOW.sql.gz"
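# Flags: --single-transaction takes a consistent InnoDB snapshot without locking;
# --flush-logs rotates to a fresh binary log; --master-data=2 records the new
# binlog name as a commented CHANGE MASTER TO line (deprecated since 8.0.26,
# where --source-data=2 is the replacement); --delete-master-logs purges the
# pre-rotation binlogs once the dump completes.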
mysqldump --user="$MYSQL_USER" --password="$MYSQL_PASSWORD" --host="$MYSQL_HOST" --port="$MYSQL_PORT" \
--single-transaction --flush-logs --master-data=2 --all-databases --routines --events --triggers --delete-master-logs | gzip > "$FULL_PATH"
echo "✅ Full backup created: $FULL_PATH"
# Upload full backup to S3
if $S3_ENABLED; then
S3_KEY="full/full-$TODAY-$NOW.sql.gz"
echo "Uploading full backup to S3 as $S3_KEY..."
aws s3 cp "$FULL_PATH" "$S3_BUCKET/$S3_KEY" --profile "$AWS_PROFILE"
aws s3api head-object --bucket "$(echo "$S3_BUCKET" | cut -d'/' -f3)" --key "$S3_KEY" --profile "$AWS_PROFILE" >/dev/null \
&& echo "✅ S3 upload verified: $S3_KEY" \
|| echo "❌ S3 upload verification failed for $S3_KEY"
fi
LATEST_FULL="$FULL_PATH"
else
echo "✅ Found full backup for today: $LATEST_FULL"
fi
# === STEP 2: Extract last binlog from full backup ===
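# With --master-data=2 the dump contains a commented line such as (illustrative values):
#   -- CHANGE MASTER TO MASTER_LOG_FILE='binlog.000042', MASTER_LOG_POS=157;
# Dumps taken with --source-data=2 emit CHANGE REPLICATION SOURCE TO instead,
# which the pattern below also matches.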
LAST_BINLOG=$(gunzip -c "$LATEST_FULL" | awk -F"'" '/CHANGE MASTER TO|CHANGE REPLICATION SOURCE TO/ {print $2}' | head -n 1)
if [[ -z "$LAST_BINLOG" ]]; then
echo "❌ Could not extract binlog name."
exit 1
fi
echo "Last binlog: $LAST_BINLOG"
# === STEP 3: Flush logs to rotate binlog ===
mysqladmin --user="$MYSQL_USER" --password="$MYSQL_PASSWORD" --host="$MYSQL_HOST" --port="$MYSQL_PORT" flush-logs
# === STEP 4: Copy binlogs into INCR_PATH (robust) ===
INCR_PATH="$INCR_BACKUP_DIR/incr-$TODAY"
mkdir -p "$INCR_PATH"
# Enable safe globbing: unmatched patterns expand to empty list
shopt -s nullglob
# Collect binlog files already in INCR_PATH (exclude binlog.index)
COPIED_BINLOGS=( "$INCR_PATH"/binlog.* )
for i in "${!COPIED_BINLOGS[@]}"; do
[[ "$(basename "${COPIED_BINLOGS[$i]}")" == "binlog.index" ]] && unset 'COPIED_BINLOGS[$i]'
done
LAST_COPIED_NAME=""
if [[ ${#COPIED_BINLOGS[@]} -gt 0 ]]; then
mapfile -t sorted < <(for f in "${COPIED_BINLOGS[@]}"; do basename "$f"; done | sort)
LAST_COPIED_NAME="${sorted[-1]}"
fi
if [[ -z "$LAST_COPIED_NAME" ]]; then
echo "No previous binlogs found in $INCR_PATH. Copying ALL binlogs from $BINLOG_DIR..."
for LOG in "$BINLOG_DIR"/binlog.*; do
[[ -f "$LOG" ]] || continue
[[ "$(basename "$LOG")" == "binlog.index" ]] && continue
LOG_NAME=$(basename "$LOG")
echo "Backing up $LOG_NAME"
cp "$LOG" "$INCR_PATH/"
done
else
echo "Found previous binlogs in $INCR_PATH. Skipping duplicates, but re-copying last binlog: $LAST_COPIED_NAME"
for LOG in "$BINLOG_DIR"/binlog.*; do
[[ -f "$LOG" ]] || continue
[[ "$(basename "$LOG")" == "binlog.index" ]] && continue
LOG_NAME=$(basename "$LOG")
if [[ "$LOG_NAME" == "$LAST_COPIED_NAME" ]]; then
echo "🔄 Re-copying last binlog: $LOG_NAME"
elif [[ -f "$INCR_PATH/$LOG_NAME" ]]; then
echo "⏭️ Skipping already copied binlog: $LOG_NAME"
continue
fi
echo "Backing up $LOG_NAME"
cp "$LOG" "$INCR_PATH/"
done
fi
# Always re-copy binlog.index once
if [[ -f "$BINLOG_DIR/binlog.index" ]]; then
echo "Backing up binlog.index"
cp "$BINLOG_DIR/binlog.index" "$INCR_PATH/"
fi
# Restore globbing
shopt -u nullglob
# === STEP 5: Upload incremental backup to S3 ===
if $S3_ENABLED; then
S3_KEY="incr/incr-$TODAY-$NOW"
echo "Uploading incremental backup to S3 as $S3_KEY/"
aws s3 cp "$INCR_BACKUP_DIR" "$S3_BUCKET/$S3_KEY/" --recursive --profile "$AWS_PROFILE"
echo "✅ Incremental backup uploaded to S3."
fi
# === STEP 6: Generate row count snapshot ===
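# Note: counts are taken after the dump finishes, so they can drift on a busy
# server; treat the snapshot as a sanity check rather than an exact audit.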
echo "Generating row count snapshot..."
> "$ROWCOUNT_FILE"
DBS=$(mysql -N -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" -h"$MYSQL_HOST" -P"$MYSQL_PORT" \
-e "SHOW DATABASES;" | grep -Ev "^(information_schema|performance_schema|mysql|sys)$")
for db in $DBS; do
TABLES=$(mysql -N -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" -h"$MYSQL_HOST" -P"$MYSQL_PORT" \
-e "SHOW TABLES IN \`$db\`;")
for table in $TABLES; do
COUNT=$(mysql -N -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" -h"$MYSQL_HOST" -P"$MYSQL_PORT" \
-e "SELECT COUNT(*) FROM \`$db\`.\`$table\`;")
echo "$db $table $COUNT" >> "$ROWCOUNT_FILE"
echo "✅ $db.$table: $COUNT rows"
done
done
if $GPG_ENABLED; then
gpg --yes --batch -r "$GPG_RECIPIENT" -o "$ENCRYPTED_ROWCOUNT" -e "$ROWCOUNT_FILE"
echo "✅ Encrypted row count file: $ENCRYPTED_ROWCOUNT"
fi
if $S3_ENABLED; then
if $GPG_ENABLED; then
S3_KEY="rowcounts/backup_rowcounts-$TODAY-$NOW.txt.gpg"
aws s3 cp "$ENCRYPTED_ROWCOUNT" "$S3_BUCKET/$S3_KEY" --profile "$AWS_PROFILE"
else
# Without GPG there is no encrypted file to ship; upload the plain snapshot instead
S3_KEY="rowcounts/backup_rowcounts-$TODAY-$NOW.txt"
aws s3 cp "$ROWCOUNT_FILE" "$S3_BUCKET/$S3_KEY" --profile "$AWS_PROFILE"
fi
echo "✅ Row count snapshot uploaded to S3: $S3_KEY"
fi
# === Summary ===
echo "=== Backup Summary ==="
echo "Full backup: $LATEST_FULL"
echo "Incremental binlogs: $(ls "$INCR_PATH" | wc -l) files"
[[ -f "$ENCRYPTED_ROWCOUNT" ]] && echo "Row count snapshot: $ENCRYPTED_ROWCOUNT"
echo "Log file: $LOG_FILE"
echo "=== MySQL Backup Finished at $(date) ==="