Chapter 11 - Docker Integration
11.1 Rclone in Docker
Rclone provides an official Docker image, which is useful for:
- Running backup jobs in containerized environments
- Syncing data in CI/CD pipelines
- Orchestrating multiple services with Docker Compose
- Running scheduled syncs on a NAS or server
Official image
# Pull the latest image
docker pull rclone/rclone
# Pin a specific version
docker pull rclone/rclone:v1.69.0
# The image is about 30 MB (based on Alpine Linux)
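To make the containerized rclone feel like a native install, the docker run invocation can be wrapped in a shell function. A minimal sketch (the function name and the mounted paths are just one possible convention):
# Add to ~/.bashrc: forwards all arguments to a throwaway container,
# mounting the host config plus the current directory as /data
rclone() {
  docker run --rm -it \
    -v ~/.config/rclone:/config/rclone \
    -v "$PWD":/data \
    -w /data \
    rclone/rclone "$@"
}
After this, commands such as rclone listremotes run transparently inside Docker.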
11.2 Basic Usage
Running one-off commands
# List configured remotes
docker run --rm \
-v ~/.config/rclone:/config/rclone \
rclone/rclone \
listremotes
# List files
docker run --rm \
-v ~/.config/rclone:/config/rclone \
rclone/rclone \
ls s3:my-bucket/
# Copy files
docker run --rm \
-v ~/.config/rclone:/config/rclone \
-v /data:/data:ro \
rclone/rclone \
copy /data s3:my-bucket/data/ --progress
Mounting local directories
# Mount local directories into the container for a backup
docker run --rm \
-v ~/.config/rclone:/config/rclone \
-v /home/user/documents:/source:ro \
-v /home/user/backup:/dest \
rclone/rclone \
sync /source /dest --progress
# Mount remote storage onto a local directory (requires FUSE)
docker run --rm \
-v ~/.config/rclone:/config/rclone \
-v /mnt/cloud:/mnt:shared \
--device /dev/fuse \
--cap-add SYS_ADMIN \
rclone/rclone \
mount gdrive: /mnt --vfs-cache-mode full
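Since mount runs in the foreground, it is often more convenient to run the container detached under a fixed name; stopping the container then unmounts cleanly (rclone attempts to unmount on termination). A sketch reusing the flags above:
# Run the mount detached under a fixed name
docker run -d --name rclone-mount \
  -v ~/.config/rclone:/config/rclone \
  -v /mnt/cloud:/mnt:shared \
  --device /dev/fuse \
  --cap-add SYS_ADMIN \
  rclone/rclone \
  mount gdrive: /mnt --vfs-cache-mode full

# Stop (and thereby unmount), then clean up
docker stop rclone-mount
docker rm rclone-mount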
11.3 Data Backup Solutions
Full backup script
#!/bin/bash
# docker-backup.sh - Rclone backup in a Docker environment
set -euo pipefail
# Configuration
SOURCE="/data/important"
DEST="s3:my-backup/$(hostname)/"
LOG_DIR="/var/log/rclone"
CONFIG_DIR="$HOME/.config/rclone"
FILTER_FILE="$HOME/rclone-backup-filters.txt"
# Create the log directory
mkdir -p "$LOG_DIR"
# Create the filter file (exclude temp and cache dirs, include everything else)
cat > "$FILTER_FILE" << 'EOF'
- *.tmp
- *.temp
- .cache/**
- node_modules/**
- __pycache__/**
+ *
EOF
# Run the backup
docker run --rm \
-v "$CONFIG_DIR":/config/rclone:ro \
-v "$SOURCE":/source:ro \
-v "$LOG_DIR":/logs \
-v "$FILTER_FILE":/filters.txt:ro \
rclone/rclone \
sync /source "$DEST" \
--filter-from /filters.txt \
--transfers 8 \
--checkers 16 \
--log-file /logs/backup-$(date +%Y%m%d).log \
--log-level INFO \
--stats 30s
echo "Backup completed at $(date)"
Incremental backup with version retention
#!/bin/bash
# docker-incremental-backup.sh
DATE=$(date +%Y%m%d)
DEST="s3:my-backup/daily/${DATE}/"
BACKUP_DIR="s3:my-backup/deleted/${DATE}/"
# Files that would be deleted or overwritten in $DEST are moved to $BACKUP_DIR
docker run --rm \
  -v ~/.config/rclone:/config/rclone:ro \
  -v /data:/source:ro \
  -v /var/log:/logs \
  rclone/rclone \
  sync /source "$DEST" \
  --backup-dir "$BACKUP_DIR" \
  --transfers 8 \
  --progress \
  --log-file /logs/rclone-backup-${DATE}.log
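Dated snapshot directories accumulate indefinitely, so a retention step is useful. A sketch that removes daily snapshots older than 30 days (assumes GNU date and the YYYYMMDD directory names used above):
#!/bin/bash
# prune-snapshots.sh - delete daily snapshots older than 30 days
CUTOFF=$(date -d '30 days ago' +%Y%m%d)
docker run --rm -v ~/.config/rclone:/config/rclone:ro rclone/rclone \
  lsf s3:my-backup/daily/ --dirs-only | while read -r dir; do
  day="${dir%/}"   # strip the trailing slash that lsf appends to directories
  if [ "$day" -lt "$CUTOFF" ]; then
    docker run --rm -v ~/.config/rclone:/config/rclone:ro rclone/rclone \
      purge "s3:my-backup/daily/${day}/"
  fi
done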
11.4 Docker Compose Configuration
Base Compose file
# docker-compose.yml
version: '3.8'

services:
  # Rclone config initialization
  rclone-config:
    image: rclone/rclone
    volumes:
      - rclone-config:/config/rclone
    command: config show
    profiles:
      - config

  # Backup service
  backup:
    image: rclone/rclone
    volumes:
      - rclone-config:/config/rclone:ro
      - /data:/source:ro
      - backup-logs:/logs
    command: >
      sync /source s3:my-backup/
      --transfers 8
      --progress
      --log-file /logs/backup.log
      --log-level INFO
    profiles:
      - backup

  # Scheduled backup (via cron)
  backup-cron:
    image: rclone/rclone
    volumes:
      - rclone-config:/config/rclone:ro
      - /data:/source:ro
      - backup-logs:/logs
      - ./crontab:/etc/crontabs/root:ro
    entrypoint: /bin/sh
    command: -c "crond -f -l 2"
    restart: unless-stopped

  # WebDAV server
  webdav:
    image: rclone/rclone
    ports:
      - "8080:8080"
    volumes:
      - rclone-config:/config/rclone:ro
      - /data/documents:/data:ro
    command: >
      serve webdav /data
      --addr :8080
      --user admin
      --pass mypassword
      --read-only
    restart: unless-stopped

  # Rclone mount (cap_add/devices is narrower than privileged: true)
  mount:
    image: rclone/rclone
    cap_add:
      - SYS_ADMIN
    devices:
      - /dev/fuse
    security_opt:
      - apparmor:unconfined
    volumes:
      - rclone-config:/config/rclone:ro
      - /mnt/cloud:/mnt:shared
    command: >
      mount gdrive: /mnt
      --vfs-cache-mode full
      --vfs-cache-max-size 10G
      --allow-other
    restart: unless-stopped

volumes:
  rclone-config:
  backup-logs:
Crontab file
# crontab
# Run a backup every day at 02:00
0 2 * * * rclone sync /source s3:my-backup/daily/$(date +\%Y\%m\%d)/ --transfers 8 --log-file /logs/backup-$(date +\%Y\%m\%d).log --log-level INFO
# Run a full backup every Sunday at 03:00
0 3 * * 0 rclone sync /source s3:my-backup/weekly/$(date +\%Y\%W)/ --transfers 16 --log-file /logs/weekly-$(date +\%Y\%W).log --log-level INFO
# On the 1st of each month, delete logs older than 30 days
0 4 1 * * find /logs -name "*.log" -mtime +30 -delete
Running with Docker Compose
# Initialize the configuration interactively
docker compose run --rm rclone-config config
# Run a one-off backup
docker compose run --rm backup
# Start the scheduled backup
docker compose up -d backup-cron
# Start the WebDAV server
docker compose up -d webdav
# Start the mount service
docker compose up -d mount
# Follow the logs
docker compose logs -f backup-cron
11.5 Multi-Cloud Sync
Architecture
┌──────────┐     ┌──────────┐     ┌──────────┐
│   NAS    │────▶│  Rclone  │────▶│  AWS S3  │
│ (local)  │     │ (Docker) │     │ (backup) │
└──────────┘     └────┬─────┘     └──────────┘
                      │
                      ▼
                ┌──────────┐
                │  Google  │
                │  Drive   │
                │  (sync)  │
                └──────────┘
Compose configuration
version: '3.8'

services:
  # NAS → S3 backup
  backup-s3:
    image: rclone/rclone
    volumes:
      - rclone-config:/config/rclone:ro
      - /nas/data:/source:ro
      - backup-logs:/logs
    command: >
      sync /source s3:backup-bucket/
      --transfers 8
      --log-file /logs/s3-backup.log
      --log-level INFO
    restart: unless-stopped

  # NAS → Google Drive sync
  sync-gdrive:
    image: rclone/rclone
    volumes:
      - rclone-config:/config/rclone:ro
      - /nas/documents:/source:ro
      - backup-logs:/logs
    command: >
      sync /source gdrive:Documents/
      --transfers 4
      --log-file /logs/gdrive-sync.log
      --log-level INFO
    restart: unless-stopped

  # Google Drive → OneDrive mirror
  mirror-onedrive:
    image: rclone/rclone
    volumes:
      - rclone-config:/config/rclone:ro
      - backup-logs:/logs
    command: >
      sync gdrive:SharedDocs/ onedrive:Backup/
      --transfers 4
      --log-file /logs/mirror.log
      --log-level INFO
    restart: unless-stopped

volumes:
  rclone-config:
  backup-logs:
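Note that each of these services runs a single sync and exits, so restart: unless-stopped restarts the container (and the sync) immediately on completion. To sync at a fixed interval instead, one option is to wrap the command in a shell loop; a sketch for the backup-s3 service (the 3600-second interval is an arbitrary choice):
  backup-s3:
    image: rclone/rclone
    volumes:
      - rclone-config:/config/rclone:ro
      - /nas/data:/source:ro
      - backup-logs:/logs
    entrypoint: /bin/sh
    command: -c "while true; do rclone sync /source s3:backup-bucket/ --transfers 8 --log-file /logs/s3-backup.log --log-level INFO; sleep 3600; done"
    restart: unless-stopped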
11.6 CI/CD Integration
GitLab CI
# .gitlab-ci.yml
stages:
  - build
  - deploy

deploy-assets:
  stage: deploy
  # The rclone/rclone image sets `rclone` as its entrypoint, which must be
  # overridden so the GitLab runner can execute the job script
  image:
    name: rclone/rclone
    entrypoint: [""]
  before_script:
    - mkdir -p /config/rclone
    - echo "$RCLONE_CONFIG" > /config/rclone/rclone.conf
  script:
    - rclone sync ./dist/ s3:cdn-bucket/assets/ --progress
  only:
    - main
  variables:
    RCLONE_CONFIG: |
      [s3]
      type = s3
      provider = AWS
      access_key_id = $AWS_ACCESS_KEY_ID
      secret_access_key = $AWS_SECRET_ACCESS_KEY
      region = us-east-1
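Alternatively, rclone reads remote definitions from RCLONE_CONFIG_<REMOTE>_<OPTION> environment variables, so the job can run without writing any rclone.conf. A sketch of an equivalent job:
deploy-assets-env:
  stage: deploy
  image:
    name: rclone/rclone
    entrypoint: [""]
  variables:
    RCLONE_CONFIG_S3_TYPE: s3
    RCLONE_CONFIG_S3_PROVIDER: AWS
    RCLONE_CONFIG_S3_REGION: us-east-1
    RCLONE_CONFIG_S3_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID
    RCLONE_CONFIG_S3_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY
  script:
    - rclone sync ./dist/ s3:cdn-bucket/assets/
  only:
    - main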
GitHub Actions
# .github/workflows/deploy.yml
name: Deploy Assets

on:
  push:
    branches: [main]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install Rclone
        run: curl https://rclone.org/install.sh | sudo bash

      - name: Configure Rclone
        run: |
          mkdir -p ~/.config/rclone
          cat > ~/.config/rclone/rclone.conf << EOF
          [s3]
          type = s3
          provider = AWS
          access_key_id = ${{ secrets.AWS_ACCESS_KEY_ID }}
          secret_access_key = ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          region = us-east-1
          EOF

      - name: Deploy to S3
        run: rclone sync ./dist/ s3:cdn-bucket/assets/ --progress
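A verification step can catch incomplete uploads before the job reports success. A sketch of a step appended to the workflow above (--one-way only asserts that every local file exists in the bucket):
      - name: Verify upload
        run: rclone check ./dist/ s3:cdn-bucket/assets/ --one-way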
11.7 Data Migration Scripts
Cross-cloud migration
#!/bin/bash
# migrate-cloud.sh - cross-cloud data migration
set -euo pipefail
SOURCE="aws-s3:source-bucket/"
DEST="ali-oss:dest-bucket/"
LOG="/var/log/migration-$(date +%Y%m%d).log"
echo "Starting migration: $SOURCE → $DEST"
echo "Log file: $LOG"
docker run --rm \
-v ~/.config/rclone:/config/rclone:ro \
-v /var/log:/logs \
rclone/rclone \
sync "$SOURCE" "$DEST" \
--transfers 32 \
--checkers 16 \
--multi-thread-streams 8 \
--buffer-size 64M \
--log-file "$LOG" \
--log-level INFO \
--stats 1m \
--progress
echo "Migration completed at $(date)"
echo "Please verify the data with: rclone check $SOURCE $DEST"
Notes
⚠️ Permissions: mind ownership and permissions when bind-mounting local directories, and use the :ro flag on mounts that should be read-only to prevent accidental modification.
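Files written by the container are owned by root by default; passing the host UID and GID avoids surprises. A sketch (the copy command is just an example):
# Run as the invoking user so downloaded files keep host ownership
docker run --rm \
  --user "$(id -u):$(id -g)" \
  -v ~/.config/rclone:/config/rclone \
  -v /home/user/downloads:/dest \
  rclone/rclone \
  copy s3:my-bucket/data/ /dest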
⚠️ FUSE mounts: using the mount command inside Docker requires --privileged or --cap-add SYS_ADMIN, which carries security risks.
⚠️ Credential safety: never hard-code credentials into a Docker image. Use Docker secrets or environment variables.
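With Docker Compose, the config file can be supplied as a secret rather than baked into an image; a sketch (the service layout is illustrative, --config is a standard rclone flag):
services:
  backup:
    image: rclone/rclone
    secrets:
      - rclone_conf
    command: sync /source s3:my-backup/ --config /run/secrets/rclone_conf
secrets:
  rclone_conf:
    file: ./rclone.conf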
💡 Log management: persist logs in a Docker volume so problems can be diagnosed after containers exit.
💡 Resource limits: add deploy.resources.limits to the Compose file to cap CPU and memory usage.
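For example, to cap the backup service from the Compose file above (512M and 2 CPUs are arbitrary values):
services:
  backup:
    image: rclone/rclone
    deploy:
      resources:
        limits:
          cpus: "2.0"
          memory: 512M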